diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.rst index 953a330a215ca1..1107961ded34fa 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api.rst @@ -8,13 +8,13 @@ Legacy Conversion API :maxdepth: 1 :hidden: - openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model - openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model - openvino_docs_MO_DG_Additional_Optimization_Use_Cases - openvino_docs_MO_DG_FP16_Compression - openvino_docs_MO_DG_Python_API - openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ - Supported_Model_Formats_MO_DG + Setting Input Shapes + Cutting Off Parts of a Model + Embedding Preprocessing Computation + Compressing a Model to FP16 + Convert Models Represented as Python Objects + Model Optimizer Frequently Asked Questions + Supported Model Formats .. meta:: :description: Model conversion (MO) furthers the transition between training and diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.rst index e04814b8ea07ee..0eb9ca76060cd9 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/convert_python_model_objects.rst @@ -1,8 +1,13 @@ .. 
{#openvino_docs_MO_DG_Python_API} -Convert Models Represented as Python Objects -============================================ +[LEGACY] Convert Models Represented as Python Objects +============================================================= +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Model Preparation ` article. Model conversion API is represented by ``convert_model()`` method in openvino.tools.mo namespace. ``convert_model()`` is compatible with types from openvino.runtime, like PartialShape, Layout, Type, etc. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.rst index a0519aa75146cb..1a43044c4bcffa 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/cutting_model.rst @@ -1,8 +1,11 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model} -Cutting Off Parts of a Model -============================ +[LEGACY] Cutting Off Parts of a Model +================================================ +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. Sometimes, it is necessary to remove parts of a model when converting it to OpenVINO IR. 
This chapter describes how to do it, using model conversion API parameters. Model cutting applies mostly to TensorFlow models, which is why TensorFlow will be used in this chapter's examples, but it may be also useful for other frameworks. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.rst index bb8585a489c5f6..dd6aac35bd0de0 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/embedding_preprocessing_computation.rst @@ -1,8 +1,13 @@ .. {#openvino_docs_MO_DG_Additional_Optimization_Use_Cases} -Embedding Preprocessing Computation -=================================== +[LEGACY] Embedding Preprocessing Computation +===================================================== +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Conversion Parameters ` article. Input data for inference can be different from the training dataset and requires additional preprocessing before inference. 
To accelerate the whole pipeline including diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.rst index e2820ecbaddab1..a692fe389451ae 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/fp16_compression.rst @@ -1,9 +1,14 @@ .. {#openvino_docs_MO_DG_FP16_Compression} -Compressing a Model to FP16 -=========================== +[LEGACY] Compressing a Model to FP16 +============================================= +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Conversion Parameters ` article. + By default, when IR is saved all relevant floating-point weights are compressed to ``FP16`` data type during model conversion. It results in creating a "compressed ``FP16`` model", which occupies about half of the original space in the file system. 
The compression may introduce a minor drop in accuracy, diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst index 4ab9963388147b..0138307a04aec6 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/model_optimizer_faq.rst @@ -1,7 +1,7 @@ .. {#openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ} -Model Optimizer Frequently Asked Questions -========================================== +[LEGACY] Model Optimizer Frequently Asked Questions +=========================================================== .. important:: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.rst index c9c4ad6aac121b..a4e5e1fb935149 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/setting_input_shapes.rst @@ -1,7 +1,13 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model} -Setting Input Shapes -==================== +[LEGACY] Setting Input Shapes +==================================== + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. 
The guide on the new and recommended method can be found in the :doc:`Setting Input Shapes ` article. With model conversion API you can increase your model's efficiency by providing an additional shape definition, with these two parameters: `input_shape` and `static_shape`. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst index a9fc5aad4703f4..d66f8596f09fe6 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst @@ -1,19 +1,24 @@ .. {#Supported_Model_Formats_MO_DG} -Supported Model Formats -======================= +[LEGACY] Supported Model Formats +===================================== +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Supported Model Formats ` article. .. 
toctree:: :maxdepth: 1 :hidden: - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_ONNX - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow_Lite - openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle - openvino_docs_MO_DG_prepare_model_convert_model_tutorials + Converting a TensorFlow Model + Converting an ONNX Model + Converting a PyTorch Model + Converting a TensorFlow Lite Model + Converting a PaddlePaddle Model + Model Conversion Tutorials .. meta:: :description: Learn about supported model formats and the methods used to convert, read, and compile them in OpenVINO™. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.rst index 6da4ac3ba36e5b..db3a16b4954643 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_ONNX.rst @@ -1,13 +1,20 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_ONNX} -Converting an ONNX Model -======================== - +[LEGACY] Converting an ONNX Model +============================================= .. meta:: :description: Learn how to convert a model from the ONNX format to the OpenVINO Intermediate Representation. + +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. 
It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting an ONNX Model ` article. + + .. note:: ONNX models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example ` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. Converting an ONNX Model diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.rst index 0819d97ed90934..09f6a8d99f3057 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_Paddle.rst @@ -1,7 +1,7 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle} -Converting a PaddlePaddle Model -=============================== +[LEGACY] Converting a PaddlePaddle Model +====================================================== .. meta:: @@ -9,6 +9,13 @@ Converting a PaddlePaddle Model PaddlePaddle format to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. 
+ + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a PaddlePaddle Model ` article. + + This page provides general instructions on how to convert a model from a PaddlePaddle format to the OpenVINO IR format using Model Optimizer. The instructions are different depending on PaddlePaddle model format. .. note:: PaddlePaddle models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example ` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.rst index 87963bc9531bee..f4003b700446d6 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_PyTorch.rst @@ -1,7 +1,7 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch} -Converting a PyTorch Model -========================== +[LEGACY] Converting a PyTorch Model +============================================ .. meta:: @@ -9,6 +9,12 @@ Converting a PyTorch Model PyTorch format to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. 
It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a PyTorch Model ` article. + This page provides instructions on how to convert a model from the PyTorch format to the OpenVINO IR format. The conversion is a required step to run inference using OpenVINO API. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.rst index 637912310bba6c..54bb1016bf800c 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow.rst @@ -1,13 +1,18 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow} -Converting a TensorFlow Model -============================= - +[LEGACY] Converting a TensorFlow Model +============================================ .. meta:: :description: Learn how to convert a model from a TensorFlow format to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a TensorFlow Model ` article. + .. 
note:: TensorFlow models are supported via :doc:`FrontEnd API `. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example ` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.rst index f986048c566456..996785d8fc7655 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/Convert_Model_From_TensorFlow_Lite.rst @@ -1,13 +1,18 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow_Lite} -Converting a TensorFlow Lite Model -================================== +[LEGACY] Converting a TensorFlow Lite Model +===================================================== .. meta:: :description: Learn how to convert a model from a TensorFlow Lite format to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Converting a TensorFlow Lite Model ` article. 
To convert a TensorFlow Lite model, use the ``mo`` script and specify the path to the input ``.tflite`` model file: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.rst index 53878b922ad321..ae8a20f937a9b0 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials.rst @@ -1,7 +1,7 @@ .. {#openvino_docs_MO_DG_prepare_model_convert_model_tutorials} -Model Conversion Tutorials -========================== +[LEGACY] Model Conversion Tutorials +==================================================== .. toctree:: @@ -39,6 +39,12 @@ Model Conversion Tutorials :description: Get to know conversion methods for specific TensorFlow, ONNX, PyTorch, MXNet, and Kaldi models. +.. danger:: + + The code described in the tutorials has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This section provides a set of tutorials that demonstrate conversion methods for specific TensorFlow, ONNX, and PyTorch models. Note that these instructions do not cover all use cases and may not reflect your particular needs. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.rst index 46a362f40838d1..e24f50c82d6efd 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_AttentionOCR_From_Tensorflow.rst @@ -10,6 +10,12 @@ Converting a TensorFlow Attention OCR Model OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This tutorial explains how to convert the Attention OCR (AOCR) model from the `TensorFlow Attention OCR repository `__ to the Intermediate Representation (IR). 
Extracting a Model from ``aocr`` Library diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.rst index 37dda47124b92c..1fb3df0ccca34d 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_BERT_From_Tensorflow.rst @@ -9,6 +9,12 @@ Converting a TensorFlow BERT Model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + Pretrained models for BERT (Bidirectional Encoder Representations from Transformers) are `publicly available `__. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.rst index 4074a0446eb685..a2fa512c62a061 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Bert_ner.rst @@ -8,6 +8,11 @@ Converting a PyTorch BERT-NER Model :description: Learn how to convert a BERT-NER model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. The goal of this article is to present a step-by-step guide on how to convert PyTorch BERT-NER model to OpenVINO IR. First, you need to download the model and convert it to ONNX. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.rst index bb84fd01296da2..a101ce8ac24d91 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_CRNN_From_Tensorflow.rst @@ -9,6 +9,12 @@ Converting a TensorFlow CRNN Model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This tutorial explains how to convert a CRNN model to OpenVINO™ Intermediate Representation (IR). There are several public versions of TensorFlow CRNN model implementation available on GitHub. 
This tutorial explains how to convert the model from diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.rst index 7242607070d101..5de41b8473b4e4 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Cascade_RCNN_res101.rst @@ -9,6 +9,12 @@ Converting a PyTorch Cascade RCNN R-101 Model model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + The goal of this article is to present a step-by-step guide on how to convert a PyTorch Cascade RCNN R-101 model to OpenVINO IR. First, you need to download the model and convert it to ONNX. 
Downloading and Converting Model to ONNX diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.rst index 51a190fb823a7c..48e473511e8068 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_DeepSpeech_From_Tensorflow.rst @@ -8,7 +8,12 @@ Converting a TensorFlow DeepSpeech Model :description: Learn how to convert a DeepSpeech model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + `DeepSpeech project `__ provides an engine to train speech-to-text models. 
Downloading the Pretrained DeepSpeech Model diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.rst index ca59af4c210d95..4adb0107e792f0 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_EfficientDet_Models.rst @@ -9,6 +9,12 @@ Converting TensorFlow EfficientDet Models from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This tutorial explains how to convert EfficientDet public object detection models to the Intermediate Representation (IR). .. 
_efficientdet-to-ir: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.rst index 70d5eebddfcec1..a4489d6a6349d8 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_F3Net.rst @@ -8,7 +8,12 @@ Converting a PyTorch F3Net Model :description: Learn how to convert a F3Net model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. 
+ `F3Net `__ : Fusion, Feedback and Focus for Salient Object Detection Cloning the F3Net Repository diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.rst index 949fe0b98f02e5..dd8bab2c05adcd 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_FaceNet_From_Tensorflow.rst @@ -8,7 +8,12 @@ Converting TensorFlow FaceNet Models :description: Learn how to convert a FaceNet model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Supported Model Formats ` article. + `Public pre-trained FaceNet models `__ contain both training and inference part of graph. Switch between this two states is manageable with placeholder value. Intermediate Representation (IR) models are intended for inference, which means that train part is redundant. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.rst index 5813d026f8ac11..02aa087fff08e4 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Faster_RCNN.rst @@ -8,7 +8,12 @@ Converting an ONNX Faster R-CNN Model :description: Learn how to convert a Faster R-CNN model from ONNX to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + The instructions below are applicable **only** to the Faster R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model `__: 1. Download the pretrained model file from `onnx/models `__ (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117). 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.rst index df4759b75f0e4d..7857d3fd4c9836 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GNMT_From_Tensorflow.rst @@ -8,7 +8,12 @@ Converting a TensorFlow GNMT Model :description: Learn how to convert a GNMT model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This tutorial explains how to convert Google Neural Machine Translation (GNMT) model to the Intermediate Representation (IR). There are several public versions of TensorFlow GNMT model implementation available on GitHub. This tutorial explains how to convert the GNMT model from the `TensorFlow Neural Machine Translation (NMT) repository `__ to the IR. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.rst index 26e5d0821d7cc4..2a6de424f3136d 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_GPT2.rst @@ -8,6 +8,11 @@ Converting an ONNX GPT-2 Model :description: Learn how to convert a pre-trained GPT-2 model from ONNX to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. `Public pre-trained GPT-2 model `__ is a large transformer-based language model with a simple objective: predict the next word, given all of the previous words within some text. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.rst index 9f4d7906372e51..24aca44222ac54 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Mask_RCNN.rst @@ -8,6 +8,11 @@ Converting an ONNX Mask R-CNN Model :description: Learn how to convert a pre-trained Mask R-CNN model from ONNX to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. The instructions below are applicable **only** to the Mask R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model `__. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.rst index 5ae521e38e9228..407e2373eaf79a 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_NCF_From_Tensorflow.rst @@ -9,6 +9,11 @@ Converting a TensorFlow Neural Collaborative Filtering Model Filtering Model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. This tutorial explains how to convert Neural Collaborative Filtering (NCF) model to the OpenVINO Intermediate Representation. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.rst index fe19d38a288e5d..f5688887b253f2 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Object_Detection_API_Models.rst @@ -10,6 +10,12 @@ Converting TensorFlow Object Detection API Models Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + * Starting with the 2022.1 release, model conversion API can convert the TensorFlow Object Detection API Faster and Mask RCNNs topologies differently. By default, model conversion adds operation "Proposal" to the generated IR. This operation needs an additional input to the model with name "image_info" which should be fed with several values describing the preprocessing applied to the input image (refer to the :doc:`Proposal ` operation specification for more information). However, this input is redundant for the models trained and inferred with equal size images. Model conversion API can generate IR for such models and insert operation :doc:`DetectionOutput ` instead of ``Proposal``. 
The `DetectionOutput` operation does not require additional model input "image_info". Moreover, for some models the produced inference results are closer to the original TensorFlow model. In order to trigger new behavior, the attribute "operation_to_add" in the corresponding JSON transformation configuration file should be set to value "DetectionOutput" instead of default one "Proposal". * Starting with the 2021.1 release, model conversion API converts the TensorFlow Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the OpenVINO Runtime using dedicated reshape API. Refer to the :doc:`Using Shape Inference ` guide for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size. * To generate IRs for TF 1 SSD topologies, model conversion API creates a number of ``PriorBoxClustered`` operations instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the OpenVINO Runtime using dedicated API. The reshaping is supported for all SSD topologies except FPNs, which contain hardcoded shapes for some operations preventing from changing topology input shape. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.rst index 738c5b4fb8e68c..e0db665df7f25d 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_QuartzNet.rst @@ -8,7 +8,12 @@ Converting a PyTorch QuartzNet Model :description: Learn how to convert a QuartzNet model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + `NeMo project `__ provides the QuartzNet model. 
Downloading the Pre-trained QuartzNet Model diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.rst index 3fb9d5cad3775f..1d2254c129ec3e 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RCAN.rst @@ -8,7 +8,12 @@ Converting a PyTorch RCAN Model :description: Learn how to convert a RCAN model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. 
+ `RCAN `__ : Image Super-Resolution Using Very Deep Residual Channel Attention Networks Downloading and Converting the Model to ONNX diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.rst index 2df33ff523a170..e1fae7f6f70474 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RNNT.rst @@ -8,7 +8,12 @@ Converting a PyTorch RNN-T Model :description: Learn how to convert a RNN-T model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This guide covers conversion of RNN-T model from `MLCommons `__ repository. 
Follow the instructions below to export a PyTorch model into ONNX, before converting it to IR: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst index 6c76154292883a..554b3fff9dd181 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst @@ -9,6 +9,12 @@ Converting a TensorFlow RetinaNet Model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + This tutorial explains how to convert a RetinaNet model to the Intermediate Representation (IR). `Public RetinaNet model `__ does not contain pretrained TensorFlow weights. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.rst index ddeaca05ec6fe9..59a5a0389b6de4 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_Slim_Library_Models.rst @@ -9,6 +9,11 @@ Converting TensorFlow Slim Image Classification Model Library Models Classification model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. `TensorFlow-Slim Image Classification Model Library `__ is a library to define, train and evaluate classification models in TensorFlow. The library contains Python scripts defining the classification topologies together with checkpoint files for several pre-trained classification topologies. 
To convert a TensorFlow-Slim library model, complete the following steps: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.rst index 18d8b134df38ef..d5fd1bb320072d 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_WideAndDeep_Family_Models.rst @@ -9,6 +9,12 @@ Converting TensorFlow Wide and Deep Family Models models from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + The Wide and Deep models is a combination of wide and deep parts for memorization and generalization of object features respectively. These models can contain different types of object features such as numerical, categorical, sparse and sequential features. These feature types are specified through Tensorflow tf.feature_column API. Table below presents what feature types are supported by the OpenVINO toolkit. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.rst index cbabde8ff6cc40..2d23feca2ded5b 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_XLNet_From_Tensorflow.rst @@ -8,7 +8,12 @@ Converting a TensorFlow XLNet Model :description: Learn how to convert an XLNet model from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + Pretrained models for XLNet (Bidirectional Encoder Representations from Transformers) are `publicly available `__. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.rst index 53f212c27e1f2f..23495f299f8e65 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLACT.rst @@ -9,6 +9,12 @@ Converting a PyTorch YOLACT Model from PyTorch to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + You Only Look At CoefficienTs (YOLACT) is a simple, fully convolutional model for real-time instance segmentation. The PyTorch implementation is publicly available in `this GitHub repository `__. The YOLACT++ model is not supported, because it uses deformable convolutional layers that cannot be represented in ONNX format. 
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.rst index d2e3b4d919c531..a2273e68505ff6 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_YOLO_From_Tensorflow.rst @@ -8,6 +8,11 @@ Converting TensorFlow YOLO Models :description: Learn how to convert YOLO models from TensorFlow to the OpenVINO Intermediate Representation. +.. danger:: + + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. This document explains how to convert real-time object detection YOLOv1, YOLOv2, YOLOv3 and YOLOv4 public models to the Intermediate Representation (IR). 
All YOLO models are originally implemented in the DarkNet framework and consist of two files: diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.rst index a7d348ea304d1d..b4dec76b524353 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_lm_1b_From_Tensorflow.rst @@ -9,7 +9,12 @@ Converting a TensorFlow Language Model on One Billion Word Benchmark Model on One Billion Word Benchmark to the OpenVINO Intermediate Representation. +.. danger:: + The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. + + This guide describes a deprecated conversion method. The guide on the new and recommended method can be found in the :doc:`Python tutorials `. + Downloading a Pre-trained Language Model on One Billion Word Benchmark ###################################################################### diff --git a/docs/documentation_build_instructions.md b/docs/documentation_build_instructions.md index dc0c33a388da35..31759abb9b3b2a 100644 --- a/docs/documentation_build_instructions.md +++ b/docs/documentation_build_instructions.md @@ -1,37 +1,44 @@ -# Build Documentation Using CMake +# Building documentation with CMake -1. 
Clone submodules: +**NOTE**: Instructions were tested inside a container based on the Ubuntu 22.04 Docker image. + +1. Clone the OpenVINO repository and set up its submodules ``` -cd openvino -git submodule update --init --recursive +$ git clone +$ cd +$ git submodule update --init --recursive ``` -2. Install build dependencies using the `install_build_dependencies.sh` script in the project root folder. +2. Install build dependencies using the `install_build_dependencies.sh` script located in the OpenVINO root directory ``` -chmod +x install_build_dependencies.sh -./install_build_dependencies.sh +$ chmod +x install_build_dependencies.sh +$ ./install_build_dependencies.sh ``` - -3. Install [doxyrest](https://github.com/vovkos/doxyrest/releases/tag/doxyrest-2.1.2) and put the `bin` folder in your path - -4. Install python dependencies: - +3. Install additional packages needed to build documentation ``` -python -m pip install -r docs/requirements.txt +$ apt install -y doxygen graphviz texlive ``` - -5. Install the sphinx theme - -6. Create a build folder: - +4. Create python virtualenv and install needed libraries ``` -mkdir build && cd build +$ python3 -m venv env +$ source env/bin/activate +(env) $ pip install -r docs/requirements.txt ``` - -7. Build documentation using these commands: +5. Install the sphinx theme ``` -cmake .. -DENABLE_DOCS=ON -DENABLE_DOCKER=ON +(env) $ cd docs/openvino_sphinx_theme && python setup.py install && cd - +``` +6. Create a build folder: ``` - +(env) $ mkdir build && cd build +``` +7. Build documentation using these commands: ``` -cmake --build . --target sphinx_docs +(env) $ cmake .. -DENABLE_DOCS=ON +(env) $ cmake --build . 
--target sphinx_docs ``` +Depending on the needs, following variables can be added to first cmake call: +- building C/C++ API: `-DENABLE_CPP_API=ON` +- building Python API: `-DENABLE_PYTHON_API=ON` +- building Notebooks: `-DENABLE_NOTEBOOKS=ON` +- building OMZ: `-DENABLE_OMZ=ON` +- building OVMS: `-DENABLE_OVMS=ON -DOVMS_DOCS_DIR=` diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py index 298914ffdbd27d..aa99adbffdd2d8 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py @@ -352,7 +352,11 @@ def extract_model_graph(argv): from tensorflow.python.training.tracking.base import Trackable # pylint: disable=no-name-in-module,import-error trackable_is_imported = True except: - log.warning("Could not import tensorflow.python.training.tracking.base.Trackable type.") + try: + from tensorflow.python.trackable.base import Trackable + trackable_is_imported = True + except: + log.warning("Could not import tensorflow.python.training.tracking.base.Trackable type.") env_setup = get_environment_setup("tf") if isinstance(model, tf.Graph): return True diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index 2033b5b9de709a..fe91da44baf6f4 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -70,19 +70,22 @@ class OPENVINO_API Constant : public Op { template Constant(const element::Type& type, const Shape& shape, const std::vector& values) : Constant(false, type, shape) { + const auto this_shape_size = shape_size(m_shape); + const auto values_size = values.size(); + const auto has_single_value = (values_size == 1); NODE_VALIDATION_CHECK(this, - values.size() == 1 || values.size() == shape_size(m_shape), + has_single_value || values_size == this_shape_size, "Did not get the expected number of literals for a 
constant of shape ", m_shape, " (got ", - values.size(), + values_size, ", expected ", - (shape_size(m_shape) == 1 ? "" : "1 or "), - shape_size(m_shape), + (this_shape_size == 1 ? "" : "1 or "), + this_shape_size, ")."); - if (values.size() == 1) { - fill_data(type, values.front()); + if (has_single_value) { + fill_data(type, values[0]); } else { write_values(values); } @@ -890,6 +893,16 @@ class OPENVINO_API Constant : public Op { # pragma GCC diagnostic pop #endif } + + template + void fill_or_write(const bool fill, const element::Type& et, const std::vector& values) { + if (fill) { + fill_data(et, values[0]); + } else { + write_values(values); + } + } + template from_string_vector(const std::vector& str_values) { - std::vector values; +template ::value>::type* = nullptr> +T str_to_value(const std::string& s, size_t* pos) { + return static_cast(std::is_signed::value ? std::stoll(s, pos) : std::stoull(s, pos)); +} + +template ::value>::type* = nullptr> +T str_to_value(const std::string& s, size_t* pos) { + return static_cast(std::stod(s, pos)); +} + +template +std::vector from_string_vector(const std::vector& str_values) { + std::vector values; values.reserve(str_values.size()); std::transform(str_values.cbegin(), str_values.cend(), std::back_inserter(values), [](const std::string& s) { size_t pos; - auto v = std::stold(s, &pos); + auto v = str_to_value(s, &pos); OPENVINO_ASSERT(s.size() == pos, "Could not parse literal '", s, "'"); return v; }); @@ -97,33 +108,30 @@ Constant::Constant(const Tensor& tensor) Constant::Constant(const element::Type& type, const Shape& shape, const std::vector& values) : Constant(false, type, shape) { + const auto this_shape_size = shape_size(m_shape); + const auto values_size = values.size(); + const auto has_single_value = (values_size == 1); NODE_VALIDATION_CHECK(this, - values.size() == 1 || values.size() == shape_size(m_shape), + has_single_value || values_size == this_shape_size, "Did not get the expected number of literals for 
a constant of shape ", m_shape, " (got ", - values.size(), + values_size, ", expected ", - (shape_size(m_shape) == 1 ? "" : "1 or "), - shape_size(m_shape), + (this_shape_size == 1 ? "" : "1 or "), + this_shape_size, ")."); + const auto is_checked_and_identical = has_single_value && (this_shape_size != 1); if (type == element::string) { - if (values.size() == 1) { - fill_data(type, values.front()); - } else { - write_values(values); - } + fill_or_write(is_checked_and_identical, type, values); + } else if (type.is_real()) { + fill_or_write(is_checked_and_identical, type, from_string_vector(values)); + } else if (type.is_signed()) { + fill_or_write(is_checked_and_identical, type, from_string_vector(values)); } else { - auto parsed_values = from_string_vector(values); - if (parsed_values.size() == 1) { - fill_data(type, parsed_values.front()); - } else { - write_values(parsed_values); - } + fill_or_write(is_checked_and_identical, type, from_string_vector(values)); } - const auto is_checked_and_identical = (values.size() == 1) && (shape_size(m_shape) != 1); - update_identical_flags(is_checked_and_identical, is_checked_and_identical); } Constant::Constant(const element::Type& type, const Shape& shape) : Constant(true, type, shape) {} @@ -385,10 +393,11 @@ bool Constant::evaluate(TensorVector& outputs, const TensorVector& inputs) const outputs.emplace_back(m_element_type, m_shape); else outputs[0].set_shape(m_shape); + if (m_element_type == ov::element::string) { auto num_elements = shape_size(m_shape); - const std::string* src_strings = static_cast(get_data_ptr()); - std::string* dst_strings = static_cast(outputs[0].data()); + auto src_strings = static_cast(get_data_ptr()); + auto dst_strings = static_cast(outputs[0].data()); std::copy_n(src_strings, num_elements, dst_strings); } else { std::memcpy(outputs[0].data(), get_data_ptr(), outputs[0].get_byte_size()); diff --git a/src/core/tests/constant.cpp b/src/core/tests/constant.cpp index f0f69d49517c6e..1481f94f0e8053 
100644 --- a/src/core/tests/constant.cpp +++ b/src/core/tests/constant.cpp @@ -701,6 +701,29 @@ TEST(constant, int64_vector_broadcast) { EXPECT_EQ(p[3], 1); } +TEST(constant, int64_string_max) { + Shape shape{4}; + vector input{"9223372036854775807", "9223372036854775807", "9223372036854775807", "9223372036854775807"}; + + constexpr auto exp_value = std::numeric_limits::max(); + ov::op::v0::Constant c(element::i64, shape, input); + auto v = c.get_vector(); + ASSERT_EQ(v.size(), shape_size(shape)); + EXPECT_THAT(v, testing::Each(exp_value)); + + const auto p = c.get_data_ptr(); + EXPECT_EQ(p[0], exp_value); + EXPECT_EQ(p[1], exp_value); + EXPECT_EQ(p[2], exp_value); + EXPECT_EQ(p[3], exp_value); + + EXPECT_EQ(input, c.get_value_strings()); + + for (unsigned i = 0; i != input.size(); ++i) { + EXPECT_EQ(input[i], c.convert_value_to_string(i)); + } +} + // // uint1 // @@ -1184,6 +1207,31 @@ TEST(constant, uint64_vector_broadcast) { EXPECT_EQ(p[3], 1); } +TEST(constant, uint64_string_max) { + Shape shape{4}; + vector input{"18446744073709551615", + "18446744073709551615", + "18446744073709551615", + "18446744073709551615"}; + ov::op::v0::Constant c(element::u64, shape, input); + constexpr auto exp_value = std::numeric_limits::max(); + auto v = c.get_vector(); + ASSERT_EQ(v.size(), shape_size(shape)); + EXPECT_THAT(v, testing::Each(exp_value)); + + const auto p = c.get_data_ptr(); + EXPECT_EQ(p[0], exp_value); + EXPECT_EQ(p[1], exp_value); + EXPECT_EQ(p[2], exp_value); + EXPECT_EQ(p[3], exp_value); + + EXPECT_EQ(input, c.get_value_strings()); + + for (unsigned i = 0; i != input.size(); ++i) { + EXPECT_EQ(input[i], c.convert_value_to_string(i)); + } +} + // // bfloat16 //