// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief This is a header file for the Inference Engine Core class C++ API
*
* @file ie_core.hpp
*/
#pragma once
#include <istream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "cpp/ie_executable_network.hpp"
#include "ie_extension.h"
#include "ie_plugin_config.hpp"
#include "ie_remote_context.hpp"
#include "ie_version.hpp"
namespace InferenceEngine {
/**
* @ingroup ie_cpp
* @brief This class represents Inference Engine Core entity.
*
 * It can throw exceptions that the application can safely catch and handle.
*/
class INFERENCE_ENGINE_API_CLASS(Core) {
class Impl;
std::shared_ptr<Impl> _impl;
public:
/** @brief Constructs an OpenVINO Core instance with devices
* and their plugins description.
*
 * There are two ways to configure device plugins:
 * 1. (default) Use an XML configuration file in the case of a dynamic libraries build;
 * 2. Use a strictly defined configuration in the case of a static libraries build.
*
* @param xmlConfigFile Path to the .xml file with plugins to load from. If the XML configuration file is not
* specified, default OpenVINO Runtime plugins are loaded from:
* 1. (dynamic build) default `plugins.xml` file located in the same folder as OpenVINO runtime shared library;
 * 2. (static build) statically defined configuration. In this case, the path to the .xml file is ignored.
*/
explicit Core(const std::string& xmlConfigFile = {});
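/* A minimal construction sketch; the "custom_plugins.xml" path below is an illustrative
* assumption, not a fixed default:
*
*     InferenceEngine::Core core;                              // default plugins.xml / static config
*     InferenceEngine::Core customCore("custom_plugins.xml");  // explicit plugin configuration file
*/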
/**
 * @brief Returns plugin version information
*
* @param deviceName Device name to identify plugin
* @return A vector of versions
*/
std::map<std::string, Version> GetVersions(const std::string& deviceName) const;
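/* Usage sketch for GetVersions, assuming <iostream> is included and "CPU" is used as an
* illustrative device name:
*
*     const std::map<std::string, InferenceEngine::Version> versions = core.GetVersions("CPU");
*     for (const auto& item : versions)
*         std::cout << item.first << " : " << item.second.description << std::endl;
*/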
#ifdef ENABLE_UNICODE_PATH_SUPPORT
/**
* @brief Reads models from IR and ONNX formats
 * @param modelPath Path to a model
 * @param binPath Path to a data file.
 * For the IR format (*.bin):
 * * if the path is empty, the runtime tries to read a .bin file with the same base name as the .xml file;
 * * if such a .bin file is not found, the IR is loaded without weights.
 * For the ONNX format (*.onnx):
 * * the binPath parameter is not used.
* @return CNNNetwork
*/
CNNNetwork ReadNetwork(const std::wstring& modelPath, const std::wstring& binPath = {}) const;
#endif
/**
* @brief Reads models from IR and ONNX formats
 * @param modelPath Path to a model
 * @param binPath Path to a data file.
 * For the IR format (*.bin):
 * * if the path is empty, the runtime tries to read a .bin file with the same base name as the .xml file;
 * * if such a .bin file is not found, the IR is loaded without weights.
 * For the ONNX format (*.onnx):
 * * the binPath parameter is not used.
* @return CNNNetwork
*/
CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath = {}) const;
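/* Usage sketch for the path-based overload (file names are illustrative):
*
*     InferenceEngine::Core core;
*     InferenceEngine::CNNNetwork irNetwork = core.ReadNetwork("model.xml", "model.bin");  // IR with explicit weights
*     InferenceEngine::CNNNetwork onnxNetwork = core.ReadNetwork("model.onnx");            // binPath is not used for ONNX
*/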
/**
* @brief Reads models from IR and ONNX formats
* @param model string with model in IR or ONNX format
* @param weights shared pointer to constant blob with weights
 * Reading ONNX models does not support loading weights from data blobs.
 * If you are using an ONNX model with external data files, please use the
 * `InferenceEngine::Core::ReadNetwork(const std::string& modelPath, const std::string& binPath) const`
 * function overload, which takes a filesystem path to the model.
 * For the ONNX case the second parameter should contain an empty blob.
 * @note The created InferenceEngine::CNNNetwork object shares the weights with the `weights` object.
 * So, do not create `weights` on temporary data which can later be freed, since the network
 * constant data would then point to invalid memory.
* @return CNNNetwork
*/
CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const;
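/* Sketch of reading an ONNX model from memory with an empty weights blob, as the comment
* above describes (requires <fstream> and <sstream>; the file name is illustrative):
*
*     std::ifstream file("model.onnx", std::ios::binary);
*     std::stringstream buffer;
*     buffer << file.rdbuf();
*     InferenceEngine::CNNNetwork network = core.ReadNetwork(buffer.str(), InferenceEngine::Blob::CPtr());
*/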
/**
* @brief Creates an executable network from a network object and uses AUTO plugin as the default device to load
* executable network.
*
* Users can create as many networks as they need and use
 * them simultaneously (up to the limit of the hardware resources)
*
* @param network CNNNetwork object acquired from Core::ReadNetwork
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
*/
ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map<std::string, std::string>& config = {});
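/* Usage sketch, given a CNNNetwork `network` obtained from Core::ReadNetwork; device
* selection is delegated to the AUTO plugin:
*
*     InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network);
*     InferenceEngine::InferRequest request = exec.CreateInferRequest();
*/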
/**
* @brief Creates an executable network from a network object.
*
* Users can create as many networks as they need and use
 * them simultaneously (up to the limit of the hardware resources)
*
* @param network CNNNetwork object acquired from Core::ReadNetwork
* @param deviceName Name of device to load network to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
*/
ExecutableNetwork LoadNetwork(const CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {});
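/* Usage sketch with an explicit device and a per-load config; the device name and config
* key below are illustrative:
*
*     std::map<std::string, std::string> config = {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}};
*     InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, "CPU", config);
*/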
/**
* @brief Reads model and creates an executable network from IR or ONNX file and uses AUTO plugin as the default
* device to load executable network.
*
 * This can be more efficient than the ReadNetwork + LoadNetwork(CNNNetwork) flow,
 * especially when caching is enabled and a cached model is available
*
* @param modelPath path to model
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
 * operation
*
* @return An executable network reference
*/
ExecutableNetwork LoadNetwork(const std::string& modelPath, const std::map<std::string, std::string>& config = {});
/**
* @brief Reads model and creates an executable network from IR or ONNX file
*
 * This can be more efficient than the ReadNetwork + LoadNetwork(CNNNetwork) flow,
 * especially when caching is enabled and a cached model is available
*
* @param modelPath path to model
* @param deviceName Name of device to load network to
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
 * operation
*
* @return An executable network reference
*/
ExecutableNetwork LoadNetwork(const std::string& modelPath,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {});
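/* Usage sketch for the path-based overloads; with CONFIG_KEY(CACHE_DIR) set, repeated loads
* may be served from the model cache (the cache directory, model path, and device name are
* illustrative):
*
*     core.SetConfig({{CONFIG_KEY(CACHE_DIR), "model_cache"}});
*     InferenceEngine::ExecutableNetwork exec = core.LoadNetwork("model.xml", "GPU");
*/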
/**
* @brief Registers extension
* @param extension Pointer to already loaded extension
*/
void AddExtension(const IExtensionPtr& extension);
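/* Usage sketch (the extension library name is an illustrative assumption):
*
*     core.AddExtension(std::make_shared<InferenceEngine::Extension>("libcustom_extension.so"));
*/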
/**
* @brief Creates an executable network from a network object within a specified remote context.
* @param network CNNNetwork object acquired from Core::ReadNetwork
* @param context Pointer to RemoteContext object
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network object
*/
ExecutableNetwork LoadNetwork(const CNNNetwork& network,
RemoteContext::Ptr context,
const std::map<std::string, std::string>& config = {});
/**
* @brief Registers extension for the specified plugin
*
* @param extension Pointer to already loaded extension
* @param deviceName Device name to identify plugin to add an executable extension
*/
void AddExtension(IExtensionPtr extension, const std::string& deviceName);
/**
* @brief Creates an executable network from a previously exported network
*
* @param modelFileName Path to the location of the exported file
 * @param deviceName Name of a device to load the executable network on
 * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
 * operation
* @return An executable network reference
*/
ExecutableNetwork ImportNetwork(const std::string& modelFileName,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {});
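/* Sketch of an export/import round trip, given a CNNNetwork `network`; the device name and
* blob path are illustrative, and Export is a method of ExecutableNetwork:
*
*     InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, "MYRIAD");
*     exec.Export("model.blob");
*     InferenceEngine::ExecutableNetwork imported = core.ImportNetwork("model.blob", "MYRIAD");
*/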
/**
* @brief Creates an executable network from a previously exported network
* @param networkModel network model stream
 * @param deviceName Name of a device to load the executable network on
 * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
 * operation
* @return An executable network reference
*/
ExecutableNetwork ImportNetwork(std::istream& networkModel,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {});
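/* Sketch of importing from a stream (requires <fstream>; the blob path and device name are
* illustrative):
*
*     std::ifstream blobFile("model.blob", std::ios::binary);
*     InferenceEngine::ExecutableNetwork imported = core.ImportNetwork(blobFile, "MYRIAD");
*/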
/**
* @deprecated Use Core::ImportNetwork with explicit device name
* @brief Creates an executable network from a previously exported network
* @param networkModel network model stream
* @return An executable network reference
*/
INFERENCE_ENGINE_DEPRECATED("Use Core::ImportNetwork with explicit device name")
ExecutableNetwork ImportNetwork(std::istream& networkModel);
/**
* @brief Creates an executable network from a previously exported network within a specified
* remote context.
*
* @param networkModel Network model stream
* @param context Pointer to RemoteContext object
* @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
* operation
* @return An executable network reference
*/
ExecutableNetwork ImportNetwork(std::istream& networkModel,
const RemoteContext::Ptr& context,
const std::map<std::string, std::string>& config = {});
/**
 * @brief Queries a device whether it supports the specified network with the specified configuration
*
* @param deviceName A name of a device to query
* @param network Network object to query
* @param config Optional map of pairs: (config parameter name, config parameter value)
 * @return An object containing a map of pairs: a layer name -> a device name supporting this layer.
*/
QueryNetworkResult QueryNetwork(const CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {}) const;
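/* Usage sketch: list which layers of `network` a device can execute (assumes <iostream>;
* the device name is illustrative):
*
*     InferenceEngine::QueryNetworkResult result = core.QueryNetwork(network, "CPU");
*     for (const auto& layer : result.supportedLayersMap)
*         std::cout << layer.first << " -> " << layer.second << std::endl;
*/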
/**
* @brief Sets configuration for device, acceptable keys can be found in ie_plugin_config.hpp
*
* @param deviceName An optional name of a device. If device name is not specified, the config is set for all the
* registered devices.
*
* @param config Map of pairs: (config parameter name, config parameter value)
*/
void SetConfig(const std::map<std::string, std::string>& config, const std::string& deviceName = {});
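/* Usage sketch (the config keys and device name are illustrative):
*
*     core.SetConfig({{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}, "CPU");  // one device
*     core.SetConfig({{CONFIG_KEY(CACHE_DIR), "model_cache"}});              // all registered devices
*/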
/**
* @brief Gets configuration dedicated to device behaviour.
*
 * The method is intended to extract information which can be set via the SetConfig method.
*
* @param deviceName - A name of a device to get a configuration value.
* @param name - config key.
* @return Value of config corresponding to config key.
*/
Parameter GetConfig(const std::string& deviceName, const std::string& name) const;
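/* Usage sketch (assumes <iostream>; the config key and device name are illustrative):
*
*     InferenceEngine::Parameter value = core.GetConfig("CPU", CONFIG_KEY(CPU_THREADS_NUM));
*     std::cout << value.as<std::string>() << std::endl;
*/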
/**
* @brief Gets general runtime metric for dedicated hardware.
*
* The method is needed to request common device properties
 * which are executable network agnostic. It can be a device name, temperature, and other device-specific values.
*
* @param deviceName - A name of a device to get a metric value.
* @param name - metric name to request.
* @param options - optional parameters to get a metric value
* @return Metric value corresponding to metric key.
*/
Parameter GetMetric(const std::string& deviceName, const std::string& name, const ParamMap& options = {}) const;
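/* Usage sketch (the metric key and device name are illustrative):
*
*     std::string fullName = core.GetMetric("GPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
*/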
/**
* @brief Returns devices available for neural networks inference
*
 * @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, GNA }.
 * If there is more than one device of a specific type, they are enumerated with the .# suffix.
*/
std::vector<std::string> GetAvailableDevices() const;
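/* Usage sketch (assumes <iostream>):
*
*     for (const std::string& device : core.GetAvailableDevices())
*         std::cout << device << std::endl;
*/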
/**
* @brief Register new device and plugin which implement this device inside Inference Engine.
*
 * @param plugin Path (absolute or relative) or name of a plugin. Depending on the platform, `plugin` is wrapped with
 * a shared library suffix and prefix to identify the full library name
*
* @param deviceName A device name to register plugin for
*/
void RegisterPlugin(const std::string& plugin, const std::string& deviceName);
/**
 * @brief Unloads a previously loaded plugin with the specified name from the Inference Engine.
 * The method is needed to remove the plugin instance and free its resources. If a plugin for the
 * specified device has not been created before, the method throws an exception.
*
* @param deviceName Device name identifying plugin to remove from Inference Engine
*/
void UnregisterPlugin(const std::string& deviceName);
/** @brief Registers plugin to Inference Engine Core instance using XML configuration file with
* plugins description.
*
* XML file has the following structure:
*
* ```xml
* <ie>
* <plugins>
* <plugin name="" location="">
* <extensions>
* <extension location=""/>
* </extensions>
* <properties>
* <property key="" value=""/>
* </properties>
* </plugin>
* </plugins>
* </ie>
* ```
*
 * - `name` identifies the name of the device enabled by the plugin
 * - `location` specifies the absolute path to the dynamic library with the plugin. The path can also be relative to the
 * Inference Engine shared library, which allows a common config to be shared across systems with different layouts.
 * - Properties are set to the plugin via the `SetConfig` method.
 * - Extensions are set to the plugin via the `AddExtension` method.
*
* @param xmlConfigFile A path to .xml file with plugins to register.
*/
void RegisterPlugins(const std::string& xmlConfigFile);
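/* Usage sketch (the XML path is illustrative; the file follows the structure shown above):
*
*     core.RegisterPlugins("custom_plugins.xml");
*/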
/**
 * @brief Creates a new shared context object on the specified accelerator device
 * using the specified plugin-specific low-level device API parameters (device handle, pointer, etc.)
* @param deviceName Name of a device to create new shared context on.
* @param params Map of device-specific shared context parameters.
* @return A shared pointer to a created remote context.
*/
RemoteContext::Ptr CreateContext(const std::string& deviceName, const ParamMap& params);
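/* Sketch of creating a remote context and loading a network into it, given a CNNNetwork
* `network`; the actual ParamMap keys are plugin-specific (for example, an existing OpenCL
* context handle for GPU) and are not spelled out here:
*
*     InferenceEngine::ParamMap contextParams;  // filled with plugin-specific keys
*     InferenceEngine::RemoteContext::Ptr context = core.CreateContext("GPU", contextParams);
*     InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, context);
*/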
/**
 * @brief Gets a pointer to the default (plugin-supplied) shared context object for the specified accelerator device.
 * @param deviceName - A name of a device to get the default shared context from.
* @return A shared pointer to a default remote context.
*/
RemoteContext::Ptr GetDefaultContext(const std::string& deviceName);
};
/**
 * @brief Shuts down OpenVINO by deleting all static-duration objects allocated by the library and releasing
* dependent resources
*
 * @note This function should be used by an advanced user to control unloading of the resources.
*
* You might want to use this function if you are developing a dynamically-loaded library which should clean up all
* resources after itself when the library is unloaded.
*/
INFERENCE_ENGINE_API_CPP(void) shutdown();
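/* Usage sketch: call shutdown() only after all Core and ExecutableNetwork objects have been
* destroyed, for example from the cleanup path of a dynamically loaded library:
*
*     InferenceEngine::shutdown();
*/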
} // namespace InferenceEngine