ie_plugin.hpp

// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header file for Main Inference Engine API
 * @file ie_plugin.hpp
 */
#pragma once

#include <ie_icnn_network.hpp>
#include <ie_iextension.h>
#include "ie_api.h"
#include "details/ie_no_copy.hpp"
#include "ie_error.hpp"
#include "ie_version.hpp"
#include "ie_iexecutable_network.hpp"
#include <string>
#include <vector>
#include <memory>
#include <map>
#include <set>

#if defined(_WIN32)
    #ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
        #define INFERENCE_PLUGIN_API(type) extern "C" __declspec(dllexport) type
    #else
        #define INFERENCE_PLUGIN_API(type) extern "C" type
    #endif
#elif (__GNUC__ >= 4)
    #ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
        #define INFERENCE_PLUGIN_API(type) extern "C" __attribute__((visibility("default"))) type
    #else
        #define INFERENCE_PLUGIN_API(type) extern "C" type
    #endif
#else
    #define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
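
// A minimal usage sketch for the macro above (an assumption, not prescribed by this
// header): a plugin binary defines IMPLEMENT_INFERENCE_ENGINE_PLUGIN, typically via
// a build flag, before including this header, so that INFERENCE_PLUGIN_API expands
// to an exported symbol (dllexport on Windows, default visibility with GCC >= 4):
//
//     // plugin side, compiled with e.g. -DIMPLEMENT_INFERENCE_ENGINE_PLUGIN
//     #include <ie_plugin.hpp>
//     // ... the CreatePluginEngine factory declared at the end of this file is
//     // defined here and exported from the plugin library ...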

namespace InferenceEngine {

/**
 * @brief Response structure encapsulating information about supported layers
 */
struct INFERENCE_ENGINE_API_CLASS(QueryNetworkResult) {
    /**
     * @deprecated Use QueryNetworkResult::supportedLayersMap, which provides a layer -> device mapping
     * @brief A set of layers supported by a specific device
     */
    INFERENCE_ENGINE_DEPRECATED
    std::set<std::string> supportedLayers;

    /**
     * @brief A map of supported layers:
     * - key - a layer name
     * - value - a name of the device to which the layer is assigned
     */
    std::map<std::string, std::string> supportedLayersMap;

    /**
     * @brief A status code
     */
    StatusCode rc;

    /**
     * @brief Response message
     */
    ResponseDesc resp;

    /**
     * @brief A default constructor
     */
    QueryNetworkResult();

    /**
     * @brief A copy constructor
     * @param q Value to copy from
     */
    QueryNetworkResult(const QueryNetworkResult &q);

    /**
     * @brief A copy assignment operator
     * @param q A value to copy from
     */
    const QueryNetworkResult &operator=(const QueryNetworkResult &q);

    /**
     * @brief A move assignment operator
     * @param q A value to move from
     */
    QueryNetworkResult &operator=(QueryNetworkResult &&q);

    /**
     * @brief A destructor
     */
    ~QueryNetworkResult();
};
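
/*
 * A minimal consumption sketch (hypothetical: "result" is assumed to have been
 * filled in by IInferencePlugin::QueryNetwork, declared later in this file):
 *
 *     for (const auto &entry : result.supportedLayersMap) {
 *         // entry.first: layer name; entry.second: device the layer is assigned to
 *     }
 *     if (result.rc != OK) {
 *         // result.resp.msg holds the plugin's error description
 *     }
 */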

/**
 * @brief This class is the main plugin interface
 */
class IInferencePlugin : public details::IRelease {
public:
    /**
     * @brief Returns plugin version information
     * @param versionInfo Pointer to version info. Set by the plugin
     */
    virtual void GetVersion(const Version *&versionInfo) noexcept = 0;

    /**
     * @brief Sets a logging callback.
     * Logging is used to track what is going on inside the plugin
     * @param listener Logging sink
     */
    virtual void SetLogCallback(IErrorListener &listener) noexcept = 0;

    /**
     * @deprecated Use IInferencePlugin::LoadNetwork(IExecutableNetwork::Ptr &, ICNNNetwork &, const std::map<std::string, std::string> &, ResponseDesc *)
     * @brief Loads a pre-built network with weights to the engine. On success the plugin is
     * ready to infer
     * @param network Network object acquired from CNNNetReader
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    INFERENCE_ENGINE_DEPRECATED
    virtual StatusCode LoadNetwork(ICNNNetwork &network, ResponseDesc *resp) noexcept = 0;

    /**
     * @brief Creates an executable network from a network object. Users can create as many networks as they need
     * and use them simultaneously (up to the limitations of the hardware resources)
     * @param ret Reference to a shared ptr of the returned network interface
     * @param network Network object acquired from CNNNetReader
     * @param config Map of pairs: (config parameter name, config parameter value) relevant only for this load operation
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode
    LoadNetwork(IExecutableNetwork::Ptr &ret, ICNNNetwork &network, const std::map<std::string, std::string> &config,
                ResponseDesc *resp) noexcept = 0;
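
    /*
     * A minimal usage sketch for LoadNetwork (hypothetical: "plugin" is assumed to
     * come from CreatePluginEngine and "network" from CNNNetReader):
     *
     *     IExecutableNetwork::Ptr exeNetwork;
     *     std::map<std::string, std::string> config;  // empty: use plugin defaults
     *     ResponseDesc resp;
     *     StatusCode sc = plugin->LoadNetwork(exeNetwork, network, config, &resp);
     *     if (sc != OK) {
     *         // resp.msg holds a human-readable description of the failure
     *     }
     */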

    /**
     * @brief Creates an executable network from a previously exported network
     * @param ret Reference to a shared ptr of the returned network interface
     * @param modelFileName Path to the location of the exported file
     * @param config Map of pairs: (config parameter name, config parameter value) relevant only for this load operation
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode
    ImportNetwork(IExecutableNetwork::Ptr &ret, const std::string &modelFileName,
                  const std::map<std::string, std::string> &config, ResponseDesc *resp) noexcept = 0;
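
    /*
     * A usage sketch for ImportNetwork (the file name is illustrative; the model must
     * have been exported beforehand by a plugin that supports export):
     *
     *     IExecutableNetwork::Ptr imported;
     *     ResponseDesc resp;
     *     StatusCode sc = plugin->ImportNetwork(imported, "exported.blob", {}, &resp);
     *     if (sc == OK) {
     *         // "imported" is ready to create infer requests
     *     }
     */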

    /**
     * @deprecated Load an IExecutableNetwork and create an IInferRequest instead
     * @brief Infers an image(s).
     * Input and output dimensions depend on the topology.
     * For example, for classification topologies use a 4D Blob as input (batch, channels, width,
     * height) and get a 1D blob as output (scoring probability vector). To infer a batch,
     * use a 4D Blob as input and get a 2D blob as output. In both cases the method
     * allocates the resulting blob
     * @param input Any TBlob<> object that contains the data to infer. The type of TBlob must match the network input precision and size.
     * @param result Related TBlob<> object that contains the result of the inference action, typically this is a float blob.
     * The blob does not need to be allocated or initialized, the engine allocates the relevant data.
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    INFERENCE_ENGINE_DEPRECATED
    virtual StatusCode Infer(const Blob &input, Blob &result, ResponseDesc *resp) noexcept = 0;

    /**
     * @deprecated Load an IExecutableNetwork and create an IInferRequest instead
     * @brief Infers tensors. Input and output dimensions depend on the topology.
     * For example, for classification topologies use a 4D Blob as input (batch, channels, width,
     * height) and get a 1D blob as output (scoring probability vector). To infer a batch,
     * use a 4D Blob as input and get a 2D blob as output. In both cases the method
     * allocates the resulting blob
     * @param input Map of input blobs accessed by input names
     * @param result Map of output blobs accessed by output names
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    INFERENCE_ENGINE_DEPRECATED
    virtual StatusCode Infer(const BlobMap &input, BlobMap &result, ResponseDesc *resp) noexcept = 0;

    /**
     * @deprecated Use IInferRequest to get performance measures
     * @brief Queries performance measures per layer to get feedback on which layers consume the most time.
     * Note: not all plugins provide meaningful data
     * @param perfMap Map of layer names to profiling information for that layer
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    INFERENCE_ENGINE_DEPRECATED
    virtual StatusCode GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap,
                                            ResponseDesc *resp) const noexcept = 0;

    /**
     * @brief Registers an extension within the plugin
     * @param extension Pointer to an already loaded extension
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode AddExtension(InferenceEngine::IExtensionPtr extension,
                                    InferenceEngine::ResponseDesc *resp) noexcept = 0;
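
    /*
     * A usage sketch for AddExtension, assuming the extension ships as a shared
     * library loaded via make_so_pointer from ie_extension.h (the path is illustrative):
     *
     *     IExtensionPtr extension = make_so_pointer<IExtension>("libcustom_ext.so");
     *     ResponseDesc resp;
     *     StatusCode sc = plugin->AddExtension(extension, &resp);
     */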

    /**
     * @brief Sets a configuration for the plugin; acceptable keys can be found in ie_plugin_config.hpp
     * @param config Map of pairs: (config parameter name, config parameter value)
     * @param resp Pointer to the response message that holds a description of an error if any occurred
     * @return Status code of the operation. OK if succeeded
     */
    virtual StatusCode SetConfig(const std::map<std::string, std::string> &config, ResponseDesc *resp) noexcept = 0;
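
    /*
     * A usage sketch for SetConfig, enabling performance counters via the PERF_COUNT
     * key from ie_plugin_config.hpp (shown as raw string literals here; the header
     * also provides CONFIG_KEY/CONFIG_VALUE macros for them):
     *
     *     std::map<std::string, std::string> config = {{"PERF_COUNT", "YES"}};
     *     ResponseDesc resp;
     *     StatusCode sc = plugin->SetConfig(config, &resp);
     */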

    /**
     * @deprecated Use IInferencePlugin::QueryNetwork(const ICNNNetwork &, const std::map<std::string, std::string> &, QueryNetworkResult &) const
     * @brief Queries a plugin whether it supports the specified network
     * @param network Network object to query
     * @param res Reference to the query network result
     */
    INFERENCE_ENGINE_DEPRECATED
    virtual void QueryNetwork(const ICNNNetwork & /*network*/, QueryNetworkResult &res) const noexcept {
        res.rc = InferenceEngine::NOT_IMPLEMENTED;
    }

    /**
     * @brief Queries a plugin whether it supports the specified network with the specified configuration
     * @param network Network object to query
     * @param config Map of pairs: (config parameter name, config parameter value)
     * @param res Reference to the query network result
     */
    virtual void QueryNetwork(const ICNNNetwork & /*network*/,
                              const std::map<std::string, std::string> & /*config*/, QueryNetworkResult &res) const noexcept {
        res.rc = InferenceEngine::NOT_IMPLEMENTED;
    }
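
    /*
     * A usage sketch (with hypothetical "plugin", "network", and "config" objects): the
     * default implementation above reports NOT_IMPLEMENTED, so callers should treat that
     * code as "capability queries are unsupported by this plugin" rather than a failure:
     *
     *     QueryNetworkResult res;
     *     plugin->QueryNetwork(network, config, res);
     *     if (res.rc == OK) {
     *         // inspect res.supportedLayersMap (see QueryNetworkResult above)
     *     } else if (res.rc == NOT_IMPLEMENTED) {
     *         // this plugin cannot answer capability queries
     *     }
     */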
};

/**
 * @brief Creates the default instance of the interface (per plugin)
 * @param plugin Pointer to the plugin
 * @param resp Pointer to the response message that holds a description of an error if any occurred
 * @return Status code of the operation. OK if succeeded
 */
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept;
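
/*
 * A sketch of the factory definition a plugin library is expected to export
 * ("MyPluginImpl" is a hypothetical IInferencePlugin implementation; DescriptionBuffer
 * from ie_common.h converts an exception message into a ResponseDesc and status code):
 *
 *     INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin,
 *                                                         ResponseDesc *resp) noexcept {
 *         try {
 *             plugin = new MyPluginImpl();
 *             return OK;
 *         } catch (const std::exception &ex) {
 *             return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
 *         }
 *     }
 */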

}  // namespace InferenceEngine