- Global InferenceEngine::Blob::Blob (Precision p)
- Use Blob::Blob(const TensorDesc &).
- Global InferenceEngine::Blob::Blob (Precision p, Layout l)
- Use Blob::Blob(const TensorDesc &).
- Global InferenceEngine::Blob::Blob (Precision p, const SizeVector &dims)
- Use Blob::Blob(const TensorDesc &).
- Global InferenceEngine::Blob::Blob (Precision p, Layout l, const SizeVector &dims)
- Use Blob::Blob(const TensorDesc &).
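A minimal migration sketch for the deprecated Blob constructors above: in user code the usual pattern is to build a TensorDesc once and pass it to a concrete blob type such as TBlob. The precision, dimensions, and layout below are placeholder values.

```cpp
#include <inference_engine.hpp>
#include <memory>

using namespace InferenceEngine;

// Replacement sketch: describe the tensor once with TensorDesc and pass the
// descriptor to the blob constructor (placeholder precision/dims/layout).
Blob::Ptr makeInputBlob() {
    TensorDesc desc(Precision::FP32, {1, 3, 224, 224}, Layout::NCHW);
    auto blob = std::make_shared<TBlob<float>>(desc);  // concrete Blob implementation
    blob->allocate();
    return blob;
}
```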
- Global InferenceEngine::Blob::dims () const noexcept
- Use Blob::getTensorDesc and InferenceEngine::TensorDesc::getDims.
- Global InferenceEngine::Blob::layout () const noexcept
- Use Blob::getTensorDesc and InferenceEngine::TensorDesc::getLayout to get the current layout
- Global InferenceEngine::Blob::precision () const noexcept
- Use Blob::getTensorDesc and InferenceEngine::TensorDesc::getPrecision to get the precision
- Global InferenceEngine::Blob::Reshape (const SizeVector &dims, Layout layout=Layout::ANY) noexcept
- The method works with reversed dimensions. Use Blob::getTensorDesc and InferenceEngine::TensorDesc::reshape.
- Global InferenceEngine::Blob::Resize (const SizeVector &dims, Layout layout=Layout::ANY) noexcept
- The method works with reversed dimensions. Create a new blob if you want to change a size.
- Global InferenceEngine::Blob::type () const noexcept
- Use Blob::getTensorDesc and InferenceEngine::TensorDesc::getPrecision to get the precision
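A sketch of the accessor migration for the entries above, assuming `blob` is an existing Blob::Ptr; the reshape dimensions are placeholders.

```cpp
#include <inference_engine.hpp>

// The deprecated dims()/layout()/precision()/type()/Reshape() accessors map
// onto the blob's TensorDesc.
void inspectBlob(const InferenceEngine::Blob::Ptr& blob) {
    const InferenceEngine::TensorDesc& desc = blob->getTensorDesc();
    InferenceEngine::SizeVector dims = desc.getDims();        // instead of blob->dims()
    InferenceEngine::Layout layout   = desc.getLayout();      // instead of blob->layout()
    InferenceEngine::Precision prec  = desc.getPrecision();   // instead of blob->precision() / type()
    (void)dims; (void)layout; (void)prec;
    // Instead of Blob::Reshape, modify the descriptor itself (placeholder dims):
    blob->getTensorDesc().reshape({1, 3, 448, 448}, InferenceEngine::Layout::NCHW);
}
```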
- Global InferenceEngine::CNNLayer::GetParamsAsBool (const char *param, bool def) const
- Use CNNLayer::GetParamAsBool
- Global InferenceEngine::CNNNetwork::CNNNetwork (ICNNNetwork *actual)
- Use CNNNetwork::CNNNetwork(std::shared_ptr<ICNNNetwork>) to construct a network
- Global InferenceEngine::CNNNetwork::setTargetDevice (TargetDevice device)
- There is no need to specify a target device for the network. Use InferenceEngine::Core with the target device directly, as sketched below.
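A sketch of the replacement, assuming `network` is an already-read CNNNetwork; the device name "CPU" is a placeholder.

```cpp
#include <inference_engine.hpp>

// Instead of network.setTargetDevice(...), pass the device name when loading.
InferenceEngine::ExecutableNetwork loadOnDevice(const InferenceEngine::CNNNetwork& network) {
    InferenceEngine::Core core;
    return core.LoadNetwork(network, "CPU");  // device chosen at load time
}
```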
- Global InferenceEngine::ConvertLayout (Layout sourceLayout, Layout destLayout, const T *sourceBuffer, T *destBuffer, SizeVector dims)
- Please use TensorDesc for conversion
- Global InferenceEngine::Data::creatorLayer
- Use Data::getCreatorLayer
- Global InferenceEngine::Data::dims
- Use Data::getDims
- Global InferenceEngine::Data::inputTo
- Use Data::getInputTo
- Global InferenceEngine::Data::layout
- Use Data::getFormat
- Global InferenceEngine::Data::name
- Use Data::getName
- Global InferenceEngine::Data::precision
- Use Data::getPrecision
- Global InferenceEngine::Data::setBatchSize (size_t batch_size)
- Use Data::setDims to set batch size.
- Global InferenceEngine::Data::userObject
- Use Data::getUserObject
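A sketch of the Data field migration for the entries above; `data` stands for any existing DataPtr (for example, an output of the network).

```cpp
#include <inference_engine.hpp>
#include <string>

// The deprecated public fields of Data map onto getters.
void inspectData(const InferenceEngine::DataPtr& data) {
    std::string name                 = data->getName();       // instead of ->name
    InferenceEngine::Precision prec  = data->getPrecision();  // instead of ->precision
    InferenceEngine::Layout layout   = data->getFormat();     // instead of ->layout
    InferenceEngine::SizeVector dims = data->getDims();       // instead of ->dims
    (void)name; (void)prec; (void)layout; (void)dims;
}
```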
- Global InferenceEngine::findPlugin (const FindPluginRequest &req)
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::findPlugin (const FindPluginRequest &req, FindPluginResponse &result, ResponseDesc *resp) noexcept
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Class InferenceEngine::FindPluginRequest
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Class InferenceEngine::FindPluginResponse
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::getDeviceName (TargetDevice device)
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::HeteroConfigParams::DECLARE_CONFIG_KEY (HETERO_DUMP_DLA_MESSAGES)
- Use the DLIA_CONFIG_KEY(DUMP_SUPPORTED_LAYERS_INFORMATION) FPGA configuration boolean key instead
- Global InferenceEngine::ICNNNetwork::getTargetDevice () const noexcept=0
- Deprecated since TargetDevice is deprecated
- Global InferenceEngine::ICNNNetwork::setBatchSize (const size_t size) noexcept
- Use ICNNNetwork::setBatchSize(size_t, ResponseDesc*)
- Global InferenceEngine::ICNNNetwork::setTargetDevice (TargetDevice device) noexcept=0
- Deprecated since TargetDevice is deprecated. Specify target device in InferenceEngine::Core directly.
- Class InferenceEngine::IHeteroDeviceLoader
- Use InferenceEngine::Core to work with HETERO device
- Global InferenceEngine::IHeteroDeviceLoader::LoadNetwork (const std::string &device, IExecutableNetwork::Ptr &ret, ICNNNetwork &network, const std::map< std::string, std::string > &config, ResponseDesc *resp) noexcept=0
- Use InferenceEngine::Core with HETERO device in InferenceEngine::Core::LoadNetwork.
- Global InferenceEngine::IHeteroDeviceLoader::QueryNetwork (const std::string &device, const ICNNNetwork &network, QueryNetworkResult &res) noexcept
- Use the IHeteroDeviceLoader::QueryNetwork overload that accepts a configuration map
- Global InferenceEngine::IHeteroDeviceLoader::QueryNetwork (const std::string &device, const ICNNNetwork &network, const std::map< std::string, std::string > &, QueryNetworkResult &res) noexcept=0
- Use InferenceEngine::Core with HETERO device in InferenceEngine::Core::QueryNetwork.
- Class InferenceEngine::IHeteroInferencePlugin
- Use InferenceEngine::Core with HETERO mode in LoadNetwork, QueryNetwork, etc
- Global InferenceEngine::IHeteroInferencePlugin::SetAffinity (ICNNNetwork &network, const std::map< std::string, std::string > &config, ResponseDesc *resp) noexcept=0
- Use InferenceEngine::Core::QueryNetwork with HETERO device and QueryNetworkResult::supportedLayersMap to set affinities to a network
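A sketch of setting per-layer affinities without IHeteroInferencePlugin::SetAffinity, assuming `network` is an already-read CNNNetwork; the device string "HETERO:FPGA,CPU" is a placeholder.

```cpp
#include <inference_engine.hpp>

// Query which layers each device supports, then write the result into the
// layers' affinity fields.
void setAffinities(InferenceEngine::CNNNetwork& network) {
    InferenceEngine::Core core;
    auto res = core.QueryNetwork(network, "HETERO:FPGA,CPU", {});
    for (const auto& layer : res.supportedLayersMap) {
        // layer.first is the layer name, layer.second is the device it maps to
        network.getLayerByName(layer.first.c_str())->affinity = layer.second;
    }
}
```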
- Global InferenceEngine::IHeteroInferencePlugin::SetDeviceLoader (const std::string &device, IHeteroDeviceLoader::Ptr loader) noexcept=0
- Use InferenceEngine::Core to work with the HETERO device instead of registering a device loader per device
- Global InferenceEngine::IInferencePlugin::GetPerformanceCounts (std::map< std::string, InferenceEngineProfileInfo > &perfMap, ResponseDesc *resp) const noexcept=0
- Use IInferRequest to get performance counters
- Global InferenceEngine::IInferencePlugin::Infer (const Blob &input, Blob &result, ResponseDesc *resp) noexcept=0
- Load IExecutableNetwork to create IInferRequest
- Global InferenceEngine::IInferencePlugin::Infer (const BlobMap &input, BlobMap &result, ResponseDesc *resp) noexcept=0
- Load IExecutableNetwork to create IInferRequest.
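A sketch of the replacement flow for the plugin-level Infer() and GetPerformanceCounts() entries above: load an executable network, create an infer request, and query it. The device name, blob names, and the `network`/`input` arguments are placeholders.

```cpp
#include <inference_engine.hpp>

void runInference(const InferenceEngine::CNNNetwork& network,
                  const InferenceEngine::Blob::Ptr& input) {
    InferenceEngine::Core core;
    InferenceEngine::ExecutableNetwork executable = core.LoadNetwork(network, "CPU");
    InferenceEngine::InferRequest request = executable.CreateInferRequest();
    request.SetBlob("input", input);          // instead of IInferencePlugin::Infer(...)
    request.Infer();
    auto output = request.GetBlob("output");
    // Instead of IInferencePlugin::GetPerformanceCounts():
    auto perfCounts = request.GetPerformanceCounts();
    (void)output; (void)perfCounts;
}
```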
- Global InferenceEngine::IInferencePlugin::LoadNetwork (ICNNNetwork &network, ResponseDesc *resp) noexcept=0
- Use IInferencePlugin::LoadNetwork(IExecutableNetwork::Ptr &, ICNNNetwork &, const std::map<std::string, std::string> &, ResponseDesc *)
- Global InferenceEngine::IInferencePlugin::QueryNetwork (const ICNNNetwork &, QueryNetworkResult &res) const noexcept
- Use IInferencePlugin::QueryNetwork(const ICNNNetwork&, const std::map<std::string, std::string> &, QueryNetworkResult&) const
- Global InferenceEngine::ILayerImplFactory::getShapes (const std::vector< TensorDesc > &, std::vector< TensorDesc > &, ResponseDesc *) noexcept
- Implement IShapeInferImpl extension for shape inference.
- Global InferenceEngine::InferencePlugin::GetPerformanceCounts () const
- Use IInferRequest to get performance counters
- Global InferenceEngine::InferencePlugin::Infer (const BlobMap &input, BlobMap &result)
- Use IExecutableNetwork to create IInferRequest.
- Global InferenceEngine::InferencePlugin::LoadNetwork (ICNNNetwork &network)
- Use InferencePlugin::LoadNetwork(ICNNNetwork &, const std::map<std::string, std::string> &)
- Global InferenceEngine::InferencePlugin::operator InferenceEngine::HeteroPluginPtr ()
- Deprecated since HeteroPluginPtr is deprecated
- Global InferenceEngine::InferencePlugin::QueryNetwork (const ICNNNetwork &network, QueryNetworkResult &res) const
- Use InferencePlugin::QueryNetwork(const ICNNNetwork &, const std::map<std::string, std::string> &, QueryNetworkResult &) const
- Global InferenceEngine::InputInfo::getDims () const
- Please use InputInfo::getTensorDesc for working with layouts and dimensions
- Global InferenceEngine::InputInfo::getInputPrecision () const
- Use InputInfo::getPrecision
- Global InferenceEngine::InputInfo::setInputPrecision (Precision p)
- Use InputInfo::setPrecision
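A sketch of the InputInfo migration for the entries above, assuming `network` is an existing CNNNetwork; the U8 precision is a placeholder choice.

```cpp
#include <inference_engine.hpp>

void configureInputs(InferenceEngine::CNNNetwork& network) {
    InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
    for (auto& item : inputs) {
        InferenceEngine::InputInfo::Ptr info = item.second;
        // Instead of InputInfo::getDims():
        InferenceEngine::SizeVector dims = info->getTensorDesc().getDims();
        (void)dims;
        // Instead of InputInfo::setInputPrecision()/getInputPrecision():
        info->setPrecision(InferenceEngine::Precision::U8);
    }
}
```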
- Global InferenceEngine::IShapeInferImpl::inferShapes (const std::vector< SizeVector > &, const std::map< std::string, std::string > &, const std::map< std::string, Blob::Ptr > &, std::vector< SizeVector > &, ResponseDesc *) noexcept
- Use IShapeInferImpl::inferShapes(const std::vector<Blob::CPtr>&, const std::map<std::string, std::string>&, const std::map<std::string, Blob::Ptr>&, std::vector<SizeVector>&, ResponseDesc*) noexcept.
- Class InferenceEngine::LayoutOffsetCounter
- Use TensorDesc for working with layouts
- Global InferenceEngine::make_shared_blob (Precision p, const SizeVector &dims)
- Use the make_shared_blob signature which accepts TensorDesc
- Global InferenceEngine::make_shared_blob (Precision p, Layout l, SizeVector dims, const std::vector< TypeTo > &arg)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, Layout l, const std::vector< TypeTo > &arg)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, const SizeVector &dims, TypeTo *ptr, size_t size=0)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (TBlob< TypeTo > &&arg)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, const TArg &arg)
- Use the make_shared_blob signature which accepts TensorDesc
- Global InferenceEngine::make_shared_blob (Precision p, Layout l, const SizeVector &dims, TypeTo *ptr, size_t size=0)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, Layout l=NCHW)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, const std::vector< TypeTo > &arg)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, Layout l, const SizeVector &dims)
- Use InferenceEngine::make_shared_blob(const TensorDesc&)
- Global InferenceEngine::make_shared_blob (Precision p, Layout l, const TArg &arg)
- Use the make_shared_blob signature which accepts TensorDesc
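A sketch of the single remaining make_shared_blob pattern that replaces all of the overloads above; precision, dimensions, and layout are placeholder values.

```cpp
#include <inference_engine.hpp>

InferenceEngine::Blob::Ptr makeBlob() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                     {1, 3, 224, 224},
                                     InferenceEngine::Layout::NCHW);
    auto blob = InferenceEngine::make_shared_blob<float>(desc);
    blob->allocate();
    return blob;
}
```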
- Global InferenceEngine::PluginDispatcher::getPluginByDevice (const std::string &deviceName) const
- Use InferenceEngine::Core to work with devices by name
- Global InferenceEngine::PluginDispatcher::getSuitablePlugin (TargetDevice device) const
- Use InferenceEngine::Core to work with devices by name
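A sketch of working with devices by name through InferenceEngine::Core instead of PluginDispatcher; "CPU" is a placeholder device string.

```cpp
#include <inference_engine.hpp>

void useCore() {
    InferenceEngine::Core core;               // instead of getPluginByDevice / getSuitablePlugin
    auto versions = core.GetVersions("CPU");  // plugin/device version metadata, if needed
    (void)versions;
}
```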
- Global InferenceEngine::Precision::size () const
- The size of the precision will be reported in bits in future releases
- Global InferenceEngine::QueryNetworkResult::supportedLayers
- Use QueryNetworkResult::supportedLayersMap, which provides a layer-to-device mapping
- Class InferenceEngine::TargetDeviceInfo
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::TargetDeviceInfo::fromStr (const std::string &deviceName)
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::TargetDeviceInfo::name (TargetDevice device)
- Deprecated since InferenceEngine::TargetDevice is deprecated
- Global InferenceEngine::TBlob< T, typename >::set (const std::vector< T > &that)
- Deprecated to avoid memcpy() calls. Use TBlob::buffer to get a raw pointer and set the data
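A sketch of filling a blob without the deprecated TBlob::set(); `values` is placeholder data and is assumed not to exceed the blob's size.

```cpp
#include <inference_engine.hpp>
#include <algorithm>
#include <vector>

void fillBlob(const InferenceEngine::TBlob<float>::Ptr& blob,
              const std::vector<float>& values) {
    float* dst = blob->buffer().as<float*>();   // raw pointer into the blob's memory
    std::copy(values.begin(), values.end(), dst);
}
```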
- Global InferenceEngine::TBlob< T, typename >::TBlob (Precision p, Layout l, const SizeVector &dims, T *ptr, size_t data_size=0)
- Use TBlob::TBlob(const TensorDesc&).
- Global InferenceEngine::TBlob< T, typename >::TBlob (Precision p, Layout l)
- Use TBlob::TBlob(const TensorDesc&).
- Global InferenceEngine::TBlob< T, typename >::TBlob (Precision p, Layout l, const SizeVector &dims)
- Use TBlob::TBlob(const TensorDesc&).
- Global InferenceEngine::TBlob< T, typename >::TBlob (Precision p, Layout l, const SizeVector &dims, std::shared_ptr< IAllocator > alloc)
- Use TBlob::TBlob(const TensorDesc&).
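A sketch of the remaining TBlob constructors for the entries above: describe the tensor with a TensorDesc and, when wrapping pre-allocated memory, pass the raw pointer alongside it. Dimensions and buffer size are placeholders.

```cpp
#include <inference_engine.hpp>
#include <vector>

void makeTBlobs() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                     {1, 3, 224, 224},
                                     InferenceEngine::Layout::NCHW);
    InferenceEngine::TBlob<float> owned(desc);   // blob allocates its own memory
    owned.allocate();

    std::vector<float> external(1 * 3 * 224 * 224);
    InferenceEngine::TBlob<float> wrapped(desc, external.data(), external.size());
}
```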
- Global InferenceEngine::VPUConfigParams::DECLARE_CONFIG_KEY (VPU_INPUT_BIAS)
- Global InferenceEngine::VPUConfigParams::DECLARE_CONFIG_KEY (VPU_INPUT_NORM)
- Global InferenceEngine::VPUConfigParams::DECLARE_CONFIG_KEY (VPU_FORCE_RESET)
- Use VPU_MYRIAD_CONFIG_KEY(FORCE_RESET) instead.
- Global InferenceEngine::VPUConfigParams::DECLARE_CONFIG_KEY (VPU_PLATFORM)
- Use VPU_MYRIAD_CONFIG_KEY(PLATFORM) instead.