ffmpeg/libavfilter/dnn_interface.h
Guo, Yejun ff37ebaf30 dnn: add openvino as one of dnn backend
OpenVINO is a deep learning deployment toolkit at
https://github.com/openvinotoolkit/openvino. It supports CPU, GPU
and heterogeneous plugins to accelerate deep learning inference.

Please refer to https://github.com/openvinotoolkit/openvino/blob/master/build-instruction.md
to build OpenVINO (the C library is built at the same time). Please add
the cmake option -DENABLE_MKL_DNN=ON to enable the CPU path. With the
default options, the header files and libraries are installed to
/usr/local/deployment_tools/inference_engine/ on my system.
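
For orientation, the build might look roughly like the following (a
sketch only; the build-instruction.md linked above is authoritative
for clone options, prerequisites and install steps):
$ git clone --recursive https://github.com/openvinotoolkit/openvino.git
$ cd openvino && mkdir build && cd build
$ cmake -DENABLE_MKL_DNN=ON ..
$ make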

To build FFmpeg with OpenVINO, taking my system as an example, run:
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/deployment_tools/inference_engine/lib/intel64/:/usr/local/deployment_tools/inference_engine/external/tbb/lib/
$ ../ffmpeg/configure --enable-libopenvino --extra-cflags=-I/usr/local/deployment_tools/inference_engine/include/ --extra-ldflags=-L/usr/local/deployment_tools/inference_engine/lib/intel64
$ make

Here are the features provided by the OpenVINO inference engine:
- support for more DNN model formats
It supports TensorFlow, Caffe, ONNX, MXNet and Kaldi by converting them
into the OpenVINO format with a python script (a conversion sketch
follows this list). A torch model can first be converted into ONNX and
then into the OpenVINO format.

See the script at https://github.com/openvinotoolkit/openvino/tree/master/model-optimizer/mo.py,
which also performs some optimization at the model level.

- optimization at the inference stage
It optimizes for x86 CPUs with SSE, AVX, etc.

It also optimizes for Intel GPUs based on OpenCL.
(Only Intel GPUs are supported because Intel OpenCL extensions are used
for the optimization.)
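
As a conversion sketch (hypothetical file names; run the script with
--help for the full option list), converting a TensorFlow model might
look like:
$ python3 mo.py --input_model frozen_model.pb --output_dir ov_model
This produces an .xml network description plus a .bin weights file,
the pair that the OpenVINO backend loads.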

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
2020-07-02 09:36:34 +08:00


/*
* Copyright (c) 2018 Sergey Lavrushkin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
 * @file
 * DNN inference engine interface.
 */

#ifndef AVFILTER_DNN_INTERFACE_H
#define AVFILTER_DNN_INTERFACE_H

#include <stdint.h>

typedef enum {DNN_SUCCESS, DNN_ERROR} DNNReturnType;

typedef enum {DNN_NATIVE, DNN_TF, DNN_OV} DNNBackendType;

typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType;

typedef struct DNNData{
    void *data;
    DNNDataType dt;
    int width, height, channels;
} DNNData;

typedef struct DNNModel{
    // Stores model that can be different for different backends.
    void *model;
    // Gets model input information.
    // struct DNNData is reused here; the DNNData.data field is not needed.
    DNNReturnType (*get_input)(void *model, DNNData *input, const char *input_name);
    // Sets model input and output.
    // Should be called at least once before model execution.
    DNNReturnType (*set_input_output)(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output);
} DNNModel;

// Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
typedef struct DNNModule{
    // Loads model and parameters from given file. Returns NULL if it is not possible.
    DNNModel *(*load_model)(const char *model_filename);
    // Executes model with specified input and output. Returns DNN_SUCCESS on success, DNN_ERROR otherwise.
    DNNReturnType (*execute_model)(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
    // Frees memory allocated for model.
    void (*free_model)(DNNModel **model);
} DNNModule;

// Initializes DNNModule depending on chosen backend.
DNNModule *ff_get_dnn_module(DNNBackendType backend_type);

#endif
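
For orientation, here is a minimal usage sketch of this interface (not
part of the header; modeled on how filters such as vf_sr drive it, with
"x", "y" and model.xml as placeholder names, and assuming the backend's
set_input_output() allocates the input buffer as the existing backends do):

#include "libavutil/mem.h"
#include "dnn_interface.h"

static int infer_once(void)
{
    const char *output_name = "y";
    DNNData input, output;
    DNNModel *model = NULL;
    DNNModule *dnn_module = ff_get_dnn_module(DNN_OV);

    if (!dnn_module)
        return -1;
    model = dnn_module->load_model("model.xml");
    if (!model)
        goto fail;

    // Query the input dimensions; DNNData.data is not used by get_input.
    if (model->get_input(model->model, &input, "x") != DNN_SUCCESS)
        goto fail;

    // The backend allocates the input buffer and exposes it via input.data.
    if (model->set_input_output(model->model, &input, "x", &output_name, 1) != DNN_SUCCESS)
        goto fail;

    // ... fill input.data with input.width * input.height * input.channels
    // values of type input.dt here ...

    // Run inference; the backend fills output.data and its dimensions.
    if (dnn_module->execute_model(model, &output, 1) != DNN_SUCCESS)
        goto fail;

    // ... read the results back from output.data ...

    dnn_module->free_model(&model);
    av_freep(&dnn_module);
    return 0;

fail:
    if (model)
        dnn_module->free_model(&model);
    av_freep(&dnn_module);
    return -1;
}

Note that ff_get_dnn_module() returns a DNNModule allocated with
av_malloc(), so the caller releases it with av_freep(), as the existing
filters do.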