author     Guo, Yejun <yejun.guo@intel.com>      2019-10-21 20:38:10 +0800
committer  Pedro Arthur <bygrandao@gmail.com>    2019-10-30 11:00:41 -0300
commit     e1b45b85963b5aa9d67e23638ef9b045e7fbd875 (patch)
tree       8f42ca165f693649ea2ec8f6f9a8e62c1a505027 /libavfilter/dnn
parent     dff39ea9f0154ec52b7548b122a4a5332df3c2c6 (diff)
avfilter/dnn: get the data type of network output from dnn execution result
This lets a filter accept different network models more generally, by adding a data type conversion after getting data from the network. After adding the dt field to struct DNNData, it becomes the same as DNNInputData, so the two are merged into one struct: DNNData.
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
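For reference, the merged struct the message describes would look roughly like this (a sketch inferred from the fields touched in this patch; the exact member order and the DNNDataType enum values are recollections, not quoted from the tree):

typedef enum { DNN_FLOAT = 1, DNN_UINT8 = 4 } DNNDataType;

typedef struct DNNData {
    void *data;           /* buffer filled by the backend */
    DNNDataType dt;       /* data type of the buffer, now reported for outputs too */
    int width, height, channels;
} DNNData;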
Diffstat (limited to 'libavfilter/dnn')
-rw-r--r--  libavfilter/dnn/dnn_backend_native.c                    3
-rw-r--r--  libavfilter/dnn/dnn_backend_native_layer_conv2d.c       1
-rw-r--r--  libavfilter/dnn/dnn_backend_native_layer_depth2space.c  1
-rw-r--r--  libavfilter/dnn/dnn_backend_native_layer_pad.c          1
-rw-r--r--  libavfilter/dnn/dnn_backend_tf.c                        5
5 files changed, 8 insertions, 3 deletions
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index ff280b5..add1db4 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -28,7 +28,7 @@
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layers.h"
-static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
DnnOperand *oprd = NULL;
@@ -263,6 +263,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
outputs[i].height = oprd->dims[1];
outputs[i].width = oprd->dims[2];
outputs[i].channels = oprd->dims[3];
+ outputs[i].dt = oprd->data_type;
}
return DNN_SUCCESS;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index 6ec0fa7..7b29697 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -106,6 +106,7 @@ int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_
output_operand->dims[1] = height - pad_size * 2;
output_operand->dims[2] = width - pad_size * 2;
output_operand->dims[3] = conv_params->output_num;
+ output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index 174676e..7dab19d 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -69,6 +69,7 @@ int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_ope
output_operand->dims[1] = height * block_size;
output_operand->dims[2] = width * block_size;
output_operand->dims[3] = new_channels;
+ output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index 8fa35de..8e5959b 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -105,6 +105,7 @@ int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_ind
output_operand->dims[1] = new_height;
output_operand->dims[2] = new_width;
output_operand->dims[3] = new_channel;
+ output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index c8dff51..ed91d05 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -83,7 +83,7 @@ static TF_Buffer *read_graph(const char *model_filename)
return graph_buf;
}
-static TF_Tensor *allocate_input_tensor(const DNNInputData *input)
+static TF_Tensor *allocate_input_tensor(const DNNData *input)
{
TF_DataType dt;
size_t size;
@@ -105,7 +105,7 @@ static TF_Tensor *allocate_input_tensor(const DNNInputData *input)
input_dims[1] * input_dims[2] * input_dims[3] * size);
}
-static DNNReturnType set_input_output_tf(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
+static DNNReturnType set_input_output_tf(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
TFModel *tf_model = (TFModel *)model;
TF_SessionOptions *sess_opts;
@@ -603,6 +603,7 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, u
outputs[i].width = TF_Dim(tf_model->output_tensors[i], 2);
outputs[i].channels = TF_Dim(tf_model->output_tensors[i], 3);
outputs[i].data = TF_TensorData(tf_model->output_tensors[i]);
+ outputs[i].dt = TF_TensorType(tf_model->output_tensors[i]);
}
return DNN_SUCCESS;
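
With the output data type reported by the backend, a filter can branch on outputs[i].dt after ff_dnn_execute_model_native() or ff_dnn_execute_model_tf() returns, instead of assuming float. A minimal sketch of such a conversion (the helper name is hypothetical and it assumes a single-channel output; av_clip_uint8() is from libavutil/common.h):

#include "libavutil/common.h"

/* Hypothetical helper: convert one network output plane to 8-bit gray,
 * scaling float data to 0..255 and copying uint8 data through unchanged. */
static void output_to_gray8(const DNNData *out, uint8_t *dst, int dst_linesize)
{
    for (int y = 0; y < out->height; y++) {
        for (int x = 0; x < out->width; x++) {
            if (out->dt == DNN_FLOAT) {
                const float *src = out->data;
                dst[y * dst_linesize + x] =
                    av_clip_uint8((int)(src[y * out->width + x] * 255.0f + 0.5f));
            } else { /* DNN_UINT8: already in the target range */
                const uint8_t *src = out->data;
                dst[y * dst_linesize + x] = src[y * out->width + x];
            }
        }
    }
}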