Re: [FFmpeg-devel] [PATCH 2/2] lavfi/dnn_backend_ov: Rename RequestItem to OVRequestItem

2021-07-21 Thread Guo, Yejun


> -----Original Message-----
> From: ffmpeg-devel  On Behalf Of Shubhanshu Saxena
> Sent: July 12, 2021 0:15
> To: ffmpeg-devel@ffmpeg.org
> Cc: Shubhanshu Saxena 
> Subject: [FFmpeg-devel] [PATCH 2/2] lavfi/dnn_backend_ov: Rename RequestItem to OVRequestItem
> 
> Rename RequestItem to OVRequestItem in the OpenVINO backend to avoid
> confusion.
> 
> Signed-off-by: Shubhanshu Saxena 

[FFmpeg-devel] [PATCH 2/2] lavfi/dnn_backend_ov: Rename RequestItem to OVRequestItem

2021-07-11 Thread Shubhanshu Saxena
Rename RequestItem to OVRequestItem in the OpenVINO backend
to avoid confusion.

Signed-off-by: Shubhanshu Saxena 
---
 libavfilter/dnn/dnn_backend_openvino.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index b340859c12..f8d548feaf 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -54,18 +54,18 @@ typedef struct OVModel{
 ie_core_t *core;
 ie_network_t *network;
 ie_executable_network_t *exe_network;
-SafeQueue *request_queue;   // holds RequestItem
+SafeQueue *request_queue;   // holds OVRequestItem
 Queue *task_queue;  // holds TaskItem
 Queue *inference_queue; // holds InferenceItem
 } OVModel;
 
 // one request for one call to openvino
-typedef struct RequestItem {
+typedef struct OVRequestItem {
 ie_infer_request_t *infer_request;
 InferenceItem **inferences;
 uint32_t inference_count;
 ie_complete_call_back_t callback;
-} RequestItem;
+} OVRequestItem;
 
 #define APPEND_STRING(generated_string, iterate_string) \
     generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
@@ -111,7 +111,7 @@ static int get_datatype_size(DNNDataType dt)
 }
 }
 
-static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
+static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
 {
 dimensions_t dims;
 precision_e precision;
@@ -198,7 +198,7 @@ static void infer_completion_callback(void *args)
 dimensions_t dims;
 precision_e precision;
 IEStatusCode status;
-RequestItem *request = args;
+OVRequestItem *request = args;
 InferenceItem *inference = request->inferences[0];
 TaskItem *task = inference->task;
 OVModel *ov_model = task->model;
@@ -381,7 +381,7 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
 }
 
 for (int i = 0; i < ctx->options.nireq; i++) {
-RequestItem *item = av_mallocz(sizeof(*item));
+OVRequestItem *item = av_mallocz(sizeof(*item));
 if (!item) {
 goto err;
 }
@@ -422,7 +422,7 @@ err:
 return DNN_ERROR;
 }
 
-static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
+static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
 {
 IEStatusCode status;
 DNNReturnType ret;
@@ -639,7 +639,7 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
 OVModel *ov_model = model;
 OVContext *ctx = &ov_model->ctx;
 TaskItem task;
-RequestItem *request;
+OVRequestItem *request;
 AVFrame *in_frame = NULL;
 AVFrame *out_frame = NULL;
 IEStatusCode status;
@@ -779,7 +779,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *
 OVModel *ov_model = model->model;
 OVContext *ctx = &ov_model->ctx;
 TaskItem task;
-RequestItem *request;
+OVRequestItem *request;
 
 if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
 return DNN_ERROR;
@@ -827,7 +827,7 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBasePa
 {
 OVModel *ov_model = model->model;
 OVContext *ctx = &ov_model->ctx;
-RequestItem *request;
+OVRequestItem *request;
 TaskItem *task;
 DNNReturnType ret;
 
@@ -904,7 +904,7 @@ DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
 {
 OVModel *ov_model = model->model;
 OVContext *ctx = &ov_model->ctx;
-RequestItem *request;
+OVRequestItem *request;
 IEStatusCode status;
 DNNReturnType ret;
 
@@ -943,7 +943,7 @@ void ff_dnn_free_model_ov(DNNModel **model)
 if (*model){
 OVModel *ov_model = (*model)->model;
 while (ff_safe_queue_size(ov_model->request_queue) != 0) {
-RequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
+OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
 if (item && item->infer_request) {
 ie_infer_request_free(&item->infer_request);
 }
-- 
2.25.1
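
For context, below is a minimal, self-contained sketch of the pattern the
patch touches: a pool of pre-allocated request items that init_model_ov()
fills and ff_dnn_free_model_ov() drains. The types are simplified stand-ins
(the real OVRequestItem holds an ie_infer_request_t and a completion
callback, and the real pool is the lock-protected SafeQueue from
libavfilter/dnn/safe_queue.h), so treat this as an illustration of the
lifecycle, not the backend's actual code:

/* toy model of the OVRequestItem pool; NIREQ stands in for
 * ctx->options.nireq in the real backend */
#include <stdio.h>
#include <stdlib.h>

#define NIREQ 4

typedef struct OVRequestItem {
    void *infer_request;       /* stand-in for ie_infer_request_t * */
    unsigned inference_count;  /* inferences batched into this request */
} OVRequestItem;

/* stand-in for the lock-protected SafeQueue used by the real backend */
typedef struct RequestPool {
    OVRequestItem *items[NIREQ];
    int size;
} RequestPool;

static int pool_push(RequestPool *p, OVRequestItem *item)
{
    if (p->size >= NIREQ)
        return -1;
    p->items[p->size++] = item;
    return 0;
}

static OVRequestItem *pool_pop(RequestPool *p)
{
    return p->size > 0 ? p->items[--p->size] : NULL;
}

int main(void)
{
    RequestPool pool = { .size = 0 };
    OVRequestItem *item;

    /* init_model_ov() analogue: pre-allocate one item per request slot */
    for (int i = 0; i < NIREQ; i++) {
        item = calloc(1, sizeof(*item));
        if (!item || pool_push(&pool, item) < 0) {
            free(item);
            return 1;
        }
    }

    /* ff_dnn_free_model_ov() analogue: drain the pool, freeing each item */
    while ((item = pool_pop(&pool)))
        free(item);

    puts("pool drained");
    return 0;
}

The patch changes only the type's name; the allocate/push/pop/free
lifecycle sketched above is untouched, which is why the diff consists
purely of renames.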

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".