Re: [FFmpeg-devel] [PATCH 2/7] libavfilter: Code style fixes for pointers in DNN module and sr filter.

2018-08-07 Thread Pedro Arthur
2018-08-06 18:11 GMT-03:00 Sergey Lavrushkin:
> Updated patch.

Re: [FFmpeg-devel] [PATCH 2/7] libavfilter: Code style fixes for pointers in DNN module and sr filter.

2018-08-06 Thread Sergey Lavrushkin
Updated patch.

Re: [FFmpeg-devel] [PATCH 2/7] libavfilter: Code style fixes for pointers in DNN module and sr filter.

2018-08-06 Thread Pedro Arthur

[FFmpeg-devel] [PATCH 2/7] libavfilter: Code style fixes for pointers in DNN module and sr filter.

2018-08-02 Thread Sergey Lavrushkin
---
 libavfilter/dnn_backend_native.c |  84 +++---
 libavfilter/dnn_backend_native.h |   8 +--
 libavfilter/dnn_backend_tf.c | 108 +++
 libavfilter/dnn_backend_tf.h |   8 +--
 libavfilter/dnn_espcn.h  |   6 +--
 libavfilter/dnn_interface.c  |   4 +-
 libavfilter/dnn_interface.h  |  16 +++---
 libavfilter/dnn_srcnn.h  |   6 +--
 libavfilter/vf_sr.c  |  60 +++---
 9 files changed, 150 insertions(+), 150 deletions(-)

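Context for readers unfamiliar with the FFmpeg coding style being enforced
here (this snippet is illustrative only and not part of the patch): the series
moves the '*' from the type to the variable name, which is how the C
declarator grammar actually binds it. A minimal sketch, using identifiers
taken from the hunks below:

    /* Old style, which this patch removes: */
    float* old_output;
    void* old_params;

    /* FFmpeg style, which this patch applies: */
    float *output;
    void *params;

    /* Why the binding matters: in a multi-declarator line the '*' applies per
     * name, so here `kernel` is a float pointer but `biases` is a plain float. */
    float* kernel, biases;
    /* Attaching the '*' to each name keeps the intent explicit: */
    float *kernel2, *biases2;

The same rule is applied to casts throughout, e.g. "(InputParams *)" rather
than "(InputParams*)".
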
diff --git a/libavfilter/dnn_backend_native.c b/libavfilter/dnn_backend_native.c
index 3e6b86280d..baefea7fcb 100644
--- a/libavfilter/dnn_backend_native.c
+++ b/libavfilter/dnn_backend_native.c
@@ -34,15 +34,15 @@ typedef enum {RELU, TANH, SIGMOID} ActivationFunc;
 
 typedef struct Layer{
 LayerType type;
-float* output;
-void* params;
+float *output;
+void *params;
 } Layer;
 
 typedef struct ConvolutionalParams{
 int32_t input_num, output_num, kernel_size;
 ActivationFunc activation;
-float* kernel;
-float* biases;
+float *kernel;
+float *biases;
 } ConvolutionalParams;
 
 typedef struct InputParams{
@@ -55,16 +55,16 @@ typedef struct DepthToSpaceParams{
 
 // Represents simple feed-forward convolutional network.
 typedef struct ConvolutionalNetwork{
-Layer* layers;
+Layer *layers;
 int32_t layers_num;
 } ConvolutionalNetwork;
 
-static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNData* output)
+static DNNReturnType set_input_output_native(void *model, DNNData *input, DNNData *output)
 {
-ConvolutionalNetwork* network = (ConvolutionalNetwork*)model;
-InputParams* input_params;
-ConvolutionalParams* conv_params;
-DepthToSpaceParams* depth_to_space_params;
+ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+InputParams *input_params;
+ConvolutionalParams *conv_params;
+DepthToSpaceParams *depth_to_space_params;
 int cur_width, cur_height, cur_channels;
 int32_t layer;
 
@@ -72,7 +72,7 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
 return DNN_ERROR;
 }
 else{
-input_params = (InputParams*)network->layers[0].params;
+input_params = (InputParams *)network->layers[0].params;
 input_params->width = cur_width = input->width;
 input_params->height = cur_height = input->height;
 input_params->channels = cur_channels = input->channels;
@@ -88,14 +88,14 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
 for (layer = 1; layer < network->layers_num; ++layer){
 switch (network->layers[layer].type){
 case CONV:
-conv_params = (ConvolutionalParams*)network->layers[layer].params;
+conv_params = (ConvolutionalParams *)network->layers[layer].params;
 if (conv_params->input_num != cur_channels){
 return DNN_ERROR;
 }
 cur_channels = conv_params->output_num;
 break;
 case DEPTH_TO_SPACE:
-depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
+depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
 if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
 return DNN_ERROR;
 }
@@ -127,16 +127,16 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
 // layers_num,layer_type,layer_parameterss,layer_type,layer_parameters...
 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
 // For DEPTH_TO_SPACE layer: block_size
-DNNModel* ff_dnn_load_model_native(const char* model_filename)
+DNNModel *ff_dnn_load_model_native(const char *model_filename)
 {
-DNNModel* model = NULL;
-ConvolutionalNetwork* network = NULL;
-AVIOContext* model_file_context;
+DNNModel *model = NULL;
+ConvolutionalNetwork *network = NULL;
+AVIOContext *model_file_context;
 int file_size, dnn_size, kernel_size, i;
 int32_t layer;
 LayerType layer_type;
-ConvolutionalParams* conv_params;
-DepthToSpaceParams* depth_to_space_params;
+ConvolutionalParams *conv_params;
+DepthToSpaceParams *depth_to_space_params;
 
 model = av_malloc(sizeof(DNNModel));
 if (!model){
@@ -155,7 +155,7 @@ DNNModel* ff_dnn_load_model_native(const char* model_filename)
 av_freep(&model);
 return NULL;
 }
-model->model = (void*)network;
+model->model = (void *)network;
 
 network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
 dnn_size = 4;
@@ -251,10 +251,10 @@ DNNModel* ff_dnn_load_model_native(const char* model_filename)
 return model;
 }
 
-static int set_up_conv_layer(Layer* layer, const float* kernel, const float* biases, ActivationFunc activation,
+static
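
Context only, not part of the patch: the comment block above
ff_dnn_load_model_native() describes the serialized layout the native backend
expects (layers_num first, then layer_type/layer_parameters pairs). Below is a
minimal sketch of how the one header field visible in the hunks above is
consumed, assuming the same avio_rl32()-based reading used by the loader; the
helper name read_layers_num() is hypothetical:

    #include <stdint.h>
    #include "libavformat/avio.h"

    static int32_t read_layers_num(AVIOContext *model_file_context, int *dnn_size)
    {
        /* layers_num is stored as a 32-bit little-endian value; the input
         * layer is implicit, hence the "1 +" seen in the loader above. */
        int32_t layers_num = 1 + (int32_t)avio_rl32(model_file_context);
        *dnn_size += 4; /* the loader tracks how many bytes it has consumed */
        return layers_num;
    }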