/*
 * Copyright (c) 2018 Sergey Lavrushkin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN native backend implementation.
 */

#include "dnn_backend_native.h"

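// Checks that the loaded network starts with an INPUT layer and that layer
// shapes are consistent, then (re)allocates one output buffer per layer,
// sized as height * width * channels floats in packed HWC order.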
static DNNReturnType set_input_output_native(void *model, DNNData *input, const char *input_name, const char *output_name)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
    InputParams *input_params;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    int cur_width, cur_height, cur_channels;
    int32_t layer;

    if (network->layers_num <= 0 || network->layers[0].type != INPUT){
        return DNN_ERROR;
    }
    else{
        input_params = (InputParams *)network->layers[0].params;
        input_params->width = cur_width = input->width;
        input_params->height = cur_height = input->height;
        input_params->channels = cur_channels = input->channels;
        if (input->data){
            av_freep(&input->data);
        }
        network->layers[0].output = input->data = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
        if (!network->layers[0].output){
            return DNN_ERROR;
        }
    }

    for (layer = 1; layer < network->layers_num; ++layer){
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            if (conv_params->input_num != cur_channels){
                return DNN_ERROR;
            }
            cur_channels = conv_params->output_num;
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
                return DNN_ERROR;
            }
            cur_channels = cur_channels / (depth_to_space_params->block_size * depth_to_space_params->block_size);
            cur_height *= depth_to_space_params->block_size;
            cur_width *= depth_to_space_params->block_size;
            break;
        default:
            return DNN_ERROR;
        }
        if (network->layers[layer].output){
            av_freep(&network->layers[layer].output);
        }
        network->layers[layer].output = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
        if (!network->layers[layer].output){
            return DNN_ERROR;
        }
    }

    return DNN_SUCCESS;
}

// Loads model and its parameters that are stored in a binary file with the following structure:
// layers_num,layer_type,layer_parameters,layer_type,layer_parameters...
// For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
// For DEPTH_TO_SPACE layer: block_size
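// All scalars are little-endian int32 and kernel/bias values are 32-bit floats,
// read via avio_rl32()/av_int2float(). As an illustrative example, a model with
// one 3x3 CONV layer (RELU, 1 input channel, 4 output channels) followed by one
// DEPTH_TO_SPACE layer (block_size 2) occupies
//   4 (layers_num) + 4 + 16 + 1*4*3*3*4 + 4*4 (CONV) + 4 + 4 (DEPTH_TO_SPACE) = 192 bytes.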
DNNModel *ff_dnn_load_model_native(const char *model_filename)
{
    DNNModel *model = NULL;
    ConvolutionalNetwork *network = NULL;
    AVIOContext *model_file_context;
    int file_size, dnn_size, kernel_size, i;
    int32_t layer;
    DNNLayerType layer_type;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
        av_freep(&model);
        return NULL;
    }
    file_size = avio_size(model_file_context);

    network = av_malloc(sizeof(ConvolutionalNetwork));
    if (!network){
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }
    model->model = (void *)network;

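    // The file stores only the real layers; an implicit INPUT layer is prepended
    // here as layer 0. dnn_size counts the bytes consumed so far and is compared
    // against file_size at the end to reject truncated or oversized files.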
    network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
    dnn_size = 4;

    network->layers = av_malloc(network->layers_num * sizeof(Layer));
    if (!network->layers){
        av_freep(&network);
        avio_closep(&model_file_context);
        av_freep(&model);
        return NULL;
    }

    for (layer = 0; layer < network->layers_num; ++layer){
        network->layers[layer].output = NULL;
        network->layers[layer].params = NULL;
    }
    network->layers[0].type = INPUT;
    network->layers[0].params = av_malloc(sizeof(InputParams));
    if (!network->layers[0].params){
        avio_closep(&model_file_context);
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    for (layer = 1; layer < network->layers_num; ++layer){
        layer_type = (int32_t)avio_rl32(model_file_context);
        dnn_size += 4;
        switch (layer_type){
        case CONV:
            conv_params = av_malloc(sizeof(ConvolutionalParams));
            if (!conv_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->activation = (int32_t)avio_rl32(model_file_context);
            conv_params->input_num = (int32_t)avio_rl32(model_file_context);
            conv_params->output_num = (int32_t)avio_rl32(model_file_context);
            conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
            kernel_size = conv_params->input_num * conv_params->output_num *
                          conv_params->kernel_size * conv_params->kernel_size;
            dnn_size += 16 + (kernel_size + conv_params->output_num) * 4;
            if (dnn_size > file_size || conv_params->input_num <= 0 ||
                conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            conv_params->kernel = av_malloc(kernel_size * sizeof(float));
            conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
            if (!conv_params->kernel || !conv_params->biases){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            for (i = 0; i < kernel_size; ++i){
                conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
            }
            for (i = 0; i < conv_params->output_num; ++i){
                conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
            }
            network->layers[layer].type = CONV;
            network->layers[layer].params = conv_params;
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
            if (!depth_to_space_params){
                avio_closep(&model_file_context);
                ff_dnn_free_model_native(&model);
                return NULL;
            }
            depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
            dnn_size += 4;
            network->layers[layer].type = DEPTH_TO_SPACE;
            network->layers[layer].params = depth_to_space_params;
            break;
        default:
            avio_closep(&model_file_context);
            ff_dnn_free_model_native(&model);
            return NULL;
        }
    }

    avio_closep(&model_file_context);

    if (dnn_size != file_size){
        ff_dnn_free_model_native(&model);
        return NULL;
    }

    model->set_input_output = &set_input_output_native;

    return model;
}

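// Clamps a coordinate into [0, w-1], replicating edge pixels ("same" padding)
// for the convolution below.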
#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? ((w) - 1) : (x)))

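// Direct 2D convolution with stride 1 and edge-replicated ("same") padding.
// input and output are packed HWC buffers; the kernel is laid out as
// [output_num][kernel_size][kernel_size][input_num]. The selected activation
// is applied to every output sample.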
static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
{
    int y, x, n_filter, ch, kernel_y, kernel_x;
    int radius = conv_params->kernel_size >> 1;
    int src_linesize = width * conv_params->input_num;
    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
    int filter_size = conv_params->kernel_size * filter_linesize;

    for (y = 0; y < height; ++y){
        for (x = 0; x < width; ++x){
            for (n_filter = 0; n_filter < conv_params->output_num; ++n_filter){
                output[n_filter] = conv_params->biases[n_filter];
                for (ch = 0; ch < conv_params->input_num; ++ch){
                    for (kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y){
                        for (kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x){
                            output[n_filter] += input[CLAMP_TO_EDGE(y + kernel_y - radius, height) * src_linesize +
                                                      CLAMP_TO_EDGE(x + kernel_x - radius, width) * conv_params->input_num + ch] *
                                                conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
                                                                    kernel_x * conv_params->input_num + ch];
                        }
                    }
                }
                switch (conv_params->activation){
                case RELU:
                    output[n_filter] = FFMAX(output[n_filter], 0.0);
                    break;
                case TANH:
                    output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
                    break;
                case SIGMOID:
                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
                }
            }
            output += conv_params->output_num;
        }
    }
}

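// Rearranges each block_size x block_size group of channels into a spatial
// block, turning a width x height x channels buffer into one of size
// (width * block_size) x (height * block_size) x (channels / block_size^2),
// both in packed HWC layout.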
static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
{
    int y, x, by, bx, ch;
    int new_channels = channels / (block_size * block_size);
    int output_linesize = width * channels;
    int by_linesize = output_linesize / block_size;
    int x_linesize = new_channels * block_size;

    for (y = 0; y < height; ++y){
        for (x = 0; x < width; ++x){
            for (by = 0; by < block_size; ++by){
                for (bx = 0; bx < block_size; ++bx){
                    for (ch = 0; ch < new_channels; ++ch){
                        output[by * by_linesize + x * x_linesize + bx * new_channels + ch] = input[ch];
                    }
                    input += new_channels;
                }
            }
        }
        output += output_linesize;
    }
}

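// Runs the layers in order, each reading the previous layer's output buffer.
// The returned DNNData points directly at the last layer's buffer, so the
// caller must not free it; it is released by ff_dnn_free_model_native().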
DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
    int cur_width, cur_height, cur_channels;
    int32_t layer;
    InputParams *input_params;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;

    if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
        return DNN_ERROR;
    }
    else{
        input_params = (InputParams *)network->layers[0].params;
        cur_width = input_params->width;
        cur_height = input_params->height;
        cur_channels = input_params->channels;
    }

    for (layer = 1; layer < network->layers_num; ++layer){
        if (!network->layers[layer].output){
            return DNN_ERROR;
        }
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
            cur_channels = conv_params->output_num;
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
                           depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
            cur_height *= depth_to_space_params->block_size;
            cur_width *= depth_to_space_params->block_size;
            cur_channels /= depth_to_space_params->block_size * depth_to_space_params->block_size;
            break;
        case INPUT:
            return DNN_ERROR;
        }
    }

    output->data = network->layers[network->layers_num - 1].output;
    output->height = cur_height;
    output->width = cur_width;
    output->channels = cur_channels;

    return DNN_SUCCESS;
}

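// Frees every per-layer output buffer and parameter block (including CONV
// kernels and biases), then the layer array, the network, and the model itself.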
void ff_dnn_free_model_native(DNNModel **model)
{
    ConvolutionalNetwork *network;
    ConvolutionalParams *conv_params;
    int32_t layer;

    if (*model){
        network = (ConvolutionalNetwork *)(*model)->model;
        for (layer = 0; layer < network->layers_num; ++layer){
            av_freep(&network->layers[layer].output);
            if (network->layers[layer].type == CONV){
                conv_params = (ConvolutionalParams *)network->layers[layer].params;
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
            }
            av_freep(&network->layers[layer].params);
        }
        av_freep(&network->layers);
        av_freep(&network);
        av_freep(model);
    }
}