FFmpeg 2.1.1
vf_w3fdif.c
/*
 * Copyright (C) 2012 British Broadcasting Corporation, All Rights Reserved
 * Author of de-interlace algorithm: Jim Easterbrook for BBC R&D
 * Based on the process described by Martin Weston for BBC R&D
 * Author of FFmpeg filter: Mark Himsley for BBC Broadcast Systems Development
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct W3FDIFContext {
    const AVClass *class;
    int filter;                  ///< 0 is simple, 1 is more complex
    int deint;                   ///< which frames to deinterlace
    int linesize[4];             ///< bytes of pixel data per line for each plane
    int planeheight[4];          ///< height of each plane
    int field;                   ///< which field are we on, 0 or 1
    int eof;
    int nb_planes;
    AVFrame *prev, *cur, *next;  ///< previous, current, next frames
    int32_t *work_line;          ///< line we are calculating
} W3FDIFContext;

#define OFFSET(x) offsetof(W3FDIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }

static const AVOption w3fdif_options[] = {
    { "filter", "specify the filter", OFFSET(filter), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "filter" },
    CONST("simple",  NULL, 0, "filter"),
    CONST("complex", NULL, 1, "filter"),
    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       0, "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", 1, "deint"),
    { NULL }
};
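
/*
 * The options above map directly onto a filtergraph string, for example
 *
 *     ffmpeg -i interlaced.ts -vf "w3fdif=filter=complex:deint=interlaced" out.mkv
 *
 * which selects the longer coefficient set and passes frames that are not
 * flagged as interlaced through untouched (apart from the timestamp doubling
 * applied below to keep the output time base consistent).
 */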

AVFILTER_DEFINE_CLASS(w3fdif);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}
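
/*
 * All of the supported formats are 8-bit planar: the per-plane loops below
 * walk the lines through plain uint8_t pointers and the final clip assumes a
 * 0..255 sample range, so higher bit depths are not handled here.
 */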

static int config_input(AVFilterLink *inlink)
{
    W3FDIFContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->work_line = av_calloc(s->linesize[0], sizeof(*s->work_line));
    if (!s->work_line)
        return AVERROR(ENOMEM);

    return 0;
}
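
/*
 * work_line holds one 32-bit accumulator per byte of the widest line (the
 * first plane), so the same buffer is reused for every plane; accumulation is
 * done in 16.16 fixed point before being scaled back to 8 bits in
 * deinterlace_plane().
 */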

static int config_output(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[0];

    outlink->time_base.num = inlink->time_base.num;
    outlink->time_base.den = inlink->time_base.den * 2;
    outlink->frame_rate.num = inlink->frame_rate.num * 2;
    outlink->frame_rate.den = inlink->frame_rate.den;
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;

    return 0;
}
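
/*
 * The output runs at twice the input rate: every interlaced input frame
 * yields two progressive frames, one per field.  For example, a 25 fps input
 * with time_base 1/25 produces a 50 fps output with time_base 1/50, and the
 * input timestamps are rescaled by doubling them in filter() and
 * filter_frame().
 */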

/*
 * Filter coefficients from PH-2071, scaled by 256 * 256.
 * Each filter mode has one set of coefficients for low frequencies and one
 * for high frequencies.
 * n_coef_lf[] and n_coef_hf[] are the number of coefficients for the simple
 * and the more-complex mode respectively.
 * It is important for later that n_coef_lf[] is even and n_coef_hf[] is odd.
 * coef_lf[][] and coef_hf[][] are the low-frequency and high-frequency
 * coefficients for the simple and more-complex modes.
 */
static const int8_t  n_coef_lf[2]    = { 2, 4 };
static const int32_t coef_lf[2][4]   = {{ 32768, 32768,     0,     0},
                                        { -1704, 34472, 34472, -1704}};
static const int8_t  n_coef_hf[2]    = { 3, 5 };
static const int32_t coef_hf[2][5]   = {{ -4096,  8192, -4096,     0,    0},
                                        {  2032, -7602, 11140, -7602, 2032}};
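
/*
 * Sanity check on the scaling: each low-frequency set sums to 65536
 * (32768 + 32768, and -1704 + 34472 + 34472 - 1704), i.e. unity gain in
 * 16.16 fixed point, while each high-frequency set sums to 0
 * (-4096 + 8192 - 4096, and 2032 - 7602 + 11140 - 7602 + 2032), so it only
 * adds detail.  This is why the interpolated result is clipped to
 * 0..255 * 256 * 256 and shifted right by 16 when it is written out below.
 */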

static void deinterlace_plane(AVFilterContext *ctx, AVFrame *out,
                              const AVFrame *cur, const AVFrame *adj,
                              const int filter, const int plane)
{
    W3FDIFContext *s = ctx->priv;
    uint8_t *in_line, *in_lines_cur[5], *in_lines_adj[5];
    uint8_t *out_line, *out_pixel;
    int32_t *work_line, *work_pixel;
    uint8_t *cur_data = cur->data[plane];
    uint8_t *adj_data = adj->data[plane];
    uint8_t *dst_data = out->data[plane];
    const int linesize = s->linesize[plane];
    const int height = s->planeheight[plane];
    const int cur_line_stride = cur->linesize[plane];
    const int adj_line_stride = adj->linesize[plane];
    const int dst_line_stride = out->linesize[plane];
    int i, j, y_in, y_out;

    /* copy unchanged the lines of the field */
    y_out = s->field == cur->top_field_first;

    in_line  = cur_data + (y_out * cur_line_stride);
    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < height) {
        memcpy(out_line, in_line, linesize);
        y_out += 2;
        in_line  += cur_line_stride * 2;
        out_line += dst_line_stride * 2;
    }

    /* interpolate other lines of the field */
    y_out = s->field != cur->top_field_first;

    out_line = dst_data + (y_out * dst_line_stride);

    while (y_out < height) {
        /* clear workspace */
        memset(s->work_line, 0, sizeof(*s->work_line) * linesize);

        /* get low vertical frequencies from current field */
        for (j = 0; j < n_coef_lf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_lf[filter];

            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
        }

        work_line = s->work_line;
        switch (n_coef_lf[filter]) {
        case 2:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_lf[filter][0];
                *work_line++ += *in_lines_cur[1]++ * coef_lf[filter][1];
            }
            break;
        case 4:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_lf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_lf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_lf[filter][2];
                *work_line++ += *in_lines_cur[3]++ * coef_lf[filter][3];
            }
        }

        /* get high vertical frequencies from adjacent fields */
        for (j = 0; j < n_coef_hf[filter]; j++) {
            y_in = (y_out + 1) + (j * 2) - n_coef_hf[filter];

            while (y_in < 0)
                y_in += 2;
            while (y_in >= height)
                y_in -= 2;

            in_lines_cur[j] = cur_data + (y_in * cur_line_stride);
            in_lines_adj[j] = adj_data + (y_in * adj_line_stride);
        }

        work_line = s->work_line;
        switch (n_coef_hf[filter]) {
        case 3:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_adj[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_adj[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_hf[filter][2];
                *work_line++ += *in_lines_adj[2]++ * coef_hf[filter][2];
            }
            break;
        case 5:
            for (i = 0; i < linesize; i++) {
                *work_line   += *in_lines_cur[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_adj[0]++ * coef_hf[filter][0];
                *work_line   += *in_lines_cur[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_adj[1]++ * coef_hf[filter][1];
                *work_line   += *in_lines_cur[2]++ * coef_hf[filter][2];
                *work_line   += *in_lines_adj[2]++ * coef_hf[filter][2];
                *work_line   += *in_lines_cur[3]++ * coef_hf[filter][3];
                *work_line   += *in_lines_adj[3]++ * coef_hf[filter][3];
                *work_line   += *in_lines_cur[4]++ * coef_hf[filter][4];
                *work_line++ += *in_lines_adj[4]++ * coef_hf[filter][4];
            }
        }

        /* save scaled result to the output frame, scaling down by 256 * 256 */
        work_pixel = s->work_line;
        out_pixel = out_line;

        for (j = 0; j < linesize; j++, out_pixel++, work_pixel++)
            *out_pixel = av_clip(*work_pixel, 0, 255 * 256 * 256) >> 16;

        /* move on to next line */
        y_out += 2;
        out_line += dst_line_stride * 2;
    }
}
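
/*
 * filter() produces one progressive output frame per call and is invoked
 * twice for every input frame (is_second = 0, then 1).  s->field selects the
 * adjacent frame used for the high-frequency term in deinterlace_plane():
 * the previous frame for the first output field and the next frame for the
 * second, and it toggles after every call.
 */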

static int filter(AVFilterContext *ctx, int is_second)
{
    W3FDIFContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *adj;
    int plane;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    av_frame_copy_props(out, s->cur);
    out->interlaced_frame = 0;

    if (!is_second) {
        if (out->pts != AV_NOPTS_VALUE)
            out->pts *= 2;
    } else {
        int64_t cur_pts  = s->cur->pts;
        int64_t next_pts = s->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            out->pts = cur_pts + next_pts;
        } else {
            out->pts = AV_NOPTS_VALUE;
        }
    }

    adj = s->field ? s->next : s->prev;
    for (plane = 0; plane < s->nb_planes; plane++)
        deinterlace_plane(ctx, out, s->cur, adj, s->filter, plane);

    s->field = !s->field;

    return ff_filter_frame(outlink, out);
}
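
/*
 * In the doubled output time base, cur->pts * 2 corresponds to the original
 * presentation time of the current frame (the first field), and
 * cur->pts + next->pts is the midpoint between the current and the next
 * input frame, which is where the second field belongs.
 */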

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    W3FDIFContext *s = ctx->priv;
    int ret;

    av_frame_free(&s->prev);
    s->prev = s->cur;
    s->cur  = s->next;
    s->next = frame;

    if (!s->cur) {
        s->cur = av_frame_clone(s->next);
        if (!s->cur)
            return AVERROR(ENOMEM);
    }

    if ((s->deint && !s->cur->interlaced_frame) || ctx->is_disabled) {
        AVFrame *out = av_frame_clone(s->cur);
        if (!out)
            return AVERROR(ENOMEM);

        av_frame_free(&s->prev);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], out);
    }

    if (!s->prev)
        return 0;

    ret = filter(ctx, 0);
    if (ret < 0)
        return ret;

    return filter(ctx, 1);
}
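
/*
 * filter_frame() keeps a sliding prev/cur/next window, so output lags the
 * input by one frame: the very first frame is cloned into cur, and nothing is
 * emitted until prev is available.  Frames that are passed through untouched
 * (deint=interlaced on progressive input, or a disabled filter instance)
 * still get their pts doubled so they remain consistent with the doubled
 * output time base configured above.
 */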

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    W3FDIFContext *s = ctx->priv;

    do {
        int ret;

        if (s->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(ctx->inputs[0]);

        if (ret == AVERROR_EOF && s->cur) {
            AVFrame *next = av_frame_clone(s->next);
            if (!next)
                return AVERROR(ENOMEM);
            next->pts = s->next->pts * 2 - s->cur->pts;
            filter_frame(ctx->inputs[0], next);
            s->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!s->cur);

    return 0;
}
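
/*
 * On EOF the input is flushed by cloning the last frame, extrapolating its
 * pts one frame interval forward (2 * next->pts - cur->pts) and feeding it
 * back through filter_frame(), so the final input frame is still
 * deinterlaced against an adjacent frame.
 */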

static av_cold void uninit(AVFilterContext *ctx)
{
    W3FDIFContext *s = ctx->priv;

    av_frame_free(&s->prev);
    av_frame_free(&s->cur);
    av_frame_free(&s->next);
    av_freep(&s->work_line);
}

static const AVFilterPad w3fdif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

static const AVFilterPad w3fdif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_w3fdif = {
    .name          = "w3fdif",
    .description   = NULL_IF_CONFIG_SMALL("Apply Martin Weston three field deinterlace."),
    .priv_size     = sizeof(W3FDIFContext),
    .priv_class    = &w3fdif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = w3fdif_inputs,
    .outputs       = w3fdif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};