/* FFmpeg 2.1.1 — mpegvideo_enc.c (recovered from a doxygen-generated source listing) */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpeg12.h"
39 #include "mpegvideo.h"
40 #include "h261.h"
41 #include "h263.h"
42 #include "mathops.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "faandct.h"
46 #include "thread.h"
47 #include "aandcttab.h"
48 #include "flv.h"
49 #include "mpeg4video.h"
50 #include "internal.h"
51 #include "bytestream.h"
52 #include <limits.h>
53 #include "sp5x.h"
54 
56 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
57 static int sse_mb(MpegEncContext *s);
58 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
59 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
60 
63 
66  { NULL },
67 };
68 
69 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
70  uint16_t (*qmat16)[2][64],
71  const uint16_t *quant_matrix,
72  int bias, int qmin, int qmax, int intra)
73 {
74  int qscale;
75  int shift = 0;
76 
77  for (qscale = qmin; qscale <= qmax; qscale++) {
78  int i;
79  if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
80  dsp->fdct == ff_jpeg_fdct_islow_10 ||
81  dsp->fdct == ff_faandct) {
82  for (i = 0; i < 64; i++) {
83  const int j = dsp->idct_permutation[i];
84  /* 16 <= qscale * quant_matrix[i] <= 7905
85  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
86  * 19952 <= x <= 249205026
87  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
88  * 3444240 >= (1 << 36) / (x) >= 275 */
89 
90  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
91  (qscale * quant_matrix[j]));
92  }
93  } else if (dsp->fdct == ff_fdct_ifast) {
94  for (i = 0; i < 64; i++) {
95  const int j = dsp->idct_permutation[i];
96  /* 16 <= qscale * quant_matrix[i] <= 7905
97  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
98  * 19952 <= x <= 249205026
99  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
100  * 3444240 >= (1 << 36) / (x) >= 275 */
101 
102  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
103  (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
104  }
105  } else {
106  for (i = 0; i < 64; i++) {
107  const int j = dsp->idct_permutation[i];
108  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109  * Assume x = qscale * quant_matrix[i]
110  * So 16 <= x <= 7905
111  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112  * so 32768 >= (1 << 19) / (x) >= 67 */
113  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114  (qscale * quant_matrix[j]));
115  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116  // (qscale * quant_matrix[i]);
117  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118  (qscale * quant_matrix[j]);
119 
120  if (qmat16[qscale][0][i] == 0 ||
121  qmat16[qscale][0][i] == 128 * 256)
122  qmat16[qscale][0][i] = 128 * 256 - 1;
123  qmat16[qscale][1][i] =
124  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125  qmat16[qscale][0][i]);
126  }
127  }
128 
129  for (i = intra; i < 64; i++) {
130  int64_t max = 8191;
131  if (dsp->fdct == ff_fdct_ifast) {
132  max = (8191LL * ff_aanscales[i]) >> 14;
133  }
134  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
135  shift++;
136  }
137  }
138  }
139  if (shift) {
140  av_log(NULL, AV_LOG_INFO,
141  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
142  QMAT_SHIFT - shift);
143  }
144 }
145 
146 static inline void update_qscale(MpegEncContext *s)
147 {
148  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149  (FF_LAMBDA_SHIFT + 7);
150  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
151 
152  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
154 }
155 
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
157 {
158  int i;
159 
160  if (matrix) {
161  put_bits(pb, 1, 1);
162  for (i = 0; i < 64; i++) {
163  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
164  }
165  } else
166  put_bits(pb, 1, 0);
167 }
168 
169 /**
170  * init s->current_picture.qscale_table from s->lambda_table
171  */
173 {
174  int8_t * const qscale_table = s->current_picture.qscale_table;
175  int i;
176 
177  for (i = 0; i < s->mb_num; i++) {
178  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
181  s->avctx->qmax);
182  }
183 }
184 
187 {
188 #define COPY(a) dst->a= src->a
189  COPY(pict_type);
191  COPY(f_code);
192  COPY(b_code);
193  COPY(qscale);
194  COPY(lambda);
195  COPY(lambda2);
198  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199  COPY(progressive_frame); // FIXME don't set in encode_header
200  COPY(partitioned_frame); // FIXME don't set in encode_header
201 #undef COPY
202 }
203 
204 /**
205  * Set the given MpegEncContext to defaults for encoding.
206  * the changed fields will not depend upon the prior state of the MpegEncContext.
207  */
209 {
210  int i;
212 
213  for (i = -16; i < 16; i++) {
214  default_fcode_tab[i + MAX_MV] = 1;
215  }
218 }
219 
221  if (ARCH_X86)
223 
224  if (!s->dct_quantize)
226  if (!s->denoise_dct)
229  if (s->avctx->trellis)
231 
232  return 0;
233 }
234 
235 /* init video encoder */
237 {
238  MpegEncContext *s = avctx->priv_data;
239  int i;
240  int chroma_h_shift, chroma_v_shift;
241 
243 
244  switch (avctx->codec_id) {
246  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
247  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
248  av_log(avctx, AV_LOG_ERROR,
249  "only YUV420 and YUV422 are supported\n");
250  return -1;
251  }
252  break;
253  case AV_CODEC_ID_LJPEG:
254  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
255  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
256  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
257  avctx->pix_fmt != AV_PIX_FMT_BGR0 &&
258  avctx->pix_fmt != AV_PIX_FMT_BGRA &&
259  avctx->pix_fmt != AV_PIX_FMT_BGR24 &&
260  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
261  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
262  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
264  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
265  return -1;
266  }
267  break;
268  case AV_CODEC_ID_MJPEG:
269  case AV_CODEC_ID_AMV:
270  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
271  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
272  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
273  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
274  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
275  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
277  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
278  return -1;
279  }
280  break;
281  default:
282  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
283  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
284  return -1;
285  }
286  }
287 
288  switch (avctx->pix_fmt) {
289  case AV_PIX_FMT_YUVJ444P:
290  case AV_PIX_FMT_YUV444P:
292  break;
293  case AV_PIX_FMT_YUVJ422P:
294  case AV_PIX_FMT_YUV422P:
296  break;
297  case AV_PIX_FMT_YUVJ420P:
298  case AV_PIX_FMT_YUV420P:
299  default:
301  break;
302  }
303 
304  s->bit_rate = avctx->bit_rate;
305  s->width = avctx->width;
306  s->height = avctx->height;
307  if (avctx->gop_size > 600 &&
309  av_log(avctx, AV_LOG_WARNING,
310  "keyframe interval too large!, reducing it from %d to %d\n",
311  avctx->gop_size, 600);
312  avctx->gop_size = 600;
313  }
314  s->gop_size = avctx->gop_size;
315  s->avctx = avctx;
316  s->flags = avctx->flags;
317  s->flags2 = avctx->flags2;
318  s->max_b_frames = avctx->max_b_frames;
319  s->codec_id = avctx->codec->id;
321  s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
322  s->mpeg_quant = avctx->mpeg_quant;
323  s->rtp_mode = !!avctx->rtp_payload_size;
326 
327  if (s->gop_size <= 1) {
328  s->intra_only = 1;
329  s->gop_size = 12;
330  } else {
331  s->intra_only = 0;
332  }
333 
334  s->me_method = avctx->me_method;
335 
336  /* Fixed QSCALE */
337  s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
338 
339  s->adaptive_quant = (s->avctx->lumi_masking ||
340  s->avctx->dark_masking ||
343  s->avctx->p_masking ||
344  s->avctx->border_masking ||
345  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
346  !s->fixed_qscale;
347 
349 
350  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
351  switch(avctx->codec_id) {
354  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
355  break;
356  case AV_CODEC_ID_MPEG4:
360  if (avctx->rc_max_rate >= 15000000) {
361  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
362  } else if(avctx->rc_max_rate >= 2000000) {
363  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
364  } else if(avctx->rc_max_rate >= 384000) {
365  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
366  } else
367  avctx->rc_buffer_size = 40;
368  avctx->rc_buffer_size *= 16384;
369  break;
370  }
371  if (avctx->rc_buffer_size) {
372  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
373  }
374  }
375 
376  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
377  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
378  if (avctx->rc_max_rate && !avctx->rc_buffer_size)
379  return -1;
380  }
381 
382  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
383  av_log(avctx, AV_LOG_INFO,
384  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
385  }
386 
387  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
388  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
389  return -1;
390  }
391 
392  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
393  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
394  return -1;
395  }
396 
397  if (avctx->rc_max_rate &&
398  avctx->rc_max_rate == avctx->bit_rate &&
399  avctx->rc_max_rate != avctx->rc_min_rate) {
400  av_log(avctx, AV_LOG_INFO,
401  "impossible bitrate constraints, this will fail\n");
402  }
403 
404  if (avctx->rc_buffer_size &&
405  avctx->bit_rate * (int64_t)avctx->time_base.num >
406  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
407  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
408  return -1;
409  }
410 
411  if (!s->fixed_qscale &&
412  avctx->bit_rate * av_q2d(avctx->time_base) >
413  avctx->bit_rate_tolerance) {
414  av_log(avctx, AV_LOG_ERROR,
415  "bitrate tolerance too small for bitrate\n");
416  return -1;
417  }
418 
419  if (s->avctx->rc_max_rate &&
420  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
423  90000LL * (avctx->rc_buffer_size - 1) >
424  s->avctx->rc_max_rate * 0xFFFFLL) {
425  av_log(avctx, AV_LOG_INFO,
426  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
427  "specified vbv buffer is too large for the given bitrate!\n");
428  }
429 
430  if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
432  s->codec_id != AV_CODEC_ID_FLV1) {
433  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
434  return -1;
435  }
436 
437  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
438  av_log(avctx, AV_LOG_ERROR,
439  "OBMC is only supported with simple mb decision\n");
440  return -1;
441  }
442 
443  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
444  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
445  return -1;
446  }
447 
448  if (s->max_b_frames &&
449  s->codec_id != AV_CODEC_ID_MPEG4 &&
452  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
453  return -1;
454  }
455 
456  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
457  s->codec_id == AV_CODEC_ID_H263 ||
458  s->codec_id == AV_CODEC_ID_H263P) &&
459  (avctx->sample_aspect_ratio.num > 255 ||
460  avctx->sample_aspect_ratio.den > 255)) {
461  av_log(avctx, AV_LOG_WARNING,
462  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
465  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
466  }
467 
468  if ((s->codec_id == AV_CODEC_ID_H263 ||
469  s->codec_id == AV_CODEC_ID_H263P) &&
470  (avctx->width > 2048 ||
471  avctx->height > 1152 )) {
472  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
473  return -1;
474  }
475  if ((s->codec_id == AV_CODEC_ID_H263 ||
476  s->codec_id == AV_CODEC_ID_H263P) &&
477  ((avctx->width &3) ||
478  (avctx->height&3) )) {
479  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
480  return -1;
481  }
482 
483  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
484  (avctx->width > 4095 ||
485  avctx->height > 4095 )) {
486  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
487  return -1;
488  }
489 
490  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
491  (avctx->width > 16383 ||
492  avctx->height > 16383 )) {
493  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
494  return -1;
495  }
496 
497  if (s->codec_id == AV_CODEC_ID_RV10 &&
498  (avctx->width &15 ||
499  avctx->height&15 )) {
500  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
501  return AVERROR(EINVAL);
502  }
503 
504  if (s->codec_id == AV_CODEC_ID_RV20 &&
505  (avctx->width &3 ||
506  avctx->height&3 )) {
507  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
508  return AVERROR(EINVAL);
509  }
510 
511  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
512  s->codec_id == AV_CODEC_ID_WMV2) &&
513  avctx->width & 1) {
514  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
515  return -1;
516  }
517 
520  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
521  return -1;
522  }
523 
524  // FIXME mpeg2 uses that too
525  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
526  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
527  av_log(avctx, AV_LOG_ERROR,
528  "mpeg2 style quantization not supported by codec\n");
529  return -1;
530  }
531 
532  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
533  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
534  return -1;
535  }
536 
537  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
539  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
540  return -1;
541  }
542 
543  if (s->avctx->scenechange_threshold < 1000000000 &&
544  (s->flags & CODEC_FLAG_CLOSED_GOP)) {
545  av_log(avctx, AV_LOG_ERROR,
546  "closed gop with scene change detection are not supported yet, "
547  "set threshold to 1000000000\n");
548  return -1;
549  }
550 
551  if (s->flags & CODEC_FLAG_LOW_DELAY) {
552  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
553  av_log(avctx, AV_LOG_ERROR,
554  "low delay forcing is only available for mpeg2\n");
555  return -1;
556  }
557  if (s->max_b_frames != 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "b frames cannot be used with low delay\n");
560  return -1;
561  }
562  }
563 
564  if (s->q_scale_type == 1) {
565  if (avctx->qmax > 12) {
566  av_log(avctx, AV_LOG_ERROR,
567  "non linear quant only supports qmax <= 12 currently\n");
568  return -1;
569  }
570  }
571 
572  if (s->avctx->thread_count > 1 &&
573  s->codec_id != AV_CODEC_ID_MPEG4 &&
576  s->codec_id != AV_CODEC_ID_MJPEG &&
577  (s->codec_id != AV_CODEC_ID_H263P)) {
578  av_log(avctx, AV_LOG_ERROR,
579  "multi threaded encoding not supported by codec\n");
580  return -1;
581  }
582 
583  if (s->avctx->thread_count < 1) {
584  av_log(avctx, AV_LOG_ERROR,
585  "automatic thread number detection not supported by codec, "
586  "patch welcome\n");
587  return -1;
588  }
589 
590  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
591  s->rtp_mode = 1;
592 
593  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
594  s->h263_slice_structured = 1;
595 
596  if (!avctx->time_base.den || !avctx->time_base.num) {
597  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
598  return -1;
599  }
600 
601  i = (INT_MAX / 2 + 128) >> 8;
602  if (avctx->mb_threshold >= i) {
603  av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
604  i - 1);
605  return -1;
606  }
607 
608  if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
609  av_log(avctx, AV_LOG_INFO,
610  "notice: b_frame_strategy only affects the first pass\n");
611  avctx->b_frame_strategy = 0;
612  }
613 
614  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
615  if (i > 1) {
616  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
617  avctx->time_base.den /= i;
618  avctx->time_base.num /= i;
619  //return -1;
620  }
621 
623  // (a + x * 3 / 8) / x
624  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
625  s->inter_quant_bias = 0;
626  } else {
627  s->intra_quant_bias = 0;
628  // (a - x / 4) / x
629  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
630  }
631 
632  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
633  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
634  return AVERROR(EINVAL);
635  }
636 
641 
642  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
643 
644  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
645 
646  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
647  s->avctx->time_base.den > (1 << 16) - 1) {
648  av_log(avctx, AV_LOG_ERROR,
649  "timebase %d/%d not supported by MPEG 4 standard, "
650  "the maximum admitted value for the timebase denominator "
651  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
652  (1 << 16) - 1);
653  return -1;
654  }
655  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
656 
657  switch (avctx->codec->id) {
659  s->out_format = FMT_MPEG1;
660  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
661  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
662  break;
664  s->out_format = FMT_MPEG1;
665  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
666  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
667  s->rtp_mode = 1;
668  break;
669  case AV_CODEC_ID_LJPEG:
670  case AV_CODEC_ID_MJPEG:
671  case AV_CODEC_ID_AMV:
672  s->out_format = FMT_MJPEG;
673  s->intra_only = 1; /* force intra only for jpeg */
674  if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
675  (avctx->pix_fmt == AV_PIX_FMT_BGR0
676  || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
677  || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
678  s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
679  s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
680  s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
681  } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
682  s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
683  s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
684  } else {
685  s->mjpeg_vsample[0] = 2;
686  s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
687  s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
688  s->mjpeg_hsample[0] = 2;
689  s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
690  s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
691  }
693  ff_mjpeg_encode_init(s) < 0)
694  return -1;
695  avctx->delay = 0;
696  s->low_delay = 1;
697  break;
698  case AV_CODEC_ID_H261:
699  if (!CONFIG_H261_ENCODER)
700  return -1;
701  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
702  av_log(avctx, AV_LOG_ERROR,
703  "The specified picture size of %dx%d is not valid for the "
704  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
705  s->width, s->height);
706  return -1;
707  }
708  s->out_format = FMT_H261;
709  avctx->delay = 0;
710  s->low_delay = 1;
711  break;
712  case AV_CODEC_ID_H263:
713  if (!CONFIG_H263_ENCODER)
714  return -1;
716  s->width, s->height) == 8) {
717  av_log(avctx, AV_LOG_ERROR,
718  "The specified picture size of %dx%d is not valid for "
719  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
720  "352x288, 704x576, and 1408x1152. "
721  "Try H.263+.\n", s->width, s->height);
722  return -1;
723  }
724  s->out_format = FMT_H263;
725  avctx->delay = 0;
726  s->low_delay = 1;
727  break;
728  case AV_CODEC_ID_H263P:
729  s->out_format = FMT_H263;
730  s->h263_plus = 1;
731  /* Fx */
732  s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
733  s->modified_quant = s->h263_aic;
734  s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
735  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
736 
737  /* /Fx */
738  /* These are just to be sure */
739  avctx->delay = 0;
740  s->low_delay = 1;
741  break;
742  case AV_CODEC_ID_FLV1:
743  s->out_format = FMT_H263;
744  s->h263_flv = 2; /* format = 1; 11-bit codes */
745  s->unrestricted_mv = 1;
746  s->rtp_mode = 0; /* don't allow GOB */
747  avctx->delay = 0;
748  s->low_delay = 1;
749  break;
750  case AV_CODEC_ID_RV10:
751  s->out_format = FMT_H263;
752  avctx->delay = 0;
753  s->low_delay = 1;
754  break;
755  case AV_CODEC_ID_RV20:
756  s->out_format = FMT_H263;
757  avctx->delay = 0;
758  s->low_delay = 1;
759  s->modified_quant = 1;
760  s->h263_aic = 1;
761  s->h263_plus = 1;
762  s->loop_filter = 1;
763  s->unrestricted_mv = 0;
764  break;
765  case AV_CODEC_ID_MPEG4:
766  s->out_format = FMT_H263;
767  s->h263_pred = 1;
768  s->unrestricted_mv = 1;
769  s->low_delay = s->max_b_frames ? 0 : 1;
770  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
771  break;
773  s->out_format = FMT_H263;
774  s->h263_pred = 1;
775  s->unrestricted_mv = 1;
776  s->msmpeg4_version = 2;
777  avctx->delay = 0;
778  s->low_delay = 1;
779  break;
781  s->out_format = FMT_H263;
782  s->h263_pred = 1;
783  s->unrestricted_mv = 1;
784  s->msmpeg4_version = 3;
785  s->flipflop_rounding = 1;
786  avctx->delay = 0;
787  s->low_delay = 1;
788  break;
789  case AV_CODEC_ID_WMV1:
790  s->out_format = FMT_H263;
791  s->h263_pred = 1;
792  s->unrestricted_mv = 1;
793  s->msmpeg4_version = 4;
794  s->flipflop_rounding = 1;
795  avctx->delay = 0;
796  s->low_delay = 1;
797  break;
798  case AV_CODEC_ID_WMV2:
799  s->out_format = FMT_H263;
800  s->h263_pred = 1;
801  s->unrestricted_mv = 1;
802  s->msmpeg4_version = 5;
803  s->flipflop_rounding = 1;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  default:
808  return -1;
809  }
810 
811  avctx->has_b_frames = !s->low_delay;
812 
813  s->encoding = 1;
814 
815  s->progressive_frame =
818  s->alternate_scan);
819 
820  /* init */
821  if (ff_MPV_common_init(s) < 0)
822  return -1;
823 
825 
828 
829  s->quant_precision = 5;
830 
831  ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
833 
841  && s->out_format == FMT_MPEG1)
843 
844  /* init q matrix */
845  for (i = 0; i < 64; i++) {
846  int j = s->dsp.idct_permutation[i];
848  s->mpeg_quant) {
851  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
852  s->intra_matrix[j] =
854  } else {
855  /* mpeg1/2 */
858  }
859  if (s->avctx->intra_matrix)
860  s->intra_matrix[j] = s->avctx->intra_matrix[i];
861  if (s->avctx->inter_matrix)
862  s->inter_matrix[j] = s->avctx->inter_matrix[i];
863  }
864 
865  /* precompute matrix */
866  /* for mjpeg, we do include qscale in the matrix */
867  if (s->out_format != FMT_MJPEG) {
869  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
870  31, 1);
872  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
873  31, 0);
874  }
875 
876  if (ff_rate_control_init(s) < 0)
877  return -1;
878 
879  return 0;
880 }
881 
883 {
884  MpegEncContext *s = avctx->priv_data;
885 
887 
890  s->out_format == FMT_MJPEG)
892 
893  av_freep(&avctx->extradata);
894 
895  return 0;
896 }
897 
static int get_sae(uint8_t *src, int ref, int stride)
{
    /* Sum of absolute errors of one 16x16 luma block against the
     * constant value ref (used as a flatness/intra measure). */
    int acc = 0;
    int row, col;

    for (row = 0; row < 16; row++) {
        for (col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;

            acc += diff < 0 ? -diff : diff;
        }
    }

    return acc;
}
911 
913  uint8_t *ref, int stride)
914 {
915  int x, y, w, h;
916  int acc = 0;
917 
918  w = s->width & ~15;
919  h = s->height & ~15;
920 
921  for (y = 0; y < h; y += 16) {
922  for (x = 0; x < w; x += 16) {
923  int offset = x + y * stride;
924  int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
925  16);
926  int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
927  int sae = get_sae(src + offset, mean, stride);
928 
929  acc += sae + 500 < sad;
930  }
931  }
932  return acc;
933 }
934 
935 
936 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
937 {
938  Picture *pic = NULL;
939  int64_t pts;
940  int i, display_picture_number = 0, ret;
941  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
942  (s->low_delay ? 0 : 1);
943  int direct = 1;
944 
945  if (pic_arg) {
946  pts = pic_arg->pts;
947  display_picture_number = s->input_picture_number++;
948 
949  if (pts != AV_NOPTS_VALUE) {
951  int64_t last = s->user_specified_pts;
952 
953  if (pts <= last) {
955  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
956  pts, last);
957  return AVERROR(EINVAL);
958  }
959 
960  if (!s->low_delay && display_picture_number == 1)
961  s->dts_delta = pts - last;
962  }
963  s->user_specified_pts = pts;
964  } else {
966  s->user_specified_pts =
967  pts = s->user_specified_pts + 1;
969  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
970  pts);
971  } else {
972  pts = display_picture_number;
973  }
974  }
975  }
976 
977  if (pic_arg) {
978  if (!pic_arg->buf[0])
979  direct = 0;
980  if (pic_arg->linesize[0] != s->linesize)
981  direct = 0;
982  if (pic_arg->linesize[1] != s->uvlinesize)
983  direct = 0;
984  if (pic_arg->linesize[2] != s->uvlinesize)
985  direct = 0;
986 
987  av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
988  pic_arg->linesize[1], s->linesize, s->uvlinesize);
989 
990  if (direct) {
991  i = ff_find_unused_picture(s, 1);
992  if (i < 0)
993  return i;
994 
995  pic = &s->picture[i];
996  pic->reference = 3;
997 
998  if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
999  return ret;
1000  if (ff_alloc_picture(s, pic, 1) < 0) {
1001  return -1;
1002  }
1003  } else {
1004  i = ff_find_unused_picture(s, 0);
1005  if (i < 0)
1006  return i;
1007 
1008  pic = &s->picture[i];
1009  pic->reference = 3;
1010 
1011  if (ff_alloc_picture(s, pic, 0) < 0) {
1012  return -1;
1013  }
1014 
1015  if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1016  pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1017  pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1018  // empty
1019  } else {
1020  int h_chroma_shift, v_chroma_shift;
1022  &h_chroma_shift,
1023  &v_chroma_shift);
1024 
1025  for (i = 0; i < 3; i++) {
1026  int src_stride = pic_arg->linesize[i];
1027  int dst_stride = i ? s->uvlinesize : s->linesize;
1028  int h_shift = i ? h_chroma_shift : 0;
1029  int v_shift = i ? v_chroma_shift : 0;
1030  int w = s->width >> h_shift;
1031  int h = s->height >> v_shift;
1032  uint8_t *src = pic_arg->data[i];
1033  uint8_t *dst = pic->f.data[i];
1034 
1035  if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1036  h = ((s->height + 15)/16*16) >> v_shift;
1037  }
1038 
1039  if (!s->avctx->rc_buffer_size)
1040  dst += INPLACE_OFFSET;
1041 
1042  if (src_stride == dst_stride)
1043  memcpy(dst, src, src_stride * h);
1044  else {
1045  int h2 = h;
1046  uint8_t *dst2 = dst;
1047  while (h2--) {
1048  memcpy(dst2, src, w);
1049  dst2 += dst_stride;
1050  src += src_stride;
1051  }
1052  }
1053  if ((s->width & 15) || (s->height & 15)) {
1054  s->dsp.draw_edges(dst, dst_stride,
1055  w, h,
1056  16>>h_shift,
1057  16>>v_shift,
1058  EDGE_BOTTOM);
1059  }
1060  }
1061  }
1062  }
1063  ret = av_frame_copy_props(&pic->f, pic_arg);
1064  if (ret < 0)
1065  return ret;
1066 
1067  pic->f.display_picture_number = display_picture_number;
1068  pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
1069  }
1070 
1071  /* shift buffer entries */
1072  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1073  s->input_picture[i - 1] = s->input_picture[i];
1074 
1075  s->input_picture[encoding_delay] = (Picture*) pic;
1076 
1077  return 0;
1078 }
1079 
1081 {
1082  int x, y, plane;
1083  int score = 0;
1084  int64_t score64 = 0;
1085 
1086  for (plane = 0; plane < 3; plane++) {
1087  const int stride = p->f.linesize[plane];
1088  const int bw = plane ? 1 : 2;
1089  for (y = 0; y < s->mb_height * bw; y++) {
1090  for (x = 0; x < s->mb_width * bw; x++) {
1091  int off = p->shared ? 0 : 16;
1092  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1093  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1094  int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1095 
1096  switch (s->avctx->frame_skip_exp) {
1097  case 0: score = FFMAX(score, v); break;
1098  case 1: score += FFABS(v); break;
1099  case 2: score += v * v; break;
1100  case 3: score64 += FFABS(v * v * (int64_t)v); break;
1101  case 4: score64 += v * v * (int64_t)(v * v); break;
1102  }
1103  }
1104  }
1105  }
1106 
1107  if (score)
1108  score64 = score;
1109 
1110  if (score64 < s->avctx->frame_skip_threshold)
1111  return 1;
1112  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1113  return 1;
1114  return 0;
1115 }
1116 
1118 {
1119  AVPacket pkt = { 0 };
1120  int ret, got_output;
1121 
1122  av_init_packet(&pkt);
1123  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1124  if (ret < 0)
1125  return ret;
1126 
1127  ret = pkt.size;
1128  av_free_packet(&pkt);
1129  return ret;
1130 }
1131 
{
    /* Estimate the best number of consecutive B-frames by trial-encoding
     * the queued input pictures at reduced resolution (1 >> brd_scale)
     * with every candidate B-frame count and keeping the count with the
     * lowest rate-distortion cost.
     * Returns the best B-frame count, or -1 if the scratch encoder could
     * not be opened.
     * NOTE(review): a few lines (scratch codec/context declarations,
     * some context fields, a ternary branch) are missing from this
     * excerpt — verify against the complete source. */
    AVFrame input[FF_MAX_B_FRAMES + 2];
    const int scale = s->avctx->brd_scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd  = INT64_MAX;
    int best_b_count = -1;

    av_assert0(scale >= 0 && scale <= 3);

    //emms_c();
    /* reuse the lambdas of the last coded P- and B-frames as cost weights */
    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>

    /* configure the scratch encoder at 1/2^scale resolution */
    c->width  = s->width  >> scale;
    c->height = s->height >> scale;
        CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
    c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
    c->mb_decision = s->avctx->mb_decision;
    c->me_cmp      = s->avctx->me_cmp;
    c->mb_cmp      = s->avctx->mb_cmp;
    c->me_sub_cmp  = s->avctx->me_sub_cmp;
    c->time_base   = s->avctx->time_base;
    c->max_b_frames = s->max_b_frames;

    if (avcodec_open2(c, codec, NULL) < 0)
        return -1;

    /* build downscaled copies of the next reference and the queued inputs */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        int ysize = c->width * c->height;
        int csize = (c->width / 2) * (c->height / 2);
        Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
                                                s->next_picture_ptr;

        avcodec_get_frame_defaults(&input[i]);
        input[i].data[0]     = av_malloc(ysize + 2 * csize);
        input[i].data[1]     = input[i].data[0] + ysize;
        input[i].data[2]     = input[i].data[1] + csize;
        input[i].linesize[0] = c->width;
        input[i].linesize[1] =
        input[i].linesize[2] = c->width / 2;

        if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
            pre_input = *pre_input_ptr;

            if (!pre_input.shared && i) {
                pre_input.f.data[0] += INPLACE_OFFSET;
                pre_input.f.data[1] += INPLACE_OFFSET;
                pre_input.f.data[2] += INPLACE_OFFSET;
            }

            s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
                                 pre_input.f.data[0], pre_input.f.linesize[0],
                                 c->width, c->height);
            s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
                                 pre_input.f.data[1], pre_input.f.linesize[1],
                                 c->width >> 1, c->height >> 1);
            s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
                                 pre_input.f.data[2], pre_input.f.linesize[2],
                                 c->width >> 1, c->height >> 1);
        }
    }

    /* try every candidate B-frame count j and accumulate its RD cost */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        c->error[0] = c->error[1] = c->error[2] = 0;

        /* frame 0 acts as the already-coded reference (I-frame, qp 1) */
        input[0].pict_type = AV_PICTURE_TYPE_I;
        input[0].quality   = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, &input[0]);

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            /* with j B-frames, every (j+1)-th frame (and the last) is a P */
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            input[i + 1].pict_type = is_p ?
            input[i + 1].quality   = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, &input[i + 1]);

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        while (out_size) {
            out_size = encode_frame(c, NULL);
            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* add the accumulated reconstruction error as distortion */
        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }
    }

    avcodec_close(c);
    av_freep(&c);

    for (i = 0; i < s->max_b_frames + 2; i++) {
        av_freep(&input[i].data[0]);
    }

    return best_b_count;
}
1255 
{
    /* Choose the next picture to be coded and set up coding order: decide
     * how many B-frames precede the next reference frame, handle frame
     * skipping, 2-pass picture types and closed-GOP constraints, then
     * reference the chosen picture as s->new_picture / s->current_picture.
     * Returns 0 on success or a negative error code.
     * NOTE(review): several lines are missing from this excerpt (queue
     * shift body, coded_picture_number assignments, an av_log call, ...);
     * verify against the complete source. */
    int i, ret;

    /* shift the queue of reordered pictures by one */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            s->next_picture_ptr == NULL || s->intra_only) {
            /* no reference available (or intra-only): code it as-is */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->coded_picture_number++;
        } else {
            int b_frames;

            /* frame skipping: drop the frame if it is close enough to the
             * last reference, and account the zero-size frame in the VBV */
            if (s->picture_in_gop_number < s->gop_size &&
                skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                // FIXME check that the gop check above is +-1 correct
                av_frame_unref(&s->input_picture[0]->f);

                emms_c();
                ff_vbv_update(s, 0);

                goto no_output_pic;
            }
        }

        /* 2nd pass: take the picture types recorded by the first pass */
        if (s->flags & CODEC_FLAG_PASS2) {
            for (i = 0; i < s->max_b_frames + 1; i++) {
                int pict_num = s->input_picture[0]->f.display_picture_number + i;

                if (pict_num >= s->rc_context.num_entries)
                    break;
                if (!s->input_picture[i]) {
                    s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                    break;
                }

                s->input_picture[i]->f.pict_type =
                    s->rc_context.entry[pict_num].new_pict_type;
            }
        }

        /* pick the number of B-frames according to b_frame_strategy */
        if (s->avctx->b_frame_strategy == 0) {
            /* strategy 0: always use max_b_frames (limited by queue fill) */
            b_frames = s->max_b_frames;
            while (b_frames && !s->input_picture[b_frames])
                b_frames--;
        } else if (s->avctx->b_frame_strategy == 1) {
            /* strategy 1: stop the B-run where the intra-MB count between
             * consecutive inputs gets too high */
            for (i = 1; i < s->max_b_frames + 1; i++) {
                if (s->input_picture[i] &&
                    s->input_picture[i]->b_frame_score == 0) {
                    s->input_picture[i]->b_frame_score =
                        get_intra_count(s,
                                        s->input_picture[i    ]->f.data[0],
                                        s->input_picture[i - 1]->f.data[0],
                                        s->linesize) + 1;
                }
            }
            for (i = 0; i < s->max_b_frames + 1; i++) {
                if (s->input_picture[i] == NULL ||
                    s->input_picture[i]->b_frame_score - 1 >
                        s->mb_num / s->avctx->b_sensitivity)
                    break;
            }

            b_frames = FFMAX(0, i - 1);

            /* reset scores */
            for (i = 0; i < b_frames + 1; i++) {
                s->input_picture[i]->b_frame_score = 0;
            }
        } else if (s->avctx->b_frame_strategy == 2) {
            /* strategy 2: full trial-encode search */
            b_frames = estimate_best_b_count(s);
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
            b_frames = 0;
        }

        emms_c();

        /* a user-forced non-B picture inside the run shortens it */
        for (i = b_frames - 1; i >= 0; i--) {
            int type = s->input_picture[i]->f.pict_type;
            if (type && type != AV_PICTURE_TYPE_B)
                b_frames = i;
        }
        if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
            b_frames == s->max_b_frames) {
                   "warning, too many b frames in a row\n");
        }

        /* GOP boundary handling: force an I-frame (or truncate the B-run
         * for strict GOP sizes) when the GOP limit is reached */
        if (s->picture_in_gop_number + b_frames >= s->gop_size) {
            if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                s->gop_size > s->picture_in_gop_number) {
                b_frames = s->gop_size - s->picture_in_gop_number - 1;
            } else {
                if (s->flags & CODEC_FLAG_CLOSED_GOP)
                    b_frames = 0;
                s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
            }
        }

        /* closed GOP: B-frames must not cross an I-frame */
        if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
            s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
            b_frames--;

        /* output order: reference first, then the B-frames before it */
        s->reordered_input_picture[0] = s->input_picture[b_frames];
        s->coded_picture_number++;
        for (i = 0; i < b_frames; i++) {
            s->reordered_input_picture[i + 1] = s->input_picture[i];
            s->reordered_input_picture[i + 1]->f.pict_type =
            s->coded_picture_number++;
        }
    }
}
no_output_pic:
    if (s->reordered_input_picture[0]) {
            AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one and ensure that the shared one is reusable

            Picture *pic;
            int i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            if (ff_alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            for (i = 0; i < 4; i++) {
                s->new_picture.f.data[i] += INPLACE_OFFSET;
            }
        }
        if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

    } else {
    }
    return 0;
}
1433 
                          AVFrame *pic_arg, int *got_packet)
{
    /* Top-level per-frame encode entry point (presumably
     * ff_MPV_encode_picture — the signature line is missing from this
     * excerpt; confirm against the full source): load and reorder the
     * input picture, encode it (retrying with a higher lambda on VBV
     * overflow), append VBV stuffing, patch vbv_delay for CBR MPEG-1/2,
     * and fill in the output packet and statistics.
     * Returns 0 on success, a negative error code on failure. */
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f.data[0]) {
        /* worst-case packet size: all MBs at maximum size plus headroom */
        if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
            return ret;
        if (s->mb_info) {
                                   s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* give each slice thread a proportional span of the packet buffer */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int   end_y = s->thread_context[i]->  end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f.pict_type;
        //emms_c();
        if (ff_MPV_frame_start(s, avctx) < 0)
            return -1;
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame bit statistics */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;

        ff_MPV_frame_end(s);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            /* VBV overflow: raise lambda, undo per-frame state changes and
             * re-encode the whole frame */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->avctx->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in MPV_frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind all slice bit writers */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->flags & CODEC_FLAG_PASS1)

        /* accumulate reconstruction error statistics */
        for (i = 0; i < 4; i++) {
            avctx->error[i] += s->current_picture_ptr->f.error[i];
        }

        if (s->flags & CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                   put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* VBV accounting; a positive result requests stuffing bytes */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4: stuffing via a dedicated start code */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                   "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            /* vbv_delay is expressed in 90 kHz clock ticks */
            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field in the picture header */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay = vbv_delay * 300;
        }
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        /* timestamps: with B-frames the dts lags the pts by one frame */
        pkt->pts = s->current_picture.f.pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f.key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s, &s->picture[i]);
    }

    assert((s->frame_bits & 7) == 0);

    pkt->size   = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1644 
                                                int n, int threshold)
{
    /* Eliminate blocks that contain only a few small (|level| == 1)
     * coefficients, since coding them would cost more bits than the
     * quality they add.  n selects the block in s->block[]; a negative
     * threshold also allows clearing the DC coefficient.
     * tab[run] scores a level-1 coefficient preceded by `run` zeros:
     * isolated small coefficients early in the scan count the most. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* a larger coefficient: the block is worth keeping */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap block: clear the coefficients (DC only if skip_dc == 0) */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
1700 
1701 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1702  int last_index)
1703 {
1704  int i;
1705  const int maxlevel = s->max_qcoeff;
1706  const int minlevel = s->min_qcoeff;
1707  int overflow = 0;
1708 
1709  if (s->mb_intra) {
1710  i = 1; // skip clipping of intra dc
1711  } else
1712  i = 0;
1713 
1714  for (; i <= last_index; i++) {
1715  const int j = s->intra_scantable.permutated[i];
1716  int level = block[j];
1717 
1718  if (level > maxlevel) {
1719  level = maxlevel;
1720  overflow++;
1721  } else if (level < minlevel) {
1722  level = minlevel;
1723  overflow++;
1724  }
1725 
1726  block[j] = level;
1727  }
1728 
1729  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1730  av_log(s->avctx, AV_LOG_INFO,
1731  "warning, clipping %d dct coefficients to %d..%d\n",
1732  overflow, minlevel, maxlevel);
1733 }
1734 
/**
 * Compute per-pixel visual weights for one 8x8 block, used by the
 * quantizer noise shaping.  Each weight is proportional to the local
 * pixel deviation over the (edge-clipped) 3x3 neighbourhood:
 * 36 * sqrt(count*sqr - sum*sum) / count.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int row, col;

    // FIXME optimize
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            const int y0 = FFMAX(row - 1, 0);
            const int y1 = FFMIN(8, row + 2);
            const int x0 = FFMAX(col - 1, 0);
            const int x1 = FFMIN(8, col + 2);
            int sum = 0, sqr = 0, count = 0;
            int yy, xx;

            for (yy = y0; yy < y1; yy++) {
                for (xx = x0; xx < x1; xx++) {
                    const int pix = ptr[xx + yy * stride];
                    sum   += pix;
                    sqr   += pix * pix;
                    count++;
                }
            }
            weight[col + 8 * row] =
                (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
1758 
                                                int motion_x, int motion_y,
                                                int mb_block_height,
                                                int mb_block_width,
                                                int mb_block_count)
{
    /* Encode one macroblock: optional adaptive quantization, edge
     * emulation for partial macroblocks, interlaced-DCT decision, pixel
     * fetch (intra) or motion compensation + residual (inter), optional
     * noise shaping, DCT + quantization, coefficient elimination, and
     * finally the per-codec entropy coding dispatch.
     * mb_block_{width,height,count} depend on the chroma subsampling
     * (see encode_mb()).
     * NOTE(review): a few lines are missing from this excerpt (the
     * dct_single_coeff_elimination() calls, some case labels and CONFIG_*
     * guards in the switch); verify against the complete source. */
    int16_t weight[12][64];
    int16_t orig[12][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int i;
    int skip_dct[12];
    int dct_offset = s->linesize * 8; // default for progressive frames
    int uv_dct_offset = s->uvlinesize * 8;
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    if (s->adaptive_quant) {
        /* per-MB quantizer from the lambda table, with codec-specific
         * limits on how much it may change */
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;

        s->lambda = s->lambda_table[mb_xy];
        update_qscale(s);

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->mb_intra) {
                        if (s->pict_type == AV_PICTURE_TYPE_B) {
                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        if (s->mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y  = s->new_picture.f.data[0] +
             (mb_y * 16 * wrap_y)              + mb_x * 16;
    ptr_cb = s->new_picture.f.data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
    ptr_cr = s->new_picture.f.data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

    /* macroblock overhangs the frame edge: pad it into the emu buffer */
    if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
        uint8_t *ebuf = s->edge_emu_buffer + 32;
        int cw = (s->width  + s->chroma_x_shift) >> s->chroma_x_shift;
        int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
        s->vdsp.emulated_edge_mc(ebuf, wrap_y, ptr_y, wrap_y, 16, 16, mb_x * 16,
                                 mb_y * 16, s->width, s->height);
        ptr_y = ebuf;
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, wrap_c, ptr_cb, wrap_c, mb_block_width,
                                 mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cb = ebuf + 18 * wrap_y;
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, wrap_c, ptr_cr, wrap_c, mb_block_width,
                                 mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cr = ebuf + 18 * wrap_y + 16;
    }

    if (s->mb_intra) {
        /* decide between frame (progressive) and field (interlaced) DCT by
         * comparing ildct_cmp scores of both line orderings */
        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                    NULL, wrap_y, 8) +
                                s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                    NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                       NULL, wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
                                                       NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422 ||
                        s->chroma_format == CHROMA_444)
                        wrap_c <<= 1;
                }
            }
        }

        /* intra: copy source pixels straight into the DCT blocks */
        s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
        s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
        s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
        s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
                s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
                s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
            } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
                s->dsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
                s->dsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
                s->dsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset    , wrap_c);
                s->dsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset    , wrap_c);
                s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
                s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
            }
        }
    } else {
        /* inter: motion-compensate into s->dest, then DCT the residual */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y  = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix  = s->hdsp.put_pixels_tab;
            op_qpix = s->dsp.put_qpel_pixels_tab;
        } else {
            op_pix  = s->hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f.data,
                          op_pix, op_qpix);
            /* bidirectional prediction averages the backward MC on top */
            op_pix  = s->hdsp.avg_pixels_tab;
            op_qpix = s->dsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f.data,
                          op_pix, op_qpix);
        }

        /* interlaced-DCT decision on the prediction error */
        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                    ptr_y,              wrap_y,
                                                    8) +
                                s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                    ptr_y + wrap_y * 8, wrap_y,
                                                    8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                       ptr_y,
                                                       wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
                                                       ptr_y + wrap_y,
                                                       wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                           dest_y + dct_offset, wrap_y);
        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                           dest_y + dct_offset + 8, wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
                                   dest_cb + uv_dct_offset, wrap_c);
                s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
                                   dest_cr + uv_dct_offset, wrap_c);
            }
        }
        /* pre quantization */
        /* low-variance MB: skip the DCT of blocks whose SAD against the
         * prediction is below ~20*qscale (they quantize to nothing) */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
                2 * s->qscale * s->qscale) {
            // FIXME optimize
            if (s->dsp.sad[1](NULL, ptr_y, dest_y,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + 8,
                              dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
                              dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
                              dest_y + dct_offset + 8,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) { /* 422 */
                if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
                                  dest_cb + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
                                  dest_cr + uv_dct_offset,
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    /* compute visual weights and keep the original coefficients for the
     * noise-shaping refinement pass below */
    if (s->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y                 , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb                , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr                , wrap_c);
        if (!s->chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
                // FIXME we could decide to change to quantizer instead of
                // clipping
                // JS: I don't think that would be a good idea it could lower
                //     quality instead of improve it. Just INTRADC clipping
                //     deserves changes in quantizer
                if (overflow)
                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
            } else
                s->block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->qscale);
                }
            }
        }

        if (s->luma_elim_threshold && !s->mb_intra)
            for (i = 0; i < 4; i++)
        if (s->chroma_elim_threshold && !s->mb_intra)
            for (i = 4; i < mb_block_count; i++)

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    /* gray-only coding: force chroma blocks to a flat mid-gray DC */
    if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
        if (!s->chroma_y_shift) { /* 422 / 444 */
            for (i=6; i<12; i++) {
                s->block_last_index[i] = 0;
                s->block[i][0] = s->block[4][0];
            }
        }
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* huffman encode */
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
        ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        ff_mjpeg_encode_mb(s, s->block);
        break;
    default:
        av_assert1(0);
    }
}
2141 
2142 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2143 {
2144  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2145  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2146  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2147 }
2148 
    /* Save the encoder state that a trial encode may change (prediction
     * state, bit statistics, quantizer) from s into the backup context d,
     * so it can be restored before trying another macroblock mode.
     * NOTE(review): the signature line and one trailing field copy are
     * missing from this excerpt — verify against the complete source. */
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* mpeg1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;

    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

}
2176 
    /* Copy the result of a trial encode from s into d (the "best so far"
     * context): counterpart of copy_context_before_encode(), additionally
     * propagating the chosen macroblock mode, motion vectors, bitstream
     * writers and coefficient blocks.
     * NOTE(review): the signature line and two trailing field copies are
     * missing from this excerpt — verify against the complete source. */
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* mpeg1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->qscale= s->qscale;

}
2215 
/* Trial-encode the current macroblock with coding mode `type` into one of
 * two scratch block/bitstream buffers (selected by *next_block) and keep
 * the result in `best` if its rate(-distortion) score beats *dmin.
 * NOTE(review): the PutBitContext array parameters (pb/pb2/tex_pb) are on
 * a signature line missing from this excerpt. */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* redirect reconstruction into the scratchpad so this candidate
         * does not overwrite the currently-best reconstruction */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->rd_scratchpad;
        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* rate in bits across all bitstream writers */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full RD mode: reconstruct the MB and add the SSE distortion */
        ff_MPV_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
2266 
2267 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2268  uint32_t *sq = ff_squareTbl + 256;
2269  int acc=0;
2270  int x,y;
2271 
2272  if(w==16 && h==16)
2273  return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2274  else if(w==8 && h==8)
2275  return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2276 
2277  for(y=0; y<h; y++){
2278  for(x=0; x<w; x++){
2279  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2280  }
2281  }
2282 
2283  av_assert2(acc>=0);
2284 
2285  return acc;
2286 }
2287 
2288 static int sse_mb(MpegEncContext *s){
2289  int w= 16;
2290  int h= 16;
2291 
2292  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2293  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2294 
2295  if(w==16 && h==16)
2296  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2297  return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2298  +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2299  +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2300  }else{
2301  return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2302  +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2303  +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2304  }
2305  else
2306  return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2307  +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2308  +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2309 }
2310 
/* Body of pre_estimate_motion_thread(AVCodecContext *c, void *arg):
 * pre-pass motion estimation over this slice context, scanning macroblocks
 * bottom-up / right-to-left with the pre_dia_size diamond.
 * NOTE(review): the function signature line and the per-MB estimation call
 * inside the inner loop (original line 2320) are missing from this
 * extracted listing. */
2312  MpegEncContext *s= *(void**)arg;
2313 
2314 
2315  s->me.pre_pass=1;
2316  s->me.dia_size= s->avctx->pre_dia_size;
2317  s->first_slice_line=1;
 /* reverse scan order: bottom row first, rightmost MB first */
2318  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2319  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2321  }
2322  s->first_slice_line=0;
2323  }
2324 
2325  s->me.pre_pass=0;
2326 
2327  return 0;
2328 }
2329 
/* Body of estimate_motion_thread(AVCodecContext *c, void *arg):
 * main motion-estimation pass over this slice context; computes the MV and
 * mb_type per macroblock and stores them in the context (see comment below).
 * NOTE(review): the function signature line and the actual per-MB estimation
 * calls (original lines 2347-2350) are missing from this extracted listing. */
2331  MpegEncContext *s= *(void**)arg;
2332 
2334 
2335  s->me.dia_size= s->avctx->dia_size;
2336  s->first_slice_line=1;
2337  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2338  s->mb_x=0; //for block init below
2340  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
 /* advance the 4 luma block indices by one MB (2 blocks horizontally) */
2341  s->block_index[0]+=2;
2342  s->block_index[1]+=2;
2343  s->block_index[2]+=2;
2344  s->block_index[3]+=2;
2345 
2346  /* compute motion vector & mb_type and store in context */
2349  else
2351  }
2352  s->first_slice_line=0;
2353  }
2354  return 0;
2355 }
2356 
/* Per-slice worker: compute the spatial variance and mean of every luma
 * macroblock of the source picture, storing them in current_picture.mb_var /
 * mb_mean and accumulating the total into me.mb_var_sum_temp (used for
 * I-frame rate control).
 * NOTE(review): one interior line (original line 2361) is missing from this
 * extracted listing. */
2357 static int mb_var_thread(AVCodecContext *c, void *arg){
2358  MpegEncContext *s= *(void**)arg;
2359  int mb_x, mb_y;
2360 
2362 
2363  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2364  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2365  int xx = mb_x * 16;
2366  int yy = mb_y * 16;
2367  uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2368  int varc;
2369  int sum = s->dsp.pix_sum(pix, s->linesize);
2370 
 /* variance = E[x^2] - E[x]^2, scaled by 256 pixels, with rounding bias */
2371  varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2372 
2373  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2374  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2375  s->me.mb_var_sum_temp += varc;
2376  }
2377  }
2378  return 0;
2379 }
2380 
/* Body of write_slice_end(MpegEncContext *s): finish the current slice —
 * MPEG-4 gets stuffing bits (and partition merging for partitioned frames),
 * then the main bitstream is byte-aligned/flushed; in PASS1 the alignment
 * bits are accounted as misc_bits.
 * NOTE(review): the function header and several conditional/call lines
 * (original lines 2381-2382, 2384, 2389, 2392) are missing from this
 * extracted listing. */
2383  if(s->partitioned_frame){
2385  }
2386 
2387  ff_mpeg4_stuffing(&s->pb);
2388  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2390  }
2391 
2393  flush_put_bits(&s->pb);
2394 
2395  if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2396  s->misc_bits+= get_bits_diff(s);
2397 }
2398 
/* Body of write_mb_info(MpegEncContext *s): fill the most recently reserved
 * 12-byte slot of the mb_info side-data buffer with the current bit offset,
 * quantizer, GOB number, macroblock address and H.263 motion-vector
 * predictors (4MV predictors are left zero).
 * NOTE(review): the function name/return-type line (original line 2399) is
 * missing from this extracted listing; the name is grounded by the call in
 * update_mb_info(). */
2400 {
 /* point at the last reserved 12-byte slot */
2401  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2402  int offset = put_bits_count(&s->pb);
2403  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2404  int gobn = s->mb_y / s->gob_index;
2405  int pred_x, pred_y;
2406  if (CONFIG_H263_ENCODER)
2407  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2408  bytestream_put_le32(&ptr, offset);
2409  bytestream_put_byte(&ptr, s->qscale);
2410  bytestream_put_byte(&ptr, gobn);
2411  bytestream_put_le16(&ptr, mba);
2412  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2413  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2414  /* 4MV not implemented */
2415  bytestream_put_byte(&ptr, 0); /* hmv2 */
2416  bytestream_put_byte(&ptr, 0); /* vmv2 */
2417 }
2418 
2419 static void update_mb_info(MpegEncContext *s, int startcode)
2420 {
2421  if (!s->mb_info)
2422  return;
2423  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2424  s->mb_info_size += 12;
2425  s->prev_mb_info = s->last_mb_info;
2426  }
2427  if (startcode) {
2428  s->prev_mb_info = put_bits_count(&s->pb)/8;
2429  /* This might have incremented mb_info_size above, and we return without
2430  * actually writing any info into that slot yet. But in that case,
2431  * this will be called again at the start of the after writing the
2432  * start code, actually writing the mb info. */
2433  return;
2434  }
2435 
2436  s->last_mb_info = put_bits_count(&s->pb)/8;
2437  if (!s->mb_info_size)
2438  s->mb_info_size += 12;
2439  write_mb_info(s);
2440 }
2441 
/* Per-slice encoding worker: iterates over every macroblock of this slice
 * context, writing GOB / video-packet headers in RTP mode, choosing the best
 * macroblock mode (rate or rate-distortion trial encoding into two scratch
 * bitstreams when several candidate types exist, direct encoding when only
 * one type is possible), and accumulating PSNR error statistics.
 * Returns 0 on success, -1 if the output buffer would overflow.
 * NOTE(review): this extracted listing is missing many original lines
 * (several case labels, conditionals and calls — e.g. 2453, 2493, 2496-2497,
 * 2510, 2533-2536, 2557-2559, 2597-2605, 2644-2645, 2711, 2746, 2822-2827,
 * 2833-2835, 2897-2898, 2906, 2910, 2914-3002 case labels, 3023-3024,
 * 3053-3054, 3062-3063); code below is kept byte-identical to the listing. */
2442 static int encode_thread(AVCodecContext *c, void *arg){
2443  MpegEncContext *s= *(void**)arg;
2444  int mb_x, mb_y, pdif = 0;
2445  int chr_h= 16>>s->chroma_y_shift;
2446  int i, j;
2447  MpegEncContext best_s, backup_s;
2448  uint8_t bit_buf[2][MAX_MB_BYTES];
2449  uint8_t bit_buf2[2][MAX_MB_BYTES];
2450  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2451  PutBitContext pb[2], pb2[2], tex_pb[2];
2452 
2454 
 /* double-buffered scratch bitstreams for candidate-mode trial encoding */
2455  for(i=0; i<2; i++){
2456  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2457  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2458  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2459  }
2460 
2461  s->last_bits= put_bits_count(&s->pb);
2462  s->mv_bits=0;
2463  s->misc_bits=0;
2464  s->i_tex_bits=0;
2465  s->p_tex_bits=0;
2466  s->i_count=0;
2467  s->f_count=0;
2468  s->b_count=0;
2469  s->skip_count=0;
2470 
2471  for(i=0; i<3; i++){
2472  /* init last dc values */
2473  /* note: quant matrix value (8) is implied here */
2474  s->last_dc[i] = 128 << s->intra_dc_precision;
2475 
2476  s->current_picture.f.error[i] = 0;
2477  }
2478  if(s->codec_id==AV_CODEC_ID_AMV){
2479  s->last_dc[0] = 128*8/13;
2480  s->last_dc[1] = 128*8/14;
2481  s->last_dc[2] = 128*8/14;
2482  }
2483  s->mb_skip_run = 0;
2484  memset(s->last_mv, 0, sizeof(s->last_mv));
2485 
2486  s->last_mv_dir = 0;
2487 
2488  switch(s->codec_id){
2489  case AV_CODEC_ID_H263:
2490  case AV_CODEC_ID_H263P:
2491  case AV_CODEC_ID_FLV1:
2492  if (CONFIG_H263_ENCODER)
2494  break;
2495  case AV_CODEC_ID_MPEG4:
2498  break;
2499  }
2500 
2501  s->resync_mb_x=0;
2502  s->resync_mb_y=0;
2503  s->first_slice_line = 1;
2504  s->ptr_lastgob = s->pb.buf;
 /* main macroblock loop over this slice */
2505  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2506  s->mb_x=0;
2507  s->mb_y= mb_y;
2508 
2509  ff_set_qscale(s, s->qscale);
2511 
2512  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2513  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2514  int mb_type= s->mb_type[xy];
2515 // int d;
2516  int dmin= INT_MAX;
2517  int dir;
2518 
 /* bail out before the bitstream buffer can overflow */
2519  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2520  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2521  return -1;
2522  }
2523  if(s->data_partitioning){
2524  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2525  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2526  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2527  return -1;
2528  }
2529  }
2530 
2531  s->mb_x = mb_x;
2532  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2534 
2537  xy= s->mb_y*s->mb_stride + s->mb_x;
2538  mb_type= s->mb_type[xy];
2539  }
2540 
2541  /* write gob / video packet header */
2542  if(s->rtp_mode){
2543  int current_packet_size, is_gob_start;
2544 
2545  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2546 
2547  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2548 
2549  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2550 
2551  switch(s->codec_id){
2552  case AV_CODEC_ID_H263:
2553  case AV_CODEC_ID_H263P:
2554  if(!s->h263_slice_structured)
2555  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2556  break;
2558  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2560  if(s->mb_skip_run) is_gob_start=0;
2561  break;
2562  case AV_CODEC_ID_MJPEG:
2563  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2564  break;
2565  }
2566 
2567  if(is_gob_start){
2568  if(s->start_mb_y != mb_y || mb_x!=0){
2569  write_slice_end(s);
2572  }
2573  }
2574 
2575  av_assert2((put_bits_count(&s->pb)&7) == 0);
2576  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2577 
 /* error_rate debug option: deliberately drop packets at intervals */
2578  if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2579  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2580  int d= 100 / s->avctx->error_rate;
2581  if(r % d == 0){
2582  current_packet_size=0;
2583  s->pb.buf_ptr= s->ptr_lastgob;
2584  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2585  }
2586  }
2587 
2588  if (s->avctx->rtp_callback){
2589  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2590  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2591  }
2592  update_mb_info(s, 1);
2593 
2594  switch(s->codec_id){
2595  case AV_CODEC_ID_MPEG4:
2596  if (CONFIG_MPEG4_ENCODER) {
2599  }
2600  break;
2606  }
2607  break;
2608  case AV_CODEC_ID_H263:
2609  case AV_CODEC_ID_H263P:
2610  if (CONFIG_H263_ENCODER)
2611  ff_h263_encode_gob_header(s, mb_y);
2612  break;
2613  }
2614 
2615  if(s->flags&CODEC_FLAG_PASS1){
2616  int bits= put_bits_count(&s->pb);
2617  s->misc_bits+= bits - s->last_bits;
2618  s->last_bits= bits;
2619  }
2620 
2621  s->ptr_lastgob += current_packet_size;
2622  s->first_slice_line=1;
2623  s->resync_mb_x=mb_x;
2624  s->resync_mb_y=mb_y;
2625  }
2626  }
2627 
2628  if( (s->resync_mb_x == s->mb_x)
2629  && s->resync_mb_y+1 == s->mb_y){
2630  s->first_slice_line=0;
2631  }
2632 
2633  s->mb_skipped=0;
2634  s->dquant=0; //only for QP_RD
2635 
2636  update_mb_info(s, 0);
2637 
 /* several candidate MB types (or QP_RD): trial-encode each and keep the
  * cheapest according to encode_mb_hq()'s score */
2638  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2639  int next_block=0;
2640  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2641 
2642  copy_context_before_encode(&backup_s, s, -1);
2643  backup_s.pb= s->pb;
2646  if(s->data_partitioning){
2647  backup_s.pb2= s->pb2;
2648  backup_s.tex_pb= s->tex_pb;
2649  }
2650 
2651  if(mb_type&CANDIDATE_MB_TYPE_INTER){
2652  s->mv_dir = MV_DIR_FORWARD;
2653  s->mv_type = MV_TYPE_16X16;
2654  s->mb_intra= 0;
2655  s->mv[0][0][0] = s->p_mv_table[xy][0];
2656  s->mv[0][0][1] = s->p_mv_table[xy][1];
2657  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2658  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2659  }
2660  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2661  s->mv_dir = MV_DIR_FORWARD;
2662  s->mv_type = MV_TYPE_FIELD;
2663  s->mb_intra= 0;
2664  for(i=0; i<2; i++){
2665  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2666  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2667  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2668  }
2669  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2670  &dmin, &next_block, 0, 0);
2671  }
2672  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2673  s->mv_dir = MV_DIR_FORWARD;
2674  s->mv_type = MV_TYPE_16X16;
2675  s->mb_intra= 0;
2676  s->mv[0][0][0] = 0;
2677  s->mv[0][0][1] = 0;
2678  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2679  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2680  }
2681  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2682  s->mv_dir = MV_DIR_FORWARD;
2683  s->mv_type = MV_TYPE_8X8;
2684  s->mb_intra= 0;
2685  for(i=0; i<4; i++){
2686  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2687  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2688  }
2689  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2690  &dmin, &next_block, 0, 0);
2691  }
2692  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2693  s->mv_dir = MV_DIR_FORWARD;
2694  s->mv_type = MV_TYPE_16X16;
2695  s->mb_intra= 0;
2696  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2697  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2698  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2699  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2700  }
2701  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2702  s->mv_dir = MV_DIR_BACKWARD;
2703  s->mv_type = MV_TYPE_16X16;
2704  s->mb_intra= 0;
2705  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2706  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2707  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2708  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2709  }
2710  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2712  s->mv_type = MV_TYPE_16X16;
2713  s->mb_intra= 0;
2714  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2715  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2716  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2717  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2718  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2719  &dmin, &next_block, 0, 0);
2720  }
2721  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2722  s->mv_dir = MV_DIR_FORWARD;
2723  s->mv_type = MV_TYPE_FIELD;
2724  s->mb_intra= 0;
2725  for(i=0; i<2; i++){
2726  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2727  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2728  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2729  }
2730  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2731  &dmin, &next_block, 0, 0);
2732  }
2733  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2734  s->mv_dir = MV_DIR_BACKWARD;
2735  s->mv_type = MV_TYPE_FIELD;
2736  s->mb_intra= 0;
2737  for(i=0; i<2; i++){
2738  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2739  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2740  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2741  }
2742  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2743  &dmin, &next_block, 0, 0);
2744  }
2745  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2747  s->mv_type = MV_TYPE_FIELD;
2748  s->mb_intra= 0;
2749  for(dir=0; dir<2; dir++){
2750  for(i=0; i<2; i++){
2751  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2752  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2753  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2754  }
2755  }
2756  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2757  &dmin, &next_block, 0, 0);
2758  }
2759  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2760  s->mv_dir = 0;
2761  s->mv_type = MV_TYPE_16X16;
2762  s->mb_intra= 1;
2763  s->mv[0][0][0] = 0;
2764  s->mv[0][0][1] = 0;
2765  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2766  &dmin, &next_block, 0, 0);
2767  if(s->h263_pred || s->h263_aic){
2768  if(best_s.mb_intra)
2769  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2770  else
2771  ff_clean_intra_table_entries(s); //old mode?
2772  }
2773  }
2774 
 /* QP_RD: additionally try the best mode at neighbouring quantizers */
2775  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2776  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2777  const int last_qp= backup_s.qscale;
2778  int qpi, qp, dc[6];
2779  int16_t ac[6][16];
2780  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2781  static const int dquant_tab[4]={-1,1,-2,2};
2782  int storecoefs = s->mb_intra && s->dc_val[0];
2783 
2784  av_assert2(backup_s.dquant == 0);
2785 
2786  //FIXME intra
2787  s->mv_dir= best_s.mv_dir;
2788  s->mv_type = MV_TYPE_16X16;
2789  s->mb_intra= best_s.mb_intra;
2790  s->mv[0][0][0] = best_s.mv[0][0][0];
2791  s->mv[0][0][1] = best_s.mv[0][0][1];
2792  s->mv[1][0][0] = best_s.mv[1][0][0];
2793  s->mv[1][0][1] = best_s.mv[1][0][1];
2794 
2795  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2796  for(; qpi<4; qpi++){
2797  int dquant= dquant_tab[qpi];
2798  qp= last_qp + dquant;
2799  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2800  continue;
2801  backup_s.dquant= dquant;
2802  if(storecoefs){
2803  for(i=0; i<6; i++){
2804  dc[i]= s->dc_val[0][ s->block_index[i] ];
2805  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2806  }
2807  }
2808 
2809  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2810  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2811  if(best_s.qscale != qp){
2812  if(storecoefs){
2813  for(i=0; i<6; i++){
2814  s->dc_val[0][ s->block_index[i] ]= dc[i];
2815  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2816  }
2817  }
2818  }
2819  }
2820  }
2821  }
2823  int mx= s->b_direct_mv_table[xy][0];
2824  int my= s->b_direct_mv_table[xy][1];
2825 
2826  backup_s.dquant = 0;
2828  s->mb_intra= 0;
2829  ff_mpeg4_set_direct_mv(s, mx, my);
2830  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2831  &dmin, &next_block, mx, my);
2832  }
2834  backup_s.dquant = 0;
2836  s->mb_intra= 0;
2837  ff_mpeg4_set_direct_mv(s, 0, 0);
2838  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2839  &dmin, &next_block, 0, 0);
2840  }
2841  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2842  int coded=0;
2843  for(i=0; i<6; i++)
2844  coded |= s->block_last_index[i];
2845  if(coded){
2846  int mx,my;
2847  memcpy(s->mv, best_s.mv, sizeof(s->mv));
2848  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2849  mx=my=0; //FIXME find the one we actually used
2850  ff_mpeg4_set_direct_mv(s, mx, my);
2851  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2852  mx= s->mv[1][0][0];
2853  my= s->mv[1][0][1];
2854  }else{
2855  mx= s->mv[0][0][0];
2856  my= s->mv[0][0][1];
2857  }
2858 
2859  s->mv_dir= best_s.mv_dir;
2860  s->mv_type = best_s.mv_type;
2861  s->mb_intra= 0;
2862 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2863  s->mv[0][0][1] = best_s.mv[0][0][1];
2864  s->mv[1][0][0] = best_s.mv[1][0][0];
2865  s->mv[1][0][1] = best_s.mv[1][0][1];*/
2866  backup_s.dquant= 0;
2867  s->skipdct=1;
2868  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2869  &dmin, &next_block, mx, my);
2870  s->skipdct=0;
2871  }
2872  }
2873 
2874  s->current_picture.qscale_table[xy] = best_s.qscale;
2875 
 /* adopt the winning candidate and copy its bits into the real bitstream */
2876  copy_context_after_encode(s, &best_s, -1);
2877 
2878  pb_bits_count= put_bits_count(&s->pb);
2879  flush_put_bits(&s->pb);
2880  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2881  s->pb= backup_s.pb;
2882 
2883  if(s->data_partitioning){
2884  pb2_bits_count= put_bits_count(&s->pb2);
2885  flush_put_bits(&s->pb2);
2886  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2887  s->pb2= backup_s.pb2;
2888 
2889  tex_pb_bits_count= put_bits_count(&s->tex_pb);
2890  flush_put_bits(&s->tex_pb);
2891  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2892  s->tex_pb= backup_s.tex_pb;
2893  }
2894  s->last_bits= put_bits_count(&s->pb);
2895 
2896  if (CONFIG_H263_ENCODER &&
2899 
2900  if(next_block==0){ //FIXME 16 vs linesize16
2901  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2902  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2903  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2904  }
2905 
2907  ff_MPV_decode_mb(s, s->block);
2908  } else {
 /* single-candidate path: set up MVs for the one possible MB type and
  * encode directly into the main bitstream (case labels for the mb_type
  * values are missing from this extracted listing) */
2909  int motion_x = 0, motion_y = 0;
2911  // only one MB-Type possible
2912 
2913  switch(mb_type){
2915  s->mv_dir = 0;
2916  s->mb_intra= 1;
2917  motion_x= s->mv[0][0][0] = 0;
2918  motion_y= s->mv[0][0][1] = 0;
2919  break;
2921  s->mv_dir = MV_DIR_FORWARD;
2922  s->mb_intra= 0;
2923  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2924  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2925  break;
2927  s->mv_dir = MV_DIR_FORWARD;
2928  s->mv_type = MV_TYPE_FIELD;
2929  s->mb_intra= 0;
2930  for(i=0; i<2; i++){
2931  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2932  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2933  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2934  }
2935  break;
2937  s->mv_dir = MV_DIR_FORWARD;
2938  s->mv_type = MV_TYPE_8X8;
2939  s->mb_intra= 0;
2940  for(i=0; i<4; i++){
2941  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2942  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2943  }
2944  break;
2946  if (CONFIG_MPEG4_ENCODER) {
2948  s->mb_intra= 0;
2949  motion_x=s->b_direct_mv_table[xy][0];
2950  motion_y=s->b_direct_mv_table[xy][1];
2951  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2952  }
2953  break;
2955  if (CONFIG_MPEG4_ENCODER) {
2957  s->mb_intra= 0;
2958  ff_mpeg4_set_direct_mv(s, 0, 0);
2959  }
2960  break;
2963  s->mb_intra= 0;
2964  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2965  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2966  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2967  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2968  break;
2970  s->mv_dir = MV_DIR_BACKWARD;
2971  s->mb_intra= 0;
2972  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2973  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2974  break;
2976  s->mv_dir = MV_DIR_FORWARD;
2977  s->mb_intra= 0;
2978  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2979  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2980  break;
2982  s->mv_dir = MV_DIR_FORWARD;
2983  s->mv_type = MV_TYPE_FIELD;
2984  s->mb_intra= 0;
2985  for(i=0; i<2; i++){
2986  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2987  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2988  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2989  }
2990  break;
2992  s->mv_dir = MV_DIR_BACKWARD;
2993  s->mv_type = MV_TYPE_FIELD;
2994  s->mb_intra= 0;
2995  for(i=0; i<2; i++){
2996  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2997  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2998  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2999  }
3000  break;
3003  s->mv_type = MV_TYPE_FIELD;
3004  s->mb_intra= 0;
3005  for(dir=0; dir<2; dir++){
3006  for(i=0; i<2; i++){
3007  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3008  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3009  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3010  }
3011  }
3012  break;
3013  default:
3014  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3015  }
3016 
3017  encode_mb(s, motion_x, motion_y);
3018 
3019  // RAL: Update last macroblock type
3020  s->last_mv_dir = s->mv_dir;
3021 
3022  if (CONFIG_H263_ENCODER &&
3025 
3026  ff_MPV_decode_mb(s, s->block);
3027  }
3028 
3029  /* clean the MV table in IPS frames for direct mode in B frames */
3030  if(s->mb_intra /* && I,P,S_TYPE */){
3031  s->p_mv_table[xy][0]=0;
3032  s->p_mv_table[xy][1]=0;
3033  }
3034 
 /* accumulate per-plane SSE for PSNR reporting */
3035  if(s->flags&CODEC_FLAG_PSNR){
3036  int w= 16;
3037  int h= 16;
3038 
3039  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3040  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3041 
3042  s->current_picture.f.error[0] += sse(
3043  s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3044  s->dest[0], w, h, s->linesize);
3045  s->current_picture.f.error[1] += sse(
3046  s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3047  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3048  s->current_picture.f.error[2] += sse(
3049  s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3050  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3051  }
3052  if(s->loop_filter){
3055  }
3056  av_dlog(s->avctx, "MB %d %d bits\n",
3057  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3058  }
3059  }
3060 
3061  //not beautiful here but we must write it before flushing so it has to be here
3064 
3065  write_slice_end(s);
3066 
3067  /* Send the last GOB if RTP */
3068  if (s->avctx->rtp_callback) {
3069  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3070  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3071  /* Call the RTP callback to send the last GOB */
3072  emms_c();
3073  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3074  }
3075 
3076  return 0;
3077 }
3078 
/* MERGE: move an accumulated statistic from a slave slice context into the
 * master and reset the slave's copy.
 * NOTE(review): the macro body is two unbraced statements, so it is only
 * safe when invoked as a plain statement (as all call sites below do) —
 * an unbraced `if (x) MERGE(y);` would misbehave. */
3079 #define MERGE(field) dst->field += src->field; src->field=0
/* Body of merge_context_after_me(MpegEncContext *dst, MpegEncContext *src):
 * fold motion-estimation statistics of a slice context into the master.
 * NOTE(review): the function signature line (original 3080) is missing from
 * this extracted listing. */
3081  MERGE(me.scene_change_score);
3082  MERGE(me.mc_mb_var_sum_temp);
3083  MERGE(me.mb_var_sum_temp);
3084 }
3085 
/* Body of merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src):
 * fold per-slice encoding statistics into the master context and append the
 * slice's (byte-aligned) bitstream onto the master bitstream.
 * NOTE(review): the function signature line and a few MERGE lines (original
 * 3086, 3100-3103) are missing from this extracted listing. */
3088  int i;
3089 
3090  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3091  MERGE(dct_count[1]);
3092  MERGE(mv_bits);
3093  MERGE(i_tex_bits);
3094  MERGE(p_tex_bits);
3095  MERGE(i_count);
3096  MERGE(f_count);
3097  MERGE(b_count);
3098  MERGE(skip_count);
3099  MERGE(misc_bits);
3100  MERGE(er.error_count);
3104 
3105  if(dst->avctx->noise_reduction){
3106  for(i=0; i<64; i++){
3107  MERGE(dct_error_sum[0][i]);
3108  MERGE(dct_error_sum[1][i]);
3109  }
3110  }
3111 
 /* both streams must be byte-aligned before concatenation */
3112  assert(put_bits_count(&src->pb) % 8 ==0);
3113  assert(put_bits_count(&dst->pb) % 8 ==0);
3114  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3115  flush_put_bits(&dst->pb);
3116 }
3117 
/* Choose the quantizer/lambda for the current picture: use a pending
 * next_lambda if set, otherwise (non-fixed qscale) query rate control;
 * with adaptive quant, build the per-MB qscale table via the codec-specific
 * helper. Returns 0 on success, -1 if rate control rejects the picture.
 * NOTE(review): several interior lines (original 3120-3121, 3124-3125,
 * 3133-3134, 3140) are missing from this extracted listing. */
3118 static int estimate_qp(MpegEncContext *s, int dry_run){
3119  if (s->next_lambda){
3122  if(!dry_run) s->next_lambda= 0;
3123  } else if (!s->fixed_qscale) {
3126  if (s->current_picture.f.quality < 0)
3127  return -1;
3128  }
3129 
3130  if(s->adaptive_quant){
3131  switch(s->codec_id){
3132  case AV_CODEC_ID_MPEG4:
3135  break;
3136  case AV_CODEC_ID_H263:
3137  case AV_CODEC_ID_H263P:
3138  case AV_CODEC_ID_FLV1:
3139  if (CONFIG_H263_ENCODER)
3141  break;
3142  default:
3143  ff_init_qscale_tab(s);
3144  }
3145 
3146  s->lambda= s->lambda_table[0];
3147  //FIXME broken
3148  }else
3149  s->lambda = s->current_picture.f.quality;
3150  update_qscale(s);
3151  return 0;
3152 }
3153 
3154 /* must be called before writing the header */
/* Body of set_frame_distances(MpegEncContext *s): derive the temporal
 * distances used for B-frame prediction — pp_time (distance between the two
 * surrounding reference frames) and pb_time (distance from the previous
 * reference to this B frame) — from picture PTS values.
 * NOTE(review): the function signature line (original 3155) is missing from
 * this extracted listing. */
3156  assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3157  s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3158 
3159  if(s->pict_type==AV_PICTURE_TYPE_B){
3160  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3161  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3162  }else{
3163  s->pp_time= s->time - s->last_non_b_time;
3164  s->last_non_b_time= s->time;
3165  assert(s->picture_number==0 || s->pp_time > 0);
3166  }
3167 }
3168 
3170 {
3171  int i, ret;
3172  int bits;
3173  int context_count = s->slice_context_count;
3174 
3176 
3177  /* Reset the average MB variance */
3178  s->me.mb_var_sum_temp =
3179  s->me.mc_mb_var_sum_temp = 0;
3180 
3181  /* we need to initialize some time vars before we can encode b-frames */
3182  // RAL: Condition added for MPEG1VIDEO
3186  ff_set_mpeg4_time(s);
3187 
3188  s->me.scene_change_score=0;
3189 
3190 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3191 
3192  if(s->pict_type==AV_PICTURE_TYPE_I){
3193  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3194  else s->no_rounding=0;
3195  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3197  s->no_rounding ^= 1;
3198  }
3199 
3200  if(s->flags & CODEC_FLAG_PASS2){
3201  if (estimate_qp(s,1) < 0)
3202  return -1;
3203  ff_get_2pass_fcode(s);
3204  }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3206  s->lambda= s->last_lambda_for[s->pict_type];
3207  else
3209  update_qscale(s);
3210  }
3211 
3212  if(s->codec_id != AV_CODEC_ID_AMV){
3217  }
3218 
3219  s->mb_intra=0; //for the rate distortion & bit compare functions
3220  for(i=1; i<context_count; i++){
3222  if (ret < 0)
3223  return ret;
3224  }
3225 
3226  if(ff_init_me(s)<0)
3227  return -1;
3228 
3229  /* Estimate motion for every MB */
3230  if(s->pict_type != AV_PICTURE_TYPE_I){
3231  s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3232  s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3233  if (s->pict_type != AV_PICTURE_TYPE_B) {
3234  if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3235  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3236  }
3237  }
3238 
3239  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3240  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3241  /* I-Frame */
3242  for(i=0; i<s->mb_stride*s->mb_height; i++)
3244 
3245  if(!s->fixed_qscale){
3246  /* finding spatial complexity for I-frame rate control */
3247  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3248  }
3249  }
3250  for(i=1; i<context_count; i++){
3252  }
3254  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3255  emms_c();
3256 
3259  for(i=0; i<s->mb_stride*s->mb_height; i++)
3261  if(s->msmpeg4_version >= 3)
3262  s->no_rounding=1;
3263  av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3265  }
3266 
3267  if(!s->umvplus){
3270 
3272  int a,b;
3273  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3275  s->f_code= FFMAX3(s->f_code, a, b);
3276  }
3277 
3278  ff_fix_long_p_mvs(s);
3279  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3281  int j;
3282  for(i=0; i<2; i++){
3283  for(j=0; j<2; j++)
3286  }
3287  }
3288  }
3289 
3290  if(s->pict_type==AV_PICTURE_TYPE_B){
3291  int a, b;
3292 
3295  s->f_code = FFMAX(a, b);
3296 
3299  s->b_code = FFMAX(a, b);
3300 
3306  int dir, j;
3307  for(dir=0; dir<2; dir++){
3308  for(i=0; i<2; i++){
3309  for(j=0; j<2; j++){
3312  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3313  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3314  }
3315  }
3316  }
3317  }
3318  }
3319  }
3320 
3321  if (estimate_qp(s, 0) < 0)
3322  return -1;
3323 
3324  if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3325  s->qscale= 3; //reduce clipping problems
3326 
3327  if (s->out_format == FMT_MJPEG) {
3328  /* for mjpeg, we do include qscale in the matrix */
3329  for(i=1;i<64;i++){
3330  int j= s->dsp.idct_permutation[i];
3331 
3332  s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3333  }
3334  s->y_dc_scale_table=
3338  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3339  s->qscale= 8;
3340  }
3341  if(s->codec_id == AV_CODEC_ID_AMV){
3342  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3343  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3344  for(i=1;i<64;i++){
3345  int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
3346 
3347  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3348  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3349  }
3350  s->y_dc_scale_table= y;
3351  s->c_dc_scale_table= c;
3352  s->intra_matrix[0] = 13;
3353  s->chroma_intra_matrix[0] = 14;
3355  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3357  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3358  s->qscale= 8;
3359  }
3360 
3361  //FIXME var duplication
3363  s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3366 
3367  if (s->current_picture.f.key_frame)
3368  s->picture_in_gop_number=0;
3369 
3370  s->mb_x = s->mb_y = 0;
3371  s->last_bits= put_bits_count(&s->pb);
3372  switch(s->out_format) {
3373  case FMT_MJPEG:
3376  break;
3377  case FMT_H261:
3378  if (CONFIG_H261_ENCODER)
3379  ff_h261_encode_picture_header(s, picture_number);
3380  break;
3381  case FMT_H263:
3383  ff_wmv2_encode_picture_header(s, picture_number);
3384  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3385  ff_msmpeg4_encode_picture_header(s, picture_number);
3386  else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3387  ff_mpeg4_encode_picture_header(s, picture_number);
3388  else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3389  ff_rv10_encode_picture_header(s, picture_number);
3390  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3391  ff_rv20_encode_picture_header(s, picture_number);
3392  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3393  ff_flv_encode_picture_header(s, picture_number);
3394  else if (CONFIG_H263_ENCODER)
3395  ff_h263_encode_picture_header(s, picture_number);
3396  break;
3397  case FMT_MPEG1:
3399  ff_mpeg1_encode_picture_header(s, picture_number);
3400  break;
3401  default:
3402  av_assert0(0);
3403  }
3404  bits= put_bits_count(&s->pb);
3405  s->header_bits= bits - s->last_bits;
3406 
3407  for(i=1; i<context_count; i++){
3409  }
3410  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3411  for(i=1; i<context_count; i++){
3413  }
3414  emms_c();
3415  return 0;
3416 }
3417 
3418 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3419  const int intra= s->mb_intra;
3420  int i;
3421 
3422  s->dct_count[intra]++;
3423 
3424  for(i=0; i<64; i++){
3425  int level= block[i];
3426 
3427  if(level){
3428  if(level>0){
3429  s->dct_error_sum[intra][i] += level;
3430  level -= s->dct_offset[intra][i];
3431  if(level<0) level=0;
3432  }else{
3433  s->dct_error_sum[intra][i] -= level;
3434  level += s->dct_offset[intra][i];
3435  if(level>0) level=0;
3436  }
3437  block[i]= level;
3438  }
3439  }
3440 }
3441 
3443  int16_t *block, int n,
3444  int qscale, int *overflow){
3445  const int *qmat;
3446  const uint8_t *scantable= s->intra_scantable.scantable;
3447  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3448  int max=0;
3449  unsigned int threshold1, threshold2;
3450  int bias=0;
3451  int run_tab[65];
3452  int level_tab[65];
3453  int score_tab[65];
3454  int survivor[65];
3455  int survivor_count;
3456  int last_run=0;
3457  int last_level=0;
3458  int last_score= 0;
3459  int last_i;
3460  int coeff[2][64];
3461  int coeff_count[64];
3462  int qmul, qadd, start_i, last_non_zero, i, dc;
3463  const int esc_length= s->ac_esc_length;
3464  uint8_t * length;
3465  uint8_t * last_length;
3466  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3467 
3468  s->dsp.fdct (block);
3469 
3470  if(s->dct_error_sum)
3471  s->denoise_dct(s, block);
3472  qmul= qscale*16;
3473  qadd= ((qscale-1)|1)*8;
3474 
3475  if (s->mb_intra) {
3476  int q;
3477  if (!s->h263_aic) {
3478  if (n < 4)
3479  q = s->y_dc_scale;
3480  else
3481  q = s->c_dc_scale;
3482  q = q << 3;
3483  } else{
3484  /* For AIC we skip quant/dequant of INTRADC */
3485  q = 1 << 3;
3486  qadd=0;
3487  }
3488 
3489  /* note: block[0] is assumed to be positive */
3490  block[0] = (block[0] + (q >> 1)) / q;
3491  start_i = 1;
3492  last_non_zero = 0;
3493  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3494  if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3495  bias= 1<<(QMAT_SHIFT-1);
3496  length = s->intra_ac_vlc_length;
3497  last_length= s->intra_ac_vlc_last_length;
3498  } else {
3499  start_i = 0;
3500  last_non_zero = -1;
3501  qmat = s->q_inter_matrix[qscale];
3502  length = s->inter_ac_vlc_length;
3503  last_length= s->inter_ac_vlc_last_length;
3504  }
3505  last_i= start_i;
3506 
3507  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3508  threshold2= (threshold1<<1);
3509 
3510  for(i=63; i>=start_i; i--) {
3511  const int j = scantable[i];
3512  int level = block[j] * qmat[j];
3513 
3514  if(((unsigned)(level+threshold1))>threshold2){
3515  last_non_zero = i;
3516  break;
3517  }
3518  }
3519 
3520  for(i=start_i; i<=last_non_zero; i++) {
3521  const int j = scantable[i];
3522  int level = block[j] * qmat[j];
3523 
3524 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3525 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3526  if(((unsigned)(level+threshold1))>threshold2){
3527  if(level>0){
3528  level= (bias + level)>>QMAT_SHIFT;
3529  coeff[0][i]= level;
3530  coeff[1][i]= level-1;
3531 // coeff[2][k]= level-2;
3532  }else{
3533  level= (bias - level)>>QMAT_SHIFT;
3534  coeff[0][i]= -level;
3535  coeff[1][i]= -level+1;
3536 // coeff[2][k]= -level+2;
3537  }
3538  coeff_count[i]= FFMIN(level, 2);
3539  av_assert2(coeff_count[i]);
3540  max |=level;
3541  }else{
3542  coeff[0][i]= (level>>31)|1;
3543  coeff_count[i]= 1;
3544  }
3545  }
3546 
3547  *overflow= s->max_qcoeff < max; //overflow might have happened
3548 
3549  if(last_non_zero < start_i){
3550  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3551  return last_non_zero;
3552  }
3553 
3554  score_tab[start_i]= 0;
3555  survivor[0]= start_i;
3556  survivor_count= 1;
3557 
3558  for(i=start_i; i<=last_non_zero; i++){
3559  int level_index, j, zero_distortion;
3560  int dct_coeff= FFABS(block[ scantable[i] ]);
3561  int best_score=256*256*256*120;
3562 
3563  if (s->dsp.fdct == ff_fdct_ifast)
3564  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3565  zero_distortion= dct_coeff*dct_coeff;
3566 
3567  for(level_index=0; level_index < coeff_count[i]; level_index++){
3568  int distortion;
3569  int level= coeff[level_index][i];
3570  const int alevel= FFABS(level);
3571  int unquant_coeff;
3572 
3573  av_assert2(level);
3574 
3575  if(s->out_format == FMT_H263){
3576  unquant_coeff= alevel*qmul + qadd;
3577  }else{ //MPEG1
3578  j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3579  if(s->mb_intra){
3580  unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3581  unquant_coeff = (unquant_coeff - 1) | 1;
3582  }else{
3583  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3584  unquant_coeff = (unquant_coeff - 1) | 1;
3585  }
3586  unquant_coeff<<= 3;
3587  }
3588 
3589  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3590  level+=64;
3591  if((level&(~127)) == 0){
3592  for(j=survivor_count-1; j>=0; j--){
3593  int run= i - survivor[j];
3594  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3595  score += score_tab[i-run];
3596 
3597  if(score < best_score){
3598  best_score= score;
3599  run_tab[i+1]= run;
3600  level_tab[i+1]= level-64;
3601  }
3602  }
3603 
3604  if(s->out_format == FMT_H263){
3605  for(j=survivor_count-1; j>=0; j--){
3606  int run= i - survivor[j];
3607  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3608  score += score_tab[i-run];
3609  if(score < last_score){
3610  last_score= score;
3611  last_run= run;
3612  last_level= level-64;
3613  last_i= i+1;
3614  }
3615  }
3616  }
3617  }else{
3618  distortion += esc_length*lambda;
3619  for(j=survivor_count-1; j>=0; j--){
3620  int run= i - survivor[j];
3621  int score= distortion + score_tab[i-run];
3622 
3623  if(score < best_score){
3624  best_score= score;
3625  run_tab[i+1]= run;
3626  level_tab[i+1]= level-64;
3627  }
3628  }
3629 
3630  if(s->out_format == FMT_H263){
3631  for(j=survivor_count-1; j>=0; j--){
3632  int run= i - survivor[j];
3633  int score= distortion + score_tab[i-run];
3634  if(score < last_score){
3635  last_score= score;
3636  last_run= run;
3637  last_level= level-64;
3638  last_i= i+1;
3639  }
3640  }
3641  }
3642  }
3643  }
3644 
3645  score_tab[i+1]= best_score;
3646 
3647  //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3648  if(last_non_zero <= 27){
3649  for(; survivor_count; survivor_count--){
3650  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3651  break;
3652  }
3653  }else{
3654  for(; survivor_count; survivor_count--){
3655  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3656  break;
3657  }
3658  }
3659 
3660  survivor[ survivor_count++ ]= i+1;
3661  }
3662 
3663  if(s->out_format != FMT_H263){
3664  last_score= 256*256*256*120;
3665  for(i= survivor[0]; i<=last_non_zero + 1; i++){
3666  int score= score_tab[i];
3667  if(i) score += lambda*2; //FIXME exacter?
3668 
3669  if(score < last_score){
3670  last_score= score;
3671  last_i= i;
3672  last_level= level_tab[i];
3673  last_run= run_tab[i];
3674  }
3675  }
3676  }
3677 
3678  s->coded_score[n] = last_score;
3679 
3680  dc= FFABS(block[0]);
3681  last_non_zero= last_i - 1;
3682  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3683 
3684  if(last_non_zero < start_i)
3685  return last_non_zero;
3686 
3687  if(last_non_zero == 0 && start_i == 0){
3688  int best_level= 0;
3689  int best_score= dc * dc;
3690 
3691  for(i=0; i<coeff_count[0]; i++){
3692  int level= coeff[i][0];
3693  int alevel= FFABS(level);
3694  int unquant_coeff, score, distortion;
3695 
3696  if(s->out_format == FMT_H263){
3697  unquant_coeff= (alevel*qmul + qadd)>>3;
3698  }else{ //MPEG1
3699  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3700  unquant_coeff = (unquant_coeff - 1) | 1;
3701  }
3702  unquant_coeff = (unquant_coeff + 4) >> 3;
3703  unquant_coeff<<= 3 + 3;
3704 
3705  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3706  level+=64;
3707  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3708  else score= distortion + esc_length*lambda;
3709 
3710  if(score < best_score){
3711  best_score= score;
3712  best_level= level - 64;
3713  }
3714  }
3715  block[0]= best_level;
3716  s->coded_score[n] = best_score - dc*dc;
3717  if(best_level == 0) return -1;
3718  else return last_non_zero;
3719  }
3720 
3721  i= last_i;
3722  av_assert2(last_level);
3723 
3724  block[ perm_scantable[last_non_zero] ]= last_level;
3725  i -= last_run + 1;
3726 
3727  for(; i>start_i; i -= run_tab[i] + 1){
3728  block[ perm_scantable[i-1] ]= level_tab[i];
3729  }
3730 
3731  return last_non_zero;
3732 }
3733 
3734 //#define REFINE_STATS 1
3735 static int16_t basis[64][64];
3736 
3737 static void build_basis(uint8_t *perm){
3738  int i, j, x, y;
3739  emms_c();
3740  for(i=0; i<8; i++){
3741  for(j=0; j<8; j++){
3742  for(y=0; y<8; y++){
3743  for(x=0; x<8; x++){
3744  double s= 0.25*(1<<BASIS_SHIFT);
3745  int index= 8*i + j;
3746  int perm_index= perm[index];
3747  if(i==0) s*= sqrt(0.5);
3748  if(j==0) s*= sqrt(0.5);
3749  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3750  }
3751  }
3752  }
3753  }
3754 }
3755 
3756 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3757  int16_t *block, int16_t *weight, int16_t *orig,
3758  int n, int qscale){
3759  int16_t rem[64];
3760  LOCAL_ALIGNED_16(int16_t, d1, [64]);
3761  const uint8_t *scantable= s->intra_scantable.scantable;
3762  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3763 // unsigned int threshold1, threshold2;
3764 // int bias=0;
3765  int run_tab[65];
3766  int prev_run=0;
3767  int prev_level=0;
3768  int qmul, qadd, start_i, last_non_zero, i, dc;
3769  uint8_t * length;
3770  uint8_t * last_length;
3771  int lambda;
3772  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3773 #ifdef REFINE_STATS
3774 static int count=0;
3775 static int after_last=0;
3776 static int to_zero=0;
3777 static int from_zero=0;
3778 static int raise=0;
3779 static int lower=0;
3780 static int messed_sign=0;
3781 #endif
3782 
3783  if(basis[0][0] == 0)
3785 
3786  qmul= qscale*2;
3787  qadd= (qscale-1)|1;
3788  if (s->mb_intra) {
3789  if (!s->h263_aic) {
3790  if (n < 4)
3791  q = s->y_dc_scale;
3792  else
3793  q = s->c_dc_scale;
3794  } else{
3795  /* For AIC we skip quant/dequant of INTRADC */
3796  q = 1;
3797  qadd=0;
3798  }
3799  q <<= RECON_SHIFT-3;
3800  /* note: block[0] is assumed to be positive */
3801  dc= block[0]*q;
3802 // block[0] = (block[0] + (q >> 1)) / q;
3803  start_i = 1;
3804 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3805 // bias= 1<<(QMAT_SHIFT-1);
3806  length = s->intra_ac_vlc_length;
3807  last_length= s->intra_ac_vlc_last_length;
3808  } else {
3809  dc= 0;
3810  start_i = 0;
3811  length = s->inter_ac_vlc_length;
3812  last_length= s->inter_ac_vlc_last_length;
3813  }
3814  last_non_zero = s->block_last_index[n];
3815 
3816 #ifdef REFINE_STATS
3817 {START_TIMER
3818 #endif
3819  dc += (1<<(RECON_SHIFT-1));
3820  for(i=0; i<64; i++){
3821  rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3822  }
3823 #ifdef REFINE_STATS
3824 STOP_TIMER("memset rem[]")}
3825 #endif
3826  sum=0;
3827  for(i=0; i<64; i++){
3828  int one= 36;
3829  int qns=4;
3830  int w;
3831 
3832  w= FFABS(weight[i]) + qns*one;
3833  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3834 
3835  weight[i] = w;
3836 // w=weight[i] = (63*qns + (w/2)) / w;
3837 
3838  av_assert2(w>0);
3839  av_assert2(w<(1<<6));
3840  sum += w*w;
3841  }
3842  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3843 #ifdef REFINE_STATS
3844 {START_TIMER
3845 #endif
3846  run=0;
3847  rle_index=0;
3848  for(i=start_i; i<=last_non_zero; i++){
3849  int j= perm_scantable[i];
3850  const int level= block[j];
3851  int coeff;
3852 
3853  if(level){
3854  if(level<0) coeff= qmul*level - qadd;
3855  else coeff= qmul*level + qadd;
3856  run_tab[rle_index++]=run;
3857  run=0;
3858 
3859  s->dsp.add_8x8basis(rem, basis[j], coeff);
3860  }else{
3861  run++;
3862  }
3863  }
3864 #ifdef REFINE_STATS
3865 if(last_non_zero>0){
3866 STOP_TIMER("init rem[]")
3867 }
3868 }
3869 
3870 {START_TIMER
3871 #endif
3872  for(;;){
3873  int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3874  int best_coeff=0;
3875  int best_change=0;
3876  int run2, best_unquant_change=0, analyze_gradient;
3877 #ifdef REFINE_STATS
3878 {START_TIMER
3879 #endif
3880  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3881 
3882  if(analyze_gradient){
3883 #ifdef REFINE_STATS
3884 {START_TIMER
3885 #endif
3886  for(i=0; i<64; i++){
3887  int w= weight[i];
3888 
3889  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3890  }
3891 #ifdef REFINE_STATS
3892 STOP_TIMER("rem*w*w")}
3893 {START_TIMER
3894 #endif
3895  s->dsp.fdct(d1);
3896 #ifdef REFINE_STATS
3897 STOP_TIMER("dct")}
3898 #endif
3899  }
3900 
3901  if(start_i){
3902  const int level= block[0];
3903  int change, old_coeff;
3904 
3905  av_assert2(s->mb_intra);
3906 
3907  old_coeff= q*level;
3908 
3909  for(change=-1; change<=1; change+=2){
3910  int new_level= level + change;
3911  int score, new_coeff;
3912 
3913  new_coeff= q*new_level;
3914  if(new_coeff >= 2048 || new_coeff < 0)
3915  continue;
3916 
3917  score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3918  if(score<best_score){
3919  best_score= score;
3920  best_coeff= 0;
3921  best_change= change;
3922  best_unquant_change= new_coeff - old_coeff;
3923  }
3924  }
3925  }
3926 
3927  run=0;
3928  rle_index=0;
3929  run2= run_tab[rle_index++];
3930  prev_level=0;
3931  prev_run=0;
3932 
3933  for(i=start_i; i<64; i++){
3934  int j= perm_scantable[i];
3935  const int level= block[j];
3936  int change, old_coeff;
3937 
3938  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3939  break;
3940 
3941  if(level){
3942  if(level<0) old_coeff= qmul*level - qadd;
3943  else old_coeff= qmul*level + qadd;
3944  run2= run_tab[rle_index++]; //FIXME ! maybe after last
3945  }else{
3946  old_coeff=0;
3947  run2--;
3948  av_assert2(run2>=0 || i >= last_non_zero );
3949  }
3950 
3951  for(change=-1; change<=1; change+=2){
3952  int new_level= level + change;
3953  int score, new_coeff, unquant_change;
3954 
3955  score=0;
3956  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3957  continue;
3958 
3959  if(new_level){
3960  if(new_level<0) new_coeff= qmul*new_level - qadd;
3961  else new_coeff= qmul*new_level + qadd;
3962  if(new_coeff >= 2048 || new_coeff <= -2048)
3963  continue;
3964  //FIXME check for overflow
3965 
3966  if(level){
3967  if(level < 63 && level > -63){
3968  if(i < last_non_zero)
3969  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3970  - length[UNI_AC_ENC_INDEX(run, level+64)];
3971  else
3972  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3973  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3974  }
3975  }else{
3976  av_assert2(FFABS(new_level)==1);
3977 
3978  if(analyze_gradient){
3979  int g= d1[ scantable[i] ];
3980  if(g && (g^new_level) >= 0)
3981  continue;
3982  }
3983 
3984  if(i < last_non_zero){
3985  int next_i= i + run2 + 1;
3986  int next_level= block[ perm_scantable[next_i] ] + 64;
3987 
3988  if(next_level&(~127))
3989  next_level= 0;
3990 
3991  if(next_i < last_non_zero)
3992  score += length[UNI_AC_ENC_INDEX(run, 65)]
3993  + length[UNI_AC_ENC_INDEX(run2, next_level)]
3994  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3995  else
3996  score += length[UNI_AC_ENC_INDEX(run, 65)]
3997  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3998  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3999  }else{
4000  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4001  if(prev_level){
4002  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4003  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4004  }
4005  }
4006  }
4007  }else{
4008  new_coeff=0;
4009  av_assert2(FFABS(level)==1);
4010 
4011  if(i < last_non_zero){
4012  int next_i= i + run2 + 1;
4013  int next_level= block[ perm_scantable[next_i] ] + 64;
4014 
4015  if(next_level&(~127))
4016  next_level= 0;
4017 
4018  if(next_i < last_non_zero)
4019  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4020  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4021  - length[UNI_AC_ENC_INDEX(run, 65)];
4022  else
4023  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4024  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4025  - length[UNI_AC_ENC_INDEX(run, 65)];
4026  }else{
4027  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4028  if(prev_level){
4029  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4030  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4031  }
4032  }
4033  }
4034 
4035  score *= lambda;
4036 
4037  unquant_change= new_coeff - old_coeff;
4038  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4039 
4040  score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
4041  if(score<best_score){
4042  best_score= score;
4043  best_coeff= i;
4044  best_change= change;
4045  best_unquant_change= unquant_change;
4046  }
4047  }
4048  if(level){
4049  prev_level= level + 64;
4050  if(prev_level&(~127))
4051  prev_level= 0;
4052  prev_run= run;
4053  run=0;
4054  }else{
4055  run++;
4056  }
4057  }
4058 #ifdef REFINE_STATS
4059 STOP_TIMER("iterative step")}
4060 #endif
4061 
4062  if(best_change){
4063  int j= perm_scantable[ best_coeff ];
4064 
4065  block[j] += best_change;
4066 
4067  if(best_coeff > last_non_zero){
4068  last_non_zero= best_coeff;
4069  av_assert2(block[j]);
4070 #ifdef REFINE_STATS
4071 after_last++;
4072 #endif
4073  }else{
4074 #ifdef REFINE_STATS
4075 if(block[j]){
4076  if(block[j] - best_change){
4077  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4078  raise++;
4079  }else{
4080  lower++;
4081  }
4082  }else{
4083  from_zero++;
4084  }
4085 }else{
4086  to_zero++;
4087 }
4088 #endif
4089  for(; last_non_zero>=start_i; last_non_zero--){
4090  if(block[perm_scantable[last_non_zero]])
4091  break;
4092  }
4093  }
4094 #ifdef REFINE_STATS
4095 count++;
4096 if(256*256*256*64 % count == 0){
4097  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4098 }
4099 #endif
4100  run=0;
4101  rle_index=0;
4102  for(i=start_i; i<=last_non_zero; i++){
4103  int j= perm_scantable[i];
4104  const int level= block[j];
4105 
4106  if(level){
4107  run_tab[rle_index++]=run;
4108  run=0;
4109  }else{
4110  run++;
4111  }
4112  }
4113 
4114  s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4115  }else{
4116  break;
4117  }
4118  }
4119 #ifdef REFINE_STATS
4120 if(last_non_zero>0){
4121 STOP_TIMER("iterative search")
4122 }
4123 }
4124 #endif
4125 
4126  return last_non_zero;
4127 }
4128 
4130  int16_t *block, int n,
4131  int qscale, int *overflow)
4132 {
4133  int i, j, level, last_non_zero, q, start_i;
4134  const int *qmat;
4135  const uint8_t *scantable= s->intra_scantable.scantable;
4136  int bias;
4137  int max=0;
4138  unsigned int threshold1, threshold2;
4139 
4140  s->dsp.fdct (block);
4141 
4142  if(s->dct_error_sum)
4143  s->denoise_dct(s, block);
4144 
4145  if (s->mb_intra) {
4146  if (!s->h263_aic) {
4147  if (n < 4)
4148  q = s->y_dc_scale;
4149  else
4150  q = s->c_dc_scale;
4151  q = q << 3;
4152  } else
4153  /* For AIC we skip quant/dequant of INTRADC */
4154  q = 1 << 3;
4155 
4156  /* note: block[0] is assumed to be positive */
4157  block[0] = (block[0] + (q >> 1)) / q;
4158  start_i = 1;
4159  last_non_zero = 0;
4160  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4162  } else {
4163  start_i = 0;
4164  last_non_zero = -1;
4165  qmat = s->q_inter_matrix[qscale];
4167  }
4168  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4169  threshold2= (threshold1<<1);
4170  for(i=63;i>=start_i;i--) {
4171  j = scantable[i];
4172  level = block[j] * qmat[j];
4173 
4174  if(((unsigned)(level+threshold1))>threshold2){
4175  last_non_zero = i;
4176  break;
4177  }else{
4178  block[j]=0;
4179  }
4180  }
4181  for(i=start_i; i<=last_non_zero; i++) {
4182  j = scantable[i];
4183  level = block[j] * qmat[j];
4184 
4185 // if( bias+level >= (1<<QMAT_SHIFT)
4186 // || bias-level >= (1<<QMAT_SHIFT)){
4187  if(((unsigned)(level+threshold1))>threshold2){
4188  if(level>0){
4189  level= (bias + level)>>QMAT_SHIFT;
4190  block[j]= level;
4191  }else{
4192  level= (bias - level)>>QMAT_SHIFT;
4193  block[j]= -level;
4194  }
4195  max |=level;
4196  }else{
4197  block[j]=0;
4198  }
4199  }
4200  *overflow= s->max_qcoeff < max; //overflow might have happened
4201 
4202  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4204  ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4205 
4206  return last_non_zero;
4207 }
4208 
4209 #define OFFSET(x) offsetof(MpegEncContext, x)
4210 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4211 static const AVOption h263_options[] = {
4212  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4213  { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4214  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4216  { NULL },
4217 };
4218 
4219 static const AVClass h263_class = {
4220  .class_name = "H.263 encoder",
4221  .item_name = av_default_item_name,
4222  .option = h263_options,
4223  .version = LIBAVUTIL_VERSION_INT,
4224 };
4225 
4227  .name = "h263",
4228  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4229  .type = AVMEDIA_TYPE_VIDEO,
4230  .id = AV_CODEC_ID_H263,
4231  .priv_data_size = sizeof(MpegEncContext),
4233  .encode2 = ff_MPV_encode_picture,
4235  .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4236  .priv_class = &h263_class,
4237 };
4238 
4239 static const AVOption h263p_options[] = {
4240  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4241  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4242  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4243  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4245  { NULL },
4246 };
4247 static const AVClass h263p_class = {
4248  .class_name = "H.263p encoder",
4249  .item_name = av_default_item_name,
4250  .option = h263p_options,
4251  .version = LIBAVUTIL_VERSION_INT,
4252 };
4253 
4255  .name = "h263p",
4256  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4257  .type = AVMEDIA_TYPE_VIDEO,
4258  .id = AV_CODEC_ID_H263P,
4259  .priv_data_size = sizeof(MpegEncContext),
4261  .encode2 = ff_MPV_encode_picture,
4263  .capabilities = CODEC_CAP_SLICE_THREADS,
4264  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4265  .priv_class = &h263p_class,
4266 };
4267 
4268 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4269 
4271  .name = "msmpeg4v2",
4272  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4273  .type = AVMEDIA_TYPE_VIDEO,
4274  .id = AV_CODEC_ID_MSMPEG4V2,
4275  .priv_data_size = sizeof(MpegEncContext),
4277  .encode2 = ff_MPV_encode_picture,
4279  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4280  .priv_class = &msmpeg4v2_class,
4281 };
4282 
4283 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4284 
4286  .name = "msmpeg4",
4287  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4288  .type = AVMEDIA_TYPE_VIDEO,
4289  .id = AV_CODEC_ID_MSMPEG4V3,
4290  .priv_data_size = sizeof(MpegEncContext),
4292  .encode2 = ff_MPV_encode_picture,
4294  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4295  .priv_class = &msmpeg4v3_class,
4296 };
4297 
4299 
4301  .name = "wmv1",
4302  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4303  .type = AVMEDIA_TYPE_VIDEO,
4304  .id = AV_CODEC_ID_WMV1,
4305  .priv_data_size = sizeof(MpegEncContext),
4307  .encode2 = ff_MPV_encode_picture,
4309  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4310  .priv_class = &wmv1_class,
4311 };
int last_time_base
Definition: mpegvideo.h:561
#define QUANT_BIAS_SHIFT
Definition: mpegvideo.h:480
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:104
av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:928
int chroma_elim_threshold
Definition: mpegvideo.h:268
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:513
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
Definition: mpegvideo.c:502
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:516
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1500
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:300
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:757
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3018
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1512
float v
int picture_number
Definition: mpegvideo.h:279
#define FF_CMP_NSSE
Definition: avcodec.h:1553
const char * s
Definition: avisynth_c.h:668
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:66
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64])
Definition: mjpegenc.c:459
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:78
#define CONFIG_WMV2_ENCODER
Definition: config.h:1124
me_cmp_func frame_skip_cmp[6]
Definition: dsputil.h:168
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:279
int esc3_level_length
Definition: mpegvideo.h:646
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
number of bits to represent the fractional part of time
Definition: mpegvideo.h:560
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:101
This structure describes decoded (raw) audio or video data.
Definition: frame.h:96
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Allocate a Picture.
Definition: mpegvideo.c:378
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:409
int mpeg_quant
0-&gt; h263 quant 1-&gt; mpeg quant
Definition: avcodec.h:1434
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegvideo.h:369
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
AVOption.
Definition: opt.h:253
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:438
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y &lt;= row &lt; end_mb_y) ...
Definition: mpegvideo.h:320
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:428
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:869
const uint8_t * y_dc_scale_table
qscale -&gt; y_dc_scale table
Definition: mpegvideo.h:355
void(* emulated_edge_mc)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:61
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideo.h:126
#define av_always_inline
Definition: attributes.h:41
#define CONFIG_LJPEG_ENCODER
Definition: config.h:1086
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:704
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG1 &amp; B-frame MPEG4
Definition: mpegvideo.h:437
int pre_pass
= 1 for the pre pass
Definition: mpegvideo.h:213
#define CONFIG_RV10_ENCODER
Definition: config.h:1109
av_cold int ff_MPV_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:1003
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)&gt;&gt;1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:756
void(* get_pixels)(int16_t *block, const uint8_t *pixels, int line_size)
Definition: dsputil.h:126
#define FF_MPV_GENERIC_CLASS(name)
Definition: mpegvideo.h:777
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:160
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
Definition: dsputil.h:265
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: avcodec.h:4153
RateControlEntry * entry
Definition: ratecontrol.h:65
float border_masking
Border processing masking, raises the quantizer for mbs on the borders of the picture.
Definition: avcodec.h:1742
#define LIBAVUTIL_VERSION_INT
Definition: avcodec.h:820
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideo.h:464
const char * g
Definition: vf_curves.c:104
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:703
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:769
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y &lt;= row &lt; end_mb_y) ...
Definition: mpegvideo.h:321
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideo.h:117
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:501
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
Definition: mpegvideo.c:3058
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:477
static int estimate_qp(MpegEncContext *s, int dry_run)
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideo.h:460
void ff_MPV_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
int acc
Definition: yuv2rgb.c:512
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1397
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:361
MJPEG encoder.
#define FF_CMP_VSSE
Definition: avcodec.h:1552
int ff_MPV_encode_end(AVCodecContext *avctx)
int mjpeg_hsample[3]
horizontal sampling factors, default = {2, 1, 1}
Definition: mpegvideo.h:632
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:763
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
Definition: dsputil.h:255
#define me
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideo.h:451
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:644
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
Definition: dsputil.h:260
planar YUV 4:2:2, 16bpp, (1 Cr &amp; Cb sample per 2x1 Y samples)
Definition: avcodec.h:4538
int num
numerator
Definition: rational.h:44
void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2345
int size
Definition: avcodec.h:1064
enum AVCodecID codec_id
Definition: mpegvideo.h:261
const char * b
Definition: vf_curves.c:105
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:730
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1087
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:63
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:542
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:47
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:124
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1517
me_cmp_func sse[6]
Definition: dsputil.h:149
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:371
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:35
void av_log(void *avcl, int level, const char *fmt,...) av_printf_format(3
Send the specified message to the log if the level is less than or equal to the current av_log_level...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1342
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:415
static int select_input_picture(MpegEncContext *s)
void ff_convert_matrix(DSPContext *dsp, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:69
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:483
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:1542
int coded_score[12]
Definition: mpegvideo.h:493
mpegvideo header.
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int rtp_payload_size
Definition: avcodec.h:2347
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:43
int scene_change_score
Definition: mpegvideo.h:228
int mpv_flags
flags set by private options
Definition: mpegvideo.h:737
uint8_t permutated[64]
Definition: dsputil.h:113
int intra_quant_bias
intra quantizer bias
Definition: avcodec.h:1633
static const AVClass h263_class
uint8_t run
Definition: svq3.c:145
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:486
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:606
const uint16_t ff_h263_format[8][2]
Definition: h263data.h:239
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:491
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:286
Pixel format.
Definition: avcodec.h:4533
int lmax
maximum Lagrange multipler
Definition: avcodec.h:2283
int frame_skip_cmp
frame skip comparison function
Definition: avcodec.h:2311
#define FF_LAMBDA_SHIFT
Definition: avcodec.h:2255
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:958
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:2922
#define av_cold
Definition: avcodec.h:653
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:562
int qscale
QP.
Definition: mpegvideo.h:373
int h263_aic
Advanded INTRA Coding (AIC)
Definition: mpegvideo.h:296
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:411
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:312
int chroma_x_shift
Definition: mpegvideo.h:684
#define INPLACE_OFFSET
Definition: mpegvideo.h:74
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:263
uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2813
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1254
int field_select[2][2]
Definition: mpegvideo.h:436
#define CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:694
int scenechange_threshold
scene change detection threshold 0 is default, larger means fewer detected scene changes.
Definition: avcodec.h:1690
#define CONFIG_RV20_ENCODER
Definition: config.h:1110
int(* pix_sum)(uint8_t *pix, int line_size)
Definition: dsputil.h:144
int quant_precision
Definition: mpegvideo.h:583
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
#define CODEC_FLAG_INPUT_PRESERVED
The parent program guarantees that the input for B-frames containing streams is not written to for at...
Definition: avcodec.h:702
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2638
Predicted.
Definition: avcodec.h:2305
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1265
void av_freep(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:234
int modified_quant
Definition: mpegvideo.h:555
#define FFMAX3(a, b, c)
Definition: avcodec.h:924
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
if((e=av_dict_get(options,"", NULL, AV_DICT_IGNORE_SUFFIX)))
Definition: avfilter.c:965
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:759
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:389
int b_frame_score
Definition: mpegvideo.h:175
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: avcodec.h:4537
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:43
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1668
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:707
float p_masking
p block masking (0-&gt; disabled)
Definition: avcodec.h:1478
int picture_in_gop_number
0-&gt; first pic in gop, ...
Definition: mpegvideo.h:280
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:554
uint8_t * ptr_lastgob
Definition: mpegvideo.h:701
int64_t time
time of current frame
Definition: mpegvideo.h:563
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:64
static int encode_picture(MpegEncContext *s, int picture_number)
#define M_PI
Definition: mathematics.h:46
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1212
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4) ...
Definition: mpegvideo.h:423
const char * av_default_item_name(void *ctx)
Return the context name.
Definition: log.c:145
uint8_t bits
Definition: crc.c:260
uint8_t
#define RECON_SHIFT
Definition: dsputil.h:258
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:290
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:55
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:735
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:603
enum OutputFormat out_format
output format
Definition: mpegvideo.h:253
static const uint8_t offset[511][2]
Definition: vf_uspp.c:58
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:507
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:84
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only &quot;metadata&quot; fields from src to dst.
Definition: frame.c:446
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
Multithreading support functions.
int pre_dia_size
ME prepass diamond size &amp; shape.
Definition: avcodec.h:1593
AVCodec ff_h263_encoder
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define emms_c()
Definition: internal.h:49
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:182
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:527
const char * name
Name of the codec implementation.
Definition: avcodec.h:2929
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:443
int interlaced_dct
Definition: mpegvideo.h:689
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:497
int me_cmp
motion estimation comparison function
Definition: avcodec.h:1524
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by a given frame.
Definition: frame.c:247
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:347
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:1834
#define CHROMA_420
Definition: mpegvideo.h:681
int intra_dc_precision
Definition: mpegvideo.h:671
int repeat_first_field
Definition: mpegvideo.h:678
const char data[16]
Definition: mxf.c:68
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1088
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:412
S(GMC)-VOP MPEG4.
Definition: avcodec.h:2307
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
uint8_t(* mv_penalty)[MAX_MV *2+1]
amount of bits needed to encode a MV
Definition: mpegvideo.h:234
void ff_set_cmp(DSPContext *c, me_cmp_func *cmp, int type)
Definition: dsputil.c:1872
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:151
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int mb_threshold
Macroblock threshold below which the user specified macroblock types will be used.
Definition: avcodec.h:1713
void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:31
void ff_init_qscale_tab(MpegEncContext *s)
init s-&gt;current_picture.qscale_table from s-&gt;lambda_table
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:565
uint8_t idct_permutation[64]
idct input permutation.
Definition: dsputil.h:246
#define lrintf(x)
Definition: libm_mips.h:70
me_cmp_func ildct_cmp[6]
Definition: dsputil.h:167
const uint8_t * scantable
Definition: dsputil.h:112
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:265
int mb_height
number of MBs horizontally &amp; vertically
Definition: mpegvideo.h:281
void ff_MPV_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1734
float lumi_masking
luminance masking (0-&gt; disabled)
Definition: avcodec.h:1457
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:484
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:758
#define CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:716
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:48
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:379
int flipflop_rounding
Definition: mpegvideo.h:643
#define CHROMA_444
Definition: mpegvideo.h:683
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num &amp; mpeg1 specific
Definition: mpegvideo.h:657
uint8_t * mb_info_ptr
Definition: mpegvideo.h:546
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:866
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: dsputil.h:186
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideo.h:458
me_cmp_func nsse[6]
Definition: dsputil.h:157
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:3317
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:498
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1113
static AVFrame * frame
Definition: demuxing.c:51
int ff_MPV_encode_init(AVCodecContext *avctx)
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:880
#define CODEC_FLAG_LOOP_FILTER
loop filter
Definition: avcodec.h:717
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:496
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:251
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2505
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:354
enum AVCodecID id
Definition: avcodec.h:2936
int h263_plus
h263 plus headers
Definition: mpegvideo.h:258
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:323
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideo.h:457
int last_non_b_pict_type
used for mpeg4 gmc b-frames &amp; ratecontrol
Definition: mpegvideo.h:384
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:69
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: avcodec.h:4147
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:207
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1427
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:352
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:489
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert 
*ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);ff_audio_convert_init_arm(ac);ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:718
packed BGR 8:8:8, 32bpp, BGR0BGR0...
Definition: avcodec.h:4692
int64_t total_bits
Definition: mpegvideo.h:512
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:362
#define ARCH_X86
Definition: config.h:35
int chroma_y_shift
Definition: mpegvideo.h:685
int strict_std_compliance
strictly follow the std (MPEG4, ...)
Definition: mpegvideo.h:269
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:594
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: dsputil.h:187
int frame_skip_threshold
frame skip threshold
Definition: avcodec.h:2290
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:87
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:1530
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: avcodec.h:4548
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2588
int qmax
maximum quantizer
Definition: avcodec.h:2166
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:45
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:392
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:151
ERContext er
Definition: mpegvideo.h:747
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:388
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegvideo.h:178
const char * r
Definition: vf_curves.c:103
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: avcodec.h:4168
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:368
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
struct AVCodec * codec
Definition: avcodec.h:1155
const char * arg
Definition: jacosubdec.c:69
int h263_slice_structured
Definition: mpegvideo.h:553
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1234
uint8_t * buf
Definition: put_bits.h:44
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:214
#define CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:692
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2213
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideo.h:452
int64_t av_const av_gcd(int64_t a, int64_t b)
Return the greatest common divisor of a and b.
Definition: mathematics.c:55
int ff_check_alignment(void)
Definition: dsputil.c:2658
int quarter_sample
1-&gt;qpel, 0-&gt;half pel ME/MC
Definition: mpegvideo.h:584
uint16_t * mb_type
Table for candidate MB types for encoding.
Definition: mpegvideo.h:450
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:429
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:597
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
Deallocate a picture.
Definition: mpegvideo.c:435
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:418
#define BASIS_SHIFT
Definition: dsputil.h:257
Libavcodec external API header.
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:298
#define CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:712
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1069
int off
Definition: dsputil_bfin.c:29
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:73
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:531
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2191
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in h263 (limit difference to -2..2)
Definition: ituh263enc.c:272
int coded_picture_number
used to set pic-&gt;coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:278
int * lambda_table
Definition: mpegvideo.h:377
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1720
int me_penalty_compensation
Definition: avcodec.h:1763
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
#define QMAT_SHIFT
Definition: mpegvideo.h:60
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:487
#define FF_ARRAY_ELEMS(a)
Definition: avcodec.h:929
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:138
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:682
uint32_t ff_squareTbl[512]
Definition: dsputil.c:46
int bit_rate
the average bitrate
Definition: avcodec.h:1204
int progressive_frame
Definition: mpegvideo.h:687
void ff_mjpeg_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:206
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:167
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:351
int ff_h263_get_gob_height(MpegEncContext *s)
Get the GOB height based on picture height.
Definition: h263.c:374
int display_picture_number
picture number in display order
Definition: frame.h:203
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:502
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:660
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:262
float y
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type &amp; qscale so that encoding is actually possible in mpeg4
#define MAX_MB_BYTES
Definition: mpegvideo.h:72
int me_method
ME algorithm.
Definition: mpegvideo.h:419
int umvplus
== H263+ &amp;&amp; unrestricted_mv
Definition: mpegvideo.h:551
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:341
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:1675
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:481
ret
Definition: avfilter.c:961
int width
picture width / height.
Definition: avcodec.h:1314
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:105
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:351
Picture.
Definition: mpegvideo.h:97
void * av_malloc(size_t size) av_malloc_attrib 1(1)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
int alternate_scan
Definition: mpegvideo.h:677
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2236
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideo.h:453
void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:115
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:741
int b_frame_strategy
Definition: avcodec.h:1412
#define FFMIN(a, b)
Definition: avcodec.h:925
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:277
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX &amp; these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:500
#define FF_NO_IDCT_PERM
Definition: dsputil.h:248
perm
Definition: f_perms.c:74
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:208
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:711
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:149
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:295
MotionEstContext me
Definition: mpegvideo.h:441
int n
Definition: avisynth_c.h:588
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
#define EDGE_BOTTOM
Definition: dsputil.h:263
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:1665
#define CONFIG_FLV_ENCODER
Definition: config.h:1078
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:365
#define L(x)
Definition: vp56_arith.h:36
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: imgconvert.c:65
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function for encode/decode called after coding/decoding the header and before a frame is code...
Definition: mpegvideo.c:1506
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:485
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideo.h:459
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2426
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideo.h:454
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
int AC3_NAME() encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2596
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:469
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
int * mb_index2xy
mb_index -&gt; mb_x + mb_y*mb_stride
Definition: mpegvideo.h:473
int inter_quant_bias
inter quantizer bias
Definition: avcodec.h:1641
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:62
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: avcodec.h:4546
int mjpeg_vsample[3]
vertical sampling factors, default = {2, 1, 1}
Definition: mpegvideo.h:631
AVCodec ff_h263p_encoder
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:425
int frame_skip_factor
frame skip factor
Definition: avcodec.h:2297
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:642
int frame_pred_frame_dct
Definition: mpegvideo.h:672
planar YUV 4:2:0, 12bpp, (1 Cr &amp; Cb sample per 2x2 Y samples)
Definition: avcodec.h:4534
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideo.h:120
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:24
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:422
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideo.c:114
int coded_picture_number
picture number in bitstream order
Definition: frame.h:199
#define AV_LOG_INFO
Standard information.
Definition: avcodec.h:4158
uint16_t inter_matrix[64]
Definition: mpegvideo.h:478
#define FF_LAMBDA_SCALE
Definition: avcodec.h:2256
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:564
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:229
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:322
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:64
AVS_Value src
Definition: avisynth_c.h:523
unsigned int lambda2
(lambda*lambda) &gt;&gt; FF_LAMBDA_SHIFT
Definition: mpegvideo.h:376
void ff_faandct(int16_t *data)
Definition: faandct.c:121
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:45
#define FFMAX(a, b)
Definition: avcodec.h:923
int h263_flv
use flv h263 header
Definition: mpegvideo.h:259
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:287
enum AVCodecID codec_id
Definition: avcodec.h:1157
static av_const unsigned int ff_sqrt(unsigned int a)
Definition: mathops.h:207
int idct_permutation_type
Definition: dsputil.h:247
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
#define START_TIMER
Definition: timer.h:74
int frame_bits
number of bits used for the previously encoded frame
Definition: avcodec.h:2369
void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:81
main external API structure.
Definition: avcodec.h:1146
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:538
ScanTable intra_scantable
Definition: mpegvideo.h:300
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
Definition: dsputil.h:81
int pre_me
prepass for motion estimation
Definition: avcodec.h:1579
int qmin
minimum quantizer
Definition: avcodec.h:2159
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:249
int ff_dct_encode_init(MpegEncContext *s)
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:311
void ff_mjpeg_encode_picture_trailer(MpegEncContext *s)
Definition: mjpegenc.c:370
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:307
void(* diff_pixels)(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride)
Definition: dsputil.h:127
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)&gt;&gt;1.
Definition: hpeldsp.h:56
uint8_t * buf_end
Definition: put_bits.h:44
float spatial_cplx_masking
spatial complexity masking (0-&gt; disabled)
Definition: avcodec.h:1471
int luma_elim_threshold
Definition: mpegvideo.h:267
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1672
Picture * picture
main picture buffer
Definition: mpegvideo.h:289
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:593
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:366
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:488
int progressive_sequence
Definition: mpegvideo.h:663
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2920
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:1634
h261codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:246
uint8_t * buf_ptr
Definition: put_bits.h:44
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
Definition: utils.c:1046
Describe the class of an AVClass context structure.
Definition: log.h:50
Bi-dir predicted.
Definition: avcodec.h:2306
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:514
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)&gt;&gt;1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:416
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2425
int index
Definition: gxfenc.c:89
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
uint8_t * data
Definition: avcodec.h:1063
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic-&gt;display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:277
AVCodec ff_wmv1_encoder
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
Definition: dsputil.h:256
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:288
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:544
planar YUV 4:4:4, 24bpp, (1 Cr &amp; Cb sample per 1x1 Y samples)
Definition: avcodec.h:4539
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
DSPContext dsp
pointers for accelerated dsp functions
Definition: mpegvideo.h:395
int(* pix_norm1)(uint8_t *pix, int line_size)
Definition: dsputil.h:145
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:124
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2520
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1157
int frame_skip_exp
frame skip exponent
Definition: avcodec.h:2304
#define MAX_MV
Definition: mpegvideo.h:63
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:387
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:399
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1073
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:121
#define MAX_FCODE
Definition: mpegvideo.h:62
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideo.h:465
#define type
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV *2+1]
Definition: mpegvideo_enc.c:61
#define QMAT_SHIFT_MMX
Definition: mpegvideo.h:59
#define CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:719
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1247
#define MV_DIR_FORWARD
Definition: mpegvideo.h:421
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:266
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:381
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1667
int bit_rate
wanted bit rate
Definition: mpegvideo.h:252
#define ROUNDED_DIV(a, b)
Definition: avcodec.h:914
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FFABS(a)
Definition: avcodec.h:920
int last_mv_dir
last mv_dir, used for b frame encoding
Definition: mpegvideo.h:658
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideo.h:463
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:254
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:413
void * priv_data
Definition: avcodec.h:1182
float dark_masking
darkness masking (0-&gt; disabled)
Definition: avcodec.h:1485
float temporal_cplx_masking
temporary complexity masking (0-&gt; disabled)
Definition: avcodec.h:1464
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:295
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:417
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:414
static const uint16_t scale[4]
AAN (Arai Agui Nakajima) (I)DCT tables.
const uint8_t * c_dc_scale_table
qscale -&gt; c_dc_scale table
Definition: mpegvideo.h:356
uint8_t level
Definition: svq3.c:146
int mc_mb_var_sum_temp
Definition: mpegvideo.h:226
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second &quot; : depend...
Definition: mpegvideo.h:435
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:410
int noise_reduction
noise reduction strength
Definition: avcodec.h:1697
static int estimate_motion_thread(AVCodecContext *c, void *arg)
MpegEncContext.
Definition: mpegvideo.h:245
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:350
#define FF_MAX_B_FRAMES
Definition: avcodec.h:680
int8_t * qscale_table
Definition: mpegvideo.h:102
struct AVCodecContext * avctx
Definition: mpegvideo.h:247
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1333
PutBitContext pb
bit output
Definition: mpegvideo.h:318
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1090
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1089
#define CODEC_FLAG_EMU_EDGE
Don&#39;t draw edges.
Definition: avcodec.h:706
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:919
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:1536
int quantizer_noise_shaping
Definition: mpegvideo.h:738
int(* dct_error_sum)[64]
Definition: mpegvideo.h:505
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1967
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:815
common internal api header.
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:115
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left &amp; top MBs without sig11 ...
Definition: mpegvideo.h:282
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:89
void ff_MPV_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:810
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:705
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.h:262
uint8_t * dest[3]
Definition: mpegvideo.h:471
int shared
Definition: mpegvideo.h:179
void ff_mjpeg_encode_stuffing(MpegEncContext *s)
Definition: mjpegenc.c:351
static double c[64]
#define CONFIG_H261_ENCODER
Definition: config.h:1080
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:453
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:378
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1666
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: avcodec.h:4565
static int16_t basis[64][64]
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:274
static int score_tab[256]
Definition: zmbvenc.c:58
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:329
me_cmp_func sad[6]
Definition: dsputil.h:148
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there&#39;s a delay
Definition: mpegvideo.h:315
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:314
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:54
int den
denominator
Definition: rational.h:45
#define CONFIG_H263_ENCODER
Definition: config.h:1081
#define CONFIG_H263P_ENCODER
Definition: config.h:1082
const uint8_t * chroma_qscale_table
qscale -&gt; chroma_qscale (h263)
Definition: mpegvideo.h:357
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:643
int trellis
trellis RD quantization
Definition: avcodec.h:2318
DSP utils.
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideo.h:467
void ff_mpeg4_stuffing(PutBitContext *pbc)
add mpeg4 stuffing bits (01...1)
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:706
#define STOP_TIMER(id)
Definition: timer.h:75
int slices
Number of slices.
Definition: avcodec.h:1864
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:373
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:528
void ff_mpeg4_init_partitions(MpegEncContext *s)
int dia_size
ME diamond size &amp; shape.
Definition: avcodec.h:1565
int b_sensitivity
Adjust sensitivity of b_frame_strategy 1.
Definition: avcodec.h:1820
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2636
VideoDSPContext vdsp
Definition: mpegvideo.h:398
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void ff_MPV_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1257
void(* fdct)(int16_t *block)
Definition: dsputil.h:215
#define av_log2
Definition: intmath.h:89
int error_rate
Simulates errors in the bitstream to test error concealment.
Definition: avcodec.h:2796
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1619
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:532
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:705
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:32
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:602
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:335
int key_frame
1 -&gt; keyframe, 0-&gt; not
Definition: frame.h:162
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avcodec.h:2257
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:1682
#define AVERROR(e)
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1722
Picture ** reordered_input_picture
pointer to the next pictures in codedorder for encoding
Definition: mpegvideo.h:291
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1241
static struct twinvq_data tab
struct AVFrame f
Definition: mpegvideo.h:98
int64_t dts
Decompression timestamp in AVStream-&gt;time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1062
int mb_var_sum
sum of MB variance for current frame
Definition: mpegvideo.h:172
static int encode_thread(AVCodecContext *c, void *arg)
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:264
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:734
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideo.h:173
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:476
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:115
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 INT64 count
Definition: avisynth_c.h:594
void INT64 start
Definition: avisynth_c.h:594
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: dsputil.h:188
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Floating point AAN DCT
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:482
static AVPacket pkt
Definition: demuxing.c:52
const char int length
Definition: avisynth_c.h:668
int me_method
Motion estimation algorithm used for video coding.
Definition: avcodec.h:1351
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1466
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:733
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:426
int rc_min_rate
minimum bitrate
Definition: avcodec.h:2220
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:400
#define CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:693
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:316
static void MPV_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: avcodec.h:4547
int dct_count[2]
Definition: mpegvideo.h:506
This structure stores compressed data.
Definition: avcodec.h:1040
int delay
Codec delay.
Definition: avcodec.h:1302
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2421
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame, int *got_packet)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:107
int64_t pts
Presentation timestamp in AVStream-&gt;time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1056
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideo.h:462
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:3133
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avcodec.h:2278
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
unsigned int lambda
lagrange multipler used in rate distortion
Definition: mpegvideo.h:375
DSPContext.
Definition: dsputil.h:124
AVCodec ff_msmpeg4v2_encoder
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:566
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
HpelDSPContext hdsp
Definition: mpegvideo.h:397
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:515
static int16_t block[64]
Definition: dct-test.c:198