FFmpeg  4.3.9
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "dct.h"
43 #include "idctdsp.h"
44 #include "mpeg12.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "h261.h"
48 #include "h263.h"
49 #include "h263data.h"
50 #include "mjpegenc_common.h"
51 #include "mathops.h"
52 #include "mpegutils.h"
53 #include "mjpegenc.h"
54 #include "msmpeg4.h"
55 #include "pixblockdsp.h"
56 #include "qpeldsp.h"
57 #include "faandct.h"
58 #include "thread.h"
59 #include "aandcttab.h"
60 #include "flv.h"
61 #include "mpeg4video.h"
62 #include "internal.h"
63 #include "bytestream.h"
64 #include "wmv2.h"
65 #include "rv10.h"
66 #include "packet_internal.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
/*
 * Build per-qscale quantizer multiplier tables from a quantization matrix.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale][0..63] with
 * fixed-point reciprocals of (qscale2 * quant_matrix[j]) so the encoder can
 * quantize with a multiply+shift instead of a division.  Three scaling
 * variants are selected by which forward DCT is in use: the islow/faandct
 * path, the ff_fdct_ifast path (which folds in the extra ff_aanscales[]
 * factor), and the generic path which additionally fills the 16-bit
 * qmat16[][0] reciprocals plus a rounded bias term in qmat16[][1]
 * (bias carries QUANT_BIAS_SHIFT fractional bits).
 *
 * Finally every entry from index `intra` upward is probed for possible
 * 32-bit overflow of (max_coeff * qmat); when a compensating shift would be
 * needed, a warning is logged — the shift is computed but only reported,
 * not applied here.
 *
 * NOTE(review): this is a line-numbered dump with gaps.  The branch that
 * computes qscale2 before the "else" below (dump line 103) and the av_log()
 * call opening the final warning (dump line 168) are missing from this
 * extract — confirm against the complete file.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
/* NOTE(review): the matching "if" computing qscale2 (dump line 103) is
 * absent from this extract. */
104  else qscale2 = qscale << 1;
105 
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
111  for (i = 0; i < 64; i++) {
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
/* Avoid 0 and the exact 16-bit boundary value so the reciprocal stays in
 * a usable 16-bit range. */
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
/* Overflow probe: find how many extra shift bits would be needed to keep
 * max_coeff * qmat within INT_MAX. */
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
/* NOTE(review): the av_log() call head (dump line 168) is missing from
 * this extract. */
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
/*
 * Derive the quantizer scale s->qscale from the current Lagrange
 * multiplier s->lambda, then refresh s->lambda2.
 *
 * The first branch would pick the closest entry of the MPEG-2 non-linear
 * qscale table, but its condition ends in "&& 0", so that branch is
 * effectively dead code and the linear mapping below is always taken:
 * qscale is a scaled-and-rounded lambda, clipped to [qmin, qmax]
 * (qmax is relaxed to 31 while s->vbv_ignore_qmax is set).
 *
 * NOTE(review): line-numbered extract with gaps — the second half of the
 * qmin/qmax test in the dead branch (dump line 184) and the shift amount
 * of the final lambda2 expression (dump line 199) are missing here;
 * confirm against the complete file.
 */
174 static inline void update_qscale(MpegEncContext *s)
175 {
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
/* NOTE(review): the second condition line (dump line 184) is absent from
 * this extract. */
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* NOTE(review): the shift amount (dump line 199) is missing from this
 * extract. */
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
255 {
256  int i;
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
270 {
271  if (ARCH_X86)
273 
276  if (!s->dct_quantize)
278  if (!s->denoise_dct)
281  if (s->avctx->trellis)
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return AVERROR(EINVAL);
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return AVERROR(EINVAL);
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return AVERROR(EINVAL);
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
349  break;
350  }
351 
352  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
358  if (avctx->me_penalty_compensation)
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
370  av_log(avctx, AV_LOG_WARNING,
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
377  if (avctx->max_b_frames > MAX_B_FRAMES) {
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
380  avctx->max_b_frames = MAX_B_FRAMES;
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
396  av_log(avctx, AV_LOG_ERROR,
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
402  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418  /* Fixed QSCALE */
419  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
420 
421  s->adaptive_quant = (s->avctx->lumi_masking ||
422  s->avctx->dark_masking ||
425  s->avctx->p_masking ||
426  s->border_masking ||
427  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
428  !s->fixed_qscale;
429 
431 
432  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
433  switch(avctx->codec_id) {
436  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437  break;
438  case AV_CODEC_ID_MPEG4:
442  if (avctx->rc_max_rate >= 15000000) {
443  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444  } else if(avctx->rc_max_rate >= 2000000) {
445  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446  } else if(avctx->rc_max_rate >= 384000) {
447  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448  } else
449  avctx->rc_buffer_size = 40;
450  avctx->rc_buffer_size *= 16384;
451  break;
452  }
453  if (avctx->rc_buffer_size) {
454  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455  }
456  }
457 
458  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460  return AVERROR(EINVAL);
461  }
462 
463  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
464  av_log(avctx, AV_LOG_INFO,
465  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
466  }
467 
468  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
469  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470  return AVERROR(EINVAL);
471  }
472 
473  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
474  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475  return AVERROR(EINVAL);
476  }
477 
478  if (avctx->rc_max_rate &&
479  avctx->rc_max_rate == avctx->bit_rate &&
480  avctx->rc_max_rate != avctx->rc_min_rate) {
481  av_log(avctx, AV_LOG_INFO,
482  "impossible bitrate constraints, this will fail\n");
483  }
484 
485  if (avctx->rc_buffer_size &&
486  avctx->bit_rate * (int64_t)avctx->time_base.num >
487  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (!s->fixed_qscale &&
493  avctx->bit_rate * av_q2d(avctx->time_base) >
494  avctx->bit_rate_tolerance) {
495  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
496  av_log(avctx, AV_LOG_WARNING,
497  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
498  if (nbt <= INT_MAX) {
499  avctx->bit_rate_tolerance = nbt;
500  } else
501  avctx->bit_rate_tolerance = INT_MAX;
502  }
503 
504  if (s->avctx->rc_max_rate &&
505  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
508  90000LL * (avctx->rc_buffer_size - 1) >
509  s->avctx->rc_max_rate * 0xFFFFLL) {
510  av_log(avctx, AV_LOG_INFO,
511  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
512  "specified vbv buffer is too large for the given bitrate!\n");
513  }
514 
515  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
517  s->codec_id != AV_CODEC_ID_FLV1) {
518  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
519  return AVERROR(EINVAL);
520  }
521 
522  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
523  av_log(avctx, AV_LOG_ERROR,
524  "OBMC is only supported with simple mb decision\n");
525  return AVERROR(EINVAL);
526  }
527 
528  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
529  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
530  return AVERROR(EINVAL);
531  }
532 
533  if (s->max_b_frames &&
534  s->codec_id != AV_CODEC_ID_MPEG4 &&
537  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
538  return AVERROR(EINVAL);
539  }
540  if (s->max_b_frames < 0) {
541  av_log(avctx, AV_LOG_ERROR,
542  "max b frames must be 0 or positive for mpegvideo based encoders\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
547  s->codec_id == AV_CODEC_ID_H263 ||
548  s->codec_id == AV_CODEC_ID_H263P) &&
549  (avctx->sample_aspect_ratio.num > 255 ||
550  avctx->sample_aspect_ratio.den > 255)) {
551  av_log(avctx, AV_LOG_WARNING,
552  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
555  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
556  }
557 
558  if ((s->codec_id == AV_CODEC_ID_H263 ||
559  s->codec_id == AV_CODEC_ID_H263P) &&
560  (avctx->width > 2048 ||
561  avctx->height > 1152 )) {
562  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
563  return AVERROR(EINVAL);
564  }
565  if (s->codec_id == AV_CODEC_ID_FLV1 &&
566  (avctx->width > 65535 ||
567  avctx->height > 65535 )) {
568  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
569  return AVERROR(EINVAL);
570  }
571  if ((s->codec_id == AV_CODEC_ID_H263 ||
572  s->codec_id == AV_CODEC_ID_H263P) &&
573  ((avctx->width &3) ||
574  (avctx->height&3) )) {
575  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
576  return AVERROR(EINVAL);
577  }
578 
579  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
580  (avctx->width > 4095 ||
581  avctx->height > 4095 )) {
582  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
583  return AVERROR(EINVAL);
584  }
585 
586  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
587  (avctx->width > 16383 ||
588  avctx->height > 16383 )) {
589  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
590  return AVERROR(EINVAL);
591  }
592 
593  if (s->codec_id == AV_CODEC_ID_RV10 &&
594  (avctx->width &15 ||
595  avctx->height&15 )) {
596  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
597  return AVERROR(EINVAL);
598  }
599 
600  if (s->codec_id == AV_CODEC_ID_RV20 &&
601  (avctx->width &3 ||
602  avctx->height&3 )) {
603  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
604  return AVERROR(EINVAL);
605  }
606 
607  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
608  s->codec_id == AV_CODEC_ID_WMV2) &&
609  avctx->width & 1) {
610  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
611  return AVERROR(EINVAL);
612  }
613 
616  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
617  return AVERROR(EINVAL);
618  }
619 
620 #if FF_API_PRIVATE_OPT
622  if (avctx->mpeg_quant)
623  s->mpeg_quant = avctx->mpeg_quant;
625 #endif
626 
627  // FIXME mpeg2 uses that too
628  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
629  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
630  av_log(avctx, AV_LOG_ERROR,
631  "mpeg2 style quantization not supported by codec\n");
632  return AVERROR(EINVAL);
633  }
634 
635  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
636  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
637  return AVERROR(EINVAL);
638  }
639 
640  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
642  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
643  return AVERROR(EINVAL);
644  }
645 
646  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
647  (s->codec_id == AV_CODEC_ID_AMV ||
648  s->codec_id == AV_CODEC_ID_MJPEG)) {
649  // Used to produce garbage with MJPEG.
650  av_log(avctx, AV_LOG_ERROR,
651  "QP RD is no longer compatible with MJPEG or AMV\n");
652  return AVERROR(EINVAL);
653  }
654 
655 #if FF_API_PRIVATE_OPT
657  if (avctx->scenechange_threshold)
660 #endif
661 
662  if (s->scenechange_threshold < 1000000000 &&
664  av_log(avctx, AV_LOG_ERROR,
665  "closed gop with scene change detection are not supported yet, "
666  "set threshold to 1000000000\n");
667  return AVERROR_PATCHWELCOME;
668  }
669 
670  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
671  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
673  av_log(avctx, AV_LOG_ERROR,
674  "low delay forcing is only available for mpeg2, "
675  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
676  return AVERROR(EINVAL);
677  }
678  if (s->max_b_frames != 0) {
679  av_log(avctx, AV_LOG_ERROR,
680  "B-frames cannot be used with low delay\n");
681  return AVERROR(EINVAL);
682  }
683  }
684 
685  if (s->q_scale_type == 1) {
686  if (avctx->qmax > 28) {
687  av_log(avctx, AV_LOG_ERROR,
688  "non linear quant only supports qmax <= 28 currently\n");
689  return AVERROR_PATCHWELCOME;
690  }
691  }
692 
693  if (avctx->slices > 1 &&
694  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
695  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
696  return AVERROR(EINVAL);
697  }
698 
699  if (s->avctx->thread_count > 1 &&
700  s->codec_id != AV_CODEC_ID_MPEG4 &&
703  s->codec_id != AV_CODEC_ID_MJPEG &&
704  (s->codec_id != AV_CODEC_ID_H263P)) {
705  av_log(avctx, AV_LOG_ERROR,
706  "multi threaded encoding not supported by codec\n");
707  return AVERROR_PATCHWELCOME;
708  }
709 
710  if (s->avctx->thread_count < 1) {
711  av_log(avctx, AV_LOG_ERROR,
712  "automatic thread number detection not supported by codec, "
713  "patch welcome\n");
714  return AVERROR_PATCHWELCOME;
715  }
716 
717  if (!avctx->time_base.den || !avctx->time_base.num) {
718  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
719  return AVERROR(EINVAL);
720  }
721 
722 #if FF_API_PRIVATE_OPT
724  if (avctx->b_frame_strategy)
726  if (avctx->b_sensitivity != 40)
727  s->b_sensitivity = avctx->b_sensitivity;
729 #endif
730 
731  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
732  av_log(avctx, AV_LOG_INFO,
733  "notice: b_frame_strategy only affects the first pass\n");
734  s->b_frame_strategy = 0;
735  }
736 
737  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
738  if (i > 1) {
739  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
740  avctx->time_base.den /= i;
741  avctx->time_base.num /= i;
742  //return -1;
743  }
744 
746  // (a + x * 3 / 8) / x
747  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
748  s->inter_quant_bias = 0;
749  } else {
750  s->intra_quant_bias = 0;
751  // (a - x / 4) / x
752  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
753  }
754 
755  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
756  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
757  return AVERROR(EINVAL);
758  }
759 
760  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
761 
762  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
763  s->avctx->time_base.den > (1 << 16) - 1) {
764  av_log(avctx, AV_LOG_ERROR,
765  "timebase %d/%d not supported by MPEG 4 standard, "
766  "the maximum admitted value for the timebase denominator "
767  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
768  (1 << 16) - 1);
769  return AVERROR(EINVAL);
770  }
771  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
772 
773  switch (avctx->codec->id) {
775  s->out_format = FMT_MPEG1;
777  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
778  break;
780  s->out_format = FMT_MPEG1;
782  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
783  s->rtp_mode = 1;
784  break;
785  case AV_CODEC_ID_MJPEG:
786  case AV_CODEC_ID_AMV:
787  s->out_format = FMT_MJPEG;
788  s->intra_only = 1; /* force intra only for jpeg */
791  if ((ret = ff_mjpeg_encode_init(s)) < 0)
792  return ret;
793  avctx->delay = 0;
794  s->low_delay = 1;
795  break;
796  case AV_CODEC_ID_H261:
797  if (!CONFIG_H261_ENCODER)
799  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
800  av_log(avctx, AV_LOG_ERROR,
801  "The specified picture size of %dx%d is not valid for the "
802  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
803  s->width, s->height);
804  return AVERROR(EINVAL);
805  }
806  s->out_format = FMT_H261;
807  avctx->delay = 0;
808  s->low_delay = 1;
809  s->rtp_mode = 0; /* Sliced encoding not supported */
810  break;
811  case AV_CODEC_ID_H263:
812  if (!CONFIG_H263_ENCODER)
815  s->width, s->height) == 8) {
816  av_log(avctx, AV_LOG_ERROR,
817  "The specified picture size of %dx%d is not valid for "
818  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
819  "352x288, 704x576, and 1408x1152. "
820  "Try H.263+.\n", s->width, s->height);
821  return AVERROR(EINVAL);
822  }
823  s->out_format = FMT_H263;
824  avctx->delay = 0;
825  s->low_delay = 1;
826  break;
827  case AV_CODEC_ID_H263P:
828  s->out_format = FMT_H263;
829  s->h263_plus = 1;
830  /* Fx */
831  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
832  s->modified_quant = s->h263_aic;
833  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
834  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
835 
836  /* /Fx */
837  /* These are just to be sure */
838  avctx->delay = 0;
839  s->low_delay = 1;
840  break;
841  case AV_CODEC_ID_FLV1:
842  s->out_format = FMT_H263;
843  s->h263_flv = 2; /* format = 1; 11-bit codes */
844  s->unrestricted_mv = 1;
845  s->rtp_mode = 0; /* don't allow GOB */
846  avctx->delay = 0;
847  s->low_delay = 1;
848  break;
849  case AV_CODEC_ID_RV10:
850  s->out_format = FMT_H263;
851  avctx->delay = 0;
852  s->low_delay = 1;
853  break;
854  case AV_CODEC_ID_RV20:
855  s->out_format = FMT_H263;
856  avctx->delay = 0;
857  s->low_delay = 1;
858  s->modified_quant = 1;
859  s->h263_aic = 1;
860  s->h263_plus = 1;
861  s->loop_filter = 1;
862  s->unrestricted_mv = 0;
863  break;
864  case AV_CODEC_ID_MPEG4:
865  s->out_format = FMT_H263;
866  s->h263_pred = 1;
867  s->unrestricted_mv = 1;
868  s->low_delay = s->max_b_frames ? 0 : 1;
869  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
870  break;
872  s->out_format = FMT_H263;
873  s->h263_pred = 1;
874  s->unrestricted_mv = 1;
875  s->msmpeg4_version = 2;
876  avctx->delay = 0;
877  s->low_delay = 1;
878  break;
880  s->out_format = FMT_H263;
881  s->h263_pred = 1;
882  s->unrestricted_mv = 1;
883  s->msmpeg4_version = 3;
884  s->flipflop_rounding = 1;
885  avctx->delay = 0;
886  s->low_delay = 1;
887  break;
888  case AV_CODEC_ID_WMV1:
889  s->out_format = FMT_H263;
890  s->h263_pred = 1;
891  s->unrestricted_mv = 1;
892  s->msmpeg4_version = 4;
893  s->flipflop_rounding = 1;
894  avctx->delay = 0;
895  s->low_delay = 1;
896  break;
897  case AV_CODEC_ID_WMV2:
898  s->out_format = FMT_H263;
899  s->h263_pred = 1;
900  s->unrestricted_mv = 1;
901  s->msmpeg4_version = 5;
902  s->flipflop_rounding = 1;
903  avctx->delay = 0;
904  s->low_delay = 1;
905  break;
906  default:
907  return AVERROR(EINVAL);
908  }
909 
910 #if FF_API_PRIVATE_OPT
912  if (avctx->noise_reduction)
913  s->noise_reduction = avctx->noise_reduction;
915 #endif
916 
917  avctx->has_b_frames = !s->low_delay;
918 
919  s->encoding = 1;
920 
921  s->progressive_frame =
924  s->alternate_scan);
925 
926  /* init */
927  ff_mpv_idct_init(s);
928  if ((ret = ff_mpv_common_init(s)) < 0)
929  return ret;
930 
931  ff_fdctdsp_init(&s->fdsp, avctx);
932  ff_me_cmp_init(&s->mecc, avctx);
934  ff_pixblockdsp_init(&s->pdsp, avctx);
935  ff_qpeldsp_init(&s->qdsp);
936 
937  if (s->msmpeg4_version) {
939  2 * 2 * (MAX_LEVEL + 1) *
940  (MAX_RUN + 1) * 2 * sizeof(int), fail);
941  }
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
943 
944  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
945  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
946  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
947  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
948  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
949  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
951  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
953  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
954 
955 
956  if (s->noise_reduction) {
958  2 * 64 * sizeof(uint16_t), fail);
959  }
960 
962 
965 
966  if (s->slice_context_count > 1) {
967  s->rtp_mode = 1;
968 
969  if (avctx->codec_id == AV_CODEC_ID_H263P)
970  s->h263_slice_structured = 1;
971  }
972 
973  s->quant_precision = 5;
974 
975 #if FF_API_PRIVATE_OPT
977  if (avctx->frame_skip_threshold)
979  if (avctx->frame_skip_factor)
981  if (avctx->frame_skip_exp)
982  s->frame_skip_exp = avctx->frame_skip_exp;
983  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
984  s->frame_skip_cmp = avctx->frame_skip_cmp;
986 #endif
987 
990 
996  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
997  return ret;
999  && s->out_format == FMT_MPEG1)
1001 
1002  /* init q matrix */
1003  for (i = 0; i < 64; i++) {
1004  int j = s->idsp.idct_permutation[i];
1006  s->mpeg_quant) {
1009  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1010  s->intra_matrix[j] =
1012  } else {
1013  /* MPEG-1/2 */
1014  s->chroma_intra_matrix[j] =
1017  }
1018  if (s->avctx->intra_matrix)
1019  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1020  if (s->avctx->inter_matrix)
1021  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1022  }
1023 
1024  /* precompute matrix */
1025  /* for mjpeg, we do include qscale in the matrix */
1026  if (s->out_format != FMT_MJPEG) {
1028  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1029  31, 1);
1031  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1032  31, 0);
1033  }
1034 
1035  if ((ret = ff_rate_control_init(s)) < 0)
1036  return ret;
1037 
1038 #if FF_API_PRIVATE_OPT
1040  if (avctx->brd_scale)
1041  s->brd_scale = avctx->brd_scale;
1042 
1043  if (avctx->prediction_method)
1044  s->pred = avctx->prediction_method + 1;
1046 #endif
1047 
1048  if (s->b_frame_strategy == 2) {
1049  for (i = 0; i < s->max_b_frames + 2; i++) {
1050  s->tmp_frames[i] = av_frame_alloc();
1051  if (!s->tmp_frames[i])
1052  return AVERROR(ENOMEM);
1053 
1055  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1056  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1057 
1058  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1059  if (ret < 0)
1060  return ret;
1061  }
1062  }
1063 
1064  cpb_props = ff_add_cpb_side_data(avctx);
1065  if (!cpb_props)
1066  return AVERROR(ENOMEM);
1067  cpb_props->max_bitrate = avctx->rc_max_rate;
1068  cpb_props->min_bitrate = avctx->rc_min_rate;
1069  cpb_props->avg_bitrate = avctx->bit_rate;
1070  cpb_props->buffer_size = avctx->rc_buffer_size;
1071 
1072  return 0;
1073 fail:
1074  ff_mpv_encode_end(avctx);
1075  return AVERROR_UNKNOWN;
1076 }
1077 
1079 {
1080  MpegEncContext *s = avctx->priv_data;
1081  int i;
1082 
1084 
1085  ff_mpv_common_end(s);
1086  if (CONFIG_MJPEG_ENCODER &&
1087  s->out_format == FMT_MJPEG)
1089 
1090  av_freep(&avctx->extradata);
1091 
1092  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1093  av_frame_free(&s->tmp_frames[i]);
1094 
1097 
1098  av_freep(&s->avctx->stats_out);
1099  av_freep(&s->ac_stats);
1100 
1105  av_freep(&s->q_intra_matrix);
1106  av_freep(&s->q_inter_matrix);
1109  av_freep(&s->input_picture);
1111  av_freep(&s->dct_offset);
1112 
1113  return 0;
1114 }
1115 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference.
 *
 * @param src    pointer to the top-left pixel of the 16x16 block
 * @param ref    reference value subtracted from every pixel (the caller in
 *               this file passes the block mean from pix_sum)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return       sum over the 256 pixels of |src[x + y*stride] - ref|
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int acc = 0;

    /* src is read-only here, hence the const qualifier (callers passing a
     * plain uint8_t * are unaffected). */
    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++) {
            int diff = src[x + y * stride] - ref;
            acc += diff >= 0 ? diff : -diff;
        }
    }

    return acc;
}
1129 
1131  uint8_t *ref, int stride)
1132 {
1133  int x, y, w, h;
1134  int acc = 0;
1135 
1136  w = s->width & ~15;
1137  h = s->height & ~15;
1138 
1139  for (y = 0; y < h; y += 16) {
1140  for (x = 0; x < w; x += 16) {
1141  int offset = x + y * stride;
1142  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1143  stride, 16);
1144  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1145  int sae = get_sae(src + offset, mean, stride);
1146 
1147  acc += sae + 500 < sad;
1148  }
1149  }
1150  return acc;
1151 }
1152 
/*
 * Thin wrapper: allocate the buffers of one encoder Picture via
 * ff_alloc_picture(), passing this context's macroblock geometry
 * (mb_stride, mb_width, mb_height, b8_stride) and receiving the resulting
 * luma/chroma line sizes back into s->linesize / s->uvlinesize.
 * Returns ff_alloc_picture()'s result directly.
 *
 * NOTE(review): one argument line of the call (dump line 1156) is missing
 * from this line-numbered extract — confirm the full argument list against
 * the complete file.
 */
1153 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1154 {
1155  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1157  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1158  &s->linesize, &s->uvlinesize);
1159 }
1160 
/* Take one user frame (or NULL to flush) into the encoder's input FIFO.
 * Decides whether the user buffer can be referenced directly ("direct") or
 * must be copied into an internally allocated, edge-padded Picture; also
 * validates/guesses pts. Returns 0 or a negative AVERROR.
 * NOTE(review): extraction artifact — some lines are missing in this chunk
 * (doxygen 1180: the av_log( opening of the "Invalid pts" message; 1239:
 * the av_pix_fmt_get_chroma_sub_sample( call line). */
1161 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1162 {
1163  Picture *pic = NULL;
1164  int64_t pts;
1165  int i, display_picture_number = 0, ret;
 /* frames the output lags behind the input: max_b_frames, or 1 unless low_delay */
1166  int encoding_delay = s->max_b_frames ? s->max_b_frames
1167  : (s->low_delay ? 0 : 1);
1168  int flush_offset = 1;
1169  int direct = 1;
1170 
1171  if (pic_arg) {
1172  pts = pic_arg->pts;
1173  display_picture_number = s->input_picture_number++;
1174 
1175  if (pts != AV_NOPTS_VALUE) {
1176  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1177  int64_t last = s->user_specified_pts;
1178 
 /* pts must be strictly monotonic */
1179  if (pts <= last) {
1181  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1182  pts, last);
1183  return AVERROR(EINVAL);
1184  }
1185 
1186  if (!s->low_delay && display_picture_number == 1)
1187  s->dts_delta = pts - last;
1188  }
1189  s->user_specified_pts = pts;
1190  } else {
 /* no pts supplied: extrapolate from the previous one, or fall back
  * to the display picture number */
1191  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1192  s->user_specified_pts =
1193  pts = s->user_specified_pts + 1;
1194  av_log(s->avctx, AV_LOG_INFO,
1195  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1196  pts);
1197  } else {
1198  pts = display_picture_number;
1199  }
1200  }
1201 
 /* "direct" (zero-copy) use of the user buffer requires matching
  * linesizes, mod-16 dimensions and sufficient alignment */
1202  if (!pic_arg->buf[0] ||
1203  pic_arg->linesize[0] != s->linesize ||
1204  pic_arg->linesize[1] != s->uvlinesize ||
1205  pic_arg->linesize[2] != s->uvlinesize)
1206  direct = 0;
1207  if ((s->width & 15) || (s->height & 15))
1208  direct = 0;
1209  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1210  direct = 0;
1211  if (s->linesize & (STRIDE_ALIGN-1))
1212  direct = 0;
1213 
1214  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1215  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1216 
1217  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1218  if (i < 0)
1219  return i;
1220 
1221  pic = &s->picture[i];
1222  pic->reference = 3;
1223 
1224  if (direct) {
1225  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1226  return ret;
1227  }
1228  ret = alloc_picture(s, pic, direct);
1229  if (ret < 0)
1230  return ret;
1231 
1232  if (!direct) {
 /* data already lives at the in-place offset — nothing to copy */
1233  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1234  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1235  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1236  // empty
1237  } else {
1238  int h_chroma_shift, v_chroma_shift;
 /* (missing line 1239: av_pix_fmt_get_chroma_sub_sample(... ) */
1240  &h_chroma_shift,
1241  &v_chroma_shift);
1242 
 /* copy all three planes into the internal buffer, then pad edges */
1243  for (i = 0; i < 3; i++) {
1244  ptrdiff_t src_stride = pic_arg->linesize[i];
1245  ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
1246  int h_shift = i ? h_chroma_shift : 0;
1247  int v_shift = i ? v_chroma_shift : 0;
1248  int w = AV_CEIL_RSHIFT(s->width , h_shift);
1249  int h = AV_CEIL_RSHIFT(s->height, v_shift);
1250  uint8_t *src = pic_arg->data[i];
1251  uint8_t *dst = pic->f->data[i];
1252  int vpad = 16;
1253 
 /* interlaced MPEG-2 with tall alignment needs 32 lines of padding */
1254  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1255  && !s->progressive_sequence
1256  && FFALIGN(s->height, 32) - s->height > 16)
1257  vpad = 32;
1258 
1259  if (!s->avctx->rc_buffer_size)
1260  dst += INPLACE_OFFSET;
1261 
1262  if (src_stride == dst_stride)
1263  memcpy(dst, src, src_stride * h - src_stride + w);
1264  else {
1265  int h2 = h;
1266  uint8_t *dst2 = dst;
1267  while (h2--) {
1268  memcpy(dst2, src, w);
1269  dst2 += dst_stride;
1270  src += src_stride;
1271  }
1272  }
1273  if ((s->width & 15) || (s->height & (vpad-1))) {
1274  s->mpvencdsp.draw_edges(dst, dst_stride,
1275  w, h,
1276  16 >> h_shift,
1277  vpad >> v_shift,
1278  EDGE_BOTTOM);
1279  }
1280  }
1281  emms_c();
1282  }
1283  }
1284  ret = av_frame_copy_props(pic->f, pic_arg);
1285  if (ret < 0)
1286  return ret;
1287 
1288  pic->f->display_picture_number = display_picture_number;
1289  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1290  } else {
1291  /* Flushing: When we have not received enough input frames,
1292  * ensure s->input_picture[0] contains the first picture */
1293  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1294  if (s->input_picture[flush_offset])
1295  break;
1296 
1297  if (flush_offset <= 1)
1298  flush_offset = 1;
1299  else
1300  encoding_delay = encoding_delay - flush_offset + 1;
1301  }
1302 
1303  /* shift buffer entries */
1304  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1305  s->input_picture[i - flush_offset] = s->input_picture[i];
1306  for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
1307  s->input_picture[i] = NULL;
1308 
1309  s->input_picture[encoding_delay] = (Picture*) pic;
1310 
1311  return 0;
1312 }
1313 
/* Decide whether the candidate frame is similar enough to the reference to
 * be skipped entirely (returns 1 to skip, 0 to encode).
 * NOTE(review): the signature line (doxygen 1314, presumably
 * "static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)" —
 * TODO confirm) is missing from this extraction. */
1315 {
1316  int x, y, plane;
1317  int score = 0;
1318  int64_t score64 = 0;
1319 
 /* iterate 8x8 cells over luma (2x density) and both chroma planes */
1320  for (plane = 0; plane < 3; plane++) {
1321  const int stride = p->f->linesize[plane];
1322  const int bw = plane ? 1 : 2;
1323  for (y = 0; y < s->mb_height * bw; y++) {
1324  for (x = 0; x < s->mb_width * bw; x++) {
 /* non-shared internal buffers carry a 16-byte in-place offset */
1325  int off = p->shared ? 0 : 16;
1326  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1327  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1328  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1329 
 /* |frame_skip_exp| selects the accumulation norm: max, L1, L2, L3, L4 */
1330  switch (FFABS(s->frame_skip_exp)) {
1331  case 0: score = FFMAX(score, v); break;
1332  case 1: score += FFABS(v); break;
1333  case 2: score64 += v * (int64_t)v; break;
1334  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1335  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1336  }
1337  }
1338  }
1339  }
1340  emms_c();
1341 
1342  if (score)
1343  score64 = score;
 /* negative exponent: normalize per macroblock and take the root */
1344  if (s->frame_skip_exp < 0)
1345  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1346  -1.0/s->frame_skip_exp);
1347 
1348  if (score64 < s->frame_skip_threshold)
1349  return 1;
1350  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1351  return 1;
1352  return 0;
1353 }
1354 
/* Encode one frame with an auxiliary encoder context and return the total
 * compressed size in bytes (used by estimate_best_b_count), or a negative
 * AVERROR. Drains all pending packets after sending the frame.
 * NOTE(review): the signature line (doxygen 1355, presumably
 * "static int encode_frame(AVCodecContext *c, AVFrame *frame)" — TODO
 * confirm) is missing from this extraction. */
1356 {
1357  AVPacket pkt = { 0 };
1358  int ret;
1359  int size = 0;
1360 
1361  av_init_packet(&pkt);
1362 
1363  ret = avcodec_send_frame(c, frame);
1364  if (ret < 0)
1365  return ret;
1366 
 /* accumulate the size of every packet produced for this frame;
  * EAGAIN/EOF terminate the loop without being treated as errors */
1367  do {
1368  ret = avcodec_receive_packet(c, &pkt);
1369  if (ret >= 0) {
1370  size += pkt.size;
1371  av_packet_unref(&pkt);
1372  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1373  return ret;
1374  } while (ret >= 0);
1375 
1376  return size;
1377 }
1378 
/* b_frame_strategy == 2: brute-force search for the best number of B-frames.
 * Encodes downscaled versions of the queued input pictures with a throwaway
 * encoder for every candidate B-count and picks the one with the lowest
 * rate-distortion cost. Returns the best count or a negative AVERROR.
 * NOTE(review): extraction artifact — the signature line (doxygen 1379) and
 * several statement lines (1400, 1442, 1448, 1454, 1462, 1477, 1505) are
 * missing from this chunk; some expressions below are visibly truncated. */
1380 {
1381  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1382  const int scale = s->brd_scale;
1383  int width = s->width >> scale;
1384  int height = s->height >> scale;
1385  int i, j, out_size, p_lambda, b_lambda, lambda2;
1386  int64_t best_rd = INT64_MAX;
1387  int best_b_count = -1;
1388  int ret = 0;
1389 
1390  av_assert0(scale >= 0 && scale <= 3);
1391 
1392  //emms_c();
1393  //s->next_picture_ptr->quality;
1394  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1395  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1396  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1397  if (!b_lambda) // FIXME we should do this somewhere else
1398  b_lambda = p_lambda;
 /* (missing line 1400: the shift amount of this expression) */
1399  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1401 
 /* shrink the reference (i == 0) and all queued inputs into tmp_frames */
1402  for (i = 0; i < s->max_b_frames + 2; i++) {
1403  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1404  s->next_picture_ptr;
1405  uint8_t *data[4];
1406 
1407  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1408  pre_input = *pre_input_ptr;
1409  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1410 
 /* internal (non-shared) buffers store pixels at an in-place offset */
1411  if (!pre_input.shared && i) {
1412  data[0] += INPLACE_OFFSET;
1413  data[1] += INPLACE_OFFSET;
1414  data[2] += INPLACE_OFFSET;
1415  }
1416 
1417  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1418  s->tmp_frames[i]->linesize[0],
1419  data[0],
1420  pre_input.f->linesize[0],
1421  width, height);
1422  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1423  s->tmp_frames[i]->linesize[1],
1424  data[1],
1425  pre_input.f->linesize[1],
1426  width >> 1, height >> 1);
1427  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1428  s->tmp_frames[i]->linesize[2],
1429  data[2],
1430  pre_input.f->linesize[2],
1431  width >> 1, height >> 1);
1432  }
1433  }
1434 
 /* try every candidate B-frame count j and measure its RD cost */
1435  for (j = 0; j < s->max_b_frames + 1; j++) {
1436  AVCodecContext *c;
1437  int64_t rd = 0;
1438 
1439  if (!s->input_picture[j])
1440  break;
1441 
 /* (missing line 1442: c = avcodec_alloc_context3(...)) */
1443  if (!c)
1444  return AVERROR(ENOMEM);
1445 
1446  c->width = width;
1447  c->height = height;
 /* (missing line 1448: base flags assignment) */
1449  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1450  c->mb_decision = s->avctx->mb_decision;
1451  c->me_cmp = s->avctx->me_cmp;
1452  c->mb_cmp = s->avctx->mb_cmp;
1453  c->me_sub_cmp = s->avctx->me_sub_cmp;
1455  c->time_base = s->avctx->time_base;
1456  c->max_b_frames = s->max_b_frames;
1457 
1458  ret = avcodec_open2(c, codec, NULL);
1459  if (ret < 0)
1460  goto fail;
1461 
1463  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1464 
 /* prime the auxiliary encoder with the reference frame */
1465  out_size = encode_frame(c, s->tmp_frames[0]);
1466  if (out_size < 0) {
1467  ret = out_size;
1468  goto fail;
1469  }
1470 
1471  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1472 
1473  for (i = 0; i < s->max_b_frames + 1; i++) {
 /* every (j+1)-th frame (and the last one) is a P-frame */
1474  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1475 
1476  s->tmp_frames[i + 1]->pict_type = is_p ?
1478  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1479 
1480  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1481  if (out_size < 0) {
1482  ret = out_size;
1483  goto fail;
1484  }
1485 
1486  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1487  }
1488 
1489  /* get the delayed frames */
1490  out_size = encode_frame(c, NULL);
1491  if (out_size < 0) {
1492  ret = out_size;
1493  goto fail;
1494  }
1495  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1496 
 /* distortion contribution reported by the auxiliary encoder */
1497  rd += c->error[0] + c->error[1] + c->error[2];
1498 
1499  if (rd < best_rd) {
1500  best_rd = rd;
1501  best_b_count = j;
1502  }
1503 
1504 fail:
 /* (missing line 1505: avcodec_free_context(&c)) */
1506  if (ret < 0)
1507  return ret;
1508  }
1509 
1510  return best_b_count;
1511 }
1512 
/* Choose the next picture to encode: applies frame-skip, decides the number
 * of B-frames (per b_frame_strategy), reorders input into coding order and
 * sets up s->new_picture / s->current_picture. Returns 0 or negative error.
 * NOTE(review): extraction artifact — the signature line (doxygen 1513) and
 * many statement lines (1518, 1539-40, 1569, 1604, 1613, 1624-26, 1631-32,
 * 1638, 1641-42, 1658, 1668, 1674, 1679, 1684) are missing from this chunk. */
1514 {
1515  int i, ret;
1516 
 /* (missing line 1518: body of this shift loop) */
1517  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1519  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1520 
1521  /* set next picture type & ordering */
1522  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1523  if (s->frame_skip_threshold || s->frame_skip_factor) {
1524  if (s->picture_in_gop_number < s->gop_size &&
1525  s->next_picture_ptr &&
1526  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1527  // FIXME check that the gop check above is +-1 correct
1528  av_frame_unref(s->input_picture[0]->f);
1529 
1530  ff_vbv_update(s, 0);
1531 
1532  goto no_output_pic;
1533  }
1534  }
1535 
 /* no reference yet or intra-only: code the first input as-is */
1536  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1537  !s->next_picture_ptr || s->intra_only) {
1538  s->reordered_input_picture[0] = s->input_picture[0];
1541  s->coded_picture_number++;
1542  } else {
1543  int b_frames = 0;
1544 
 /* two-pass mode: picture types are dictated by the stats file */
1545  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1546  for (i = 0; i < s->max_b_frames + 1; i++) {
1547  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1548 
1549  if (pict_num >= s->rc_context.num_entries)
1550  break;
1551  if (!s->input_picture[i]) {
1552  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1553  break;
1554  }
1555 
1556  s->input_picture[i]->f->pict_type =
1557  s->rc_context.entry[pict_num].new_pict_type;
1558  }
1559  }
1560 
1561  if (s->b_frame_strategy == 0) {
1562  b_frames = s->max_b_frames;
1563  while (b_frames && !s->input_picture[b_frames])
1564  b_frames--;
1565  } else if (s->b_frame_strategy == 1) {
 /* strategy 1: score each candidate by its intra-block count */
1566  for (i = 1; i < s->max_b_frames + 1; i++) {
1567  if (s->input_picture[i] &&
1568  s->input_picture[i]->b_frame_score == 0) {
1570  get_intra_count(s,
1571  s->input_picture[i ]->f->data[0],
1572  s->input_picture[i - 1]->f->data[0],
1573  s->linesize) + 1;
1574  }
1575  }
1576  for (i = 0; i < s->max_b_frames + 1; i++) {
1577  if (!s->input_picture[i] ||
1578  s->input_picture[i]->b_frame_score - 1 >
1579  s->mb_num / s->b_sensitivity)
1580  break;
1581  }
1582 
1583  b_frames = FFMAX(0, i - 1);
1584 
1585  /* reset scores */
1586  for (i = 0; i < b_frames + 1; i++) {
1587  s->input_picture[i]->b_frame_score = 0;
1588  }
1589  } else if (s->b_frame_strategy == 2) {
1590  b_frames = estimate_best_b_count(s);
1591  if (b_frames < 0)
1592  return b_frames;
1593  }
1594 
1595  emms_c();
1596 
 /* honor user-forced picture types inside the candidate run */
1597  for (i = b_frames - 1; i >= 0; i--) {
1598  int type = s->input_picture[i]->f->pict_type;
1599  if (type && type != AV_PICTURE_TYPE_B)
1600  b_frames = i;
1601  }
1602  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1603  b_frames == s->max_b_frames) {
1605  "warning, too many B-frames in a row\n");
1606  }
1607 
 /* GOP boundary handling: clamp (strict GOP) or force an I-frame */
1608  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1609  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1610  s->gop_size > s->picture_in_gop_number) {
1611  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1612  } else {
1614  b_frames = 0;
1615  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1616  }
1617  }
1618 
1619  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1620  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1621  b_frames--;
1622 
 /* coding order: the anchor frame first, then its B-frames */
1623  s->reordered_input_picture[0] = s->input_picture[b_frames];
1627  s->coded_picture_number++;
1628  for (i = 0; i < b_frames; i++) {
1629  s->reordered_input_picture[i + 1] = s->input_picture[i];
1630  s->reordered_input_picture[i + 1]->f->pict_type =
1633  s->coded_picture_number++;
1634  }
1635  }
1636  }
1637 no_output_pic:
1639 
1640  if (s->reordered_input_picture[0]) {
1643  AV_PICTURE_TYPE_B ? 3 : 0;
1644 
1645  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1646  return ret;
1647 
1648  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1649  // input is a shared pix, so we can't modify it -> allocate a new
1650  // one & ensure that the shared one is reuseable
1651 
1652  Picture *pic;
1653  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1654  if (i < 0)
1655  return i;
1656  pic = &s->picture[i];
1657 
1659  if (alloc_picture(s, pic, 0) < 0) {
1660  return -1;
1661  }
1662 
1663  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1664  if (ret < 0)
1665  return ret;
1666 
1667  /* mark us unused / free shared pic */
1669  s->reordered_input_picture[0]->shared = 0;
1670 
1671  s->current_picture_ptr = pic;
1672  } else {
1673  // input is not a shared pix -> reuse buffer for current_pix
1675  for (i = 0; i < 4; i++) {
1676  s->new_picture.f->data[i] += INPLACE_OFFSET;
1677  }
1678  }
1680  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1681  s->current_picture_ptr)) < 0)
1682  return ret;
1683 
1685  }
1686  return 0;
1687 }
1688 
/* Per-frame cleanup after encoding: pad the reconstructed picture's edges
 * (needed as motion-estimation reference for unrestricted MVs) and update
 * last-picture bookkeeping.
 * NOTE(review): extraction artifact — several lines are missing here
 * (1692, 1694, 1697, 1700, 1702, 1709: the draw_edges( call openings and
 * pix-fmt descriptor lookup; 1721, 1723, 1726-29, 1732-33, 1735). */
1689 static void frame_end(MpegEncContext *s)
1690 {
1691  if (s->unrestricted_mv &&
1693  !s->intra_only) {
 /* (missing line 1695 prefix: pix fmt descriptor lookup for shifts) */
1695  int hshift = desc->log2_chroma_w;
1696  int vshift = desc->log2_chroma_h;
 /* edge padding of the luma plane ... */
1698  s->current_picture.f->linesize[0],
1699  s->h_edge_pos, s->v_edge_pos,
1701  EDGE_TOP | EDGE_BOTTOM);
 /* ... and of both chroma planes, with subsampled geometry */
1703  s->current_picture.f->linesize[1],
1704  s->h_edge_pos >> hshift,
1705  s->v_edge_pos >> vshift,
1706  EDGE_WIDTH >> hshift,
1707  EDGE_WIDTH >> vshift,
1708  EDGE_TOP | EDGE_BOTTOM);
1710  s->current_picture.f->linesize[2],
1711  s->h_edge_pos >> hshift,
1712  s->v_edge_pos >> vshift,
1713  EDGE_WIDTH >> hshift,
1714  EDGE_WIDTH >> vshift,
1715  EDGE_TOP | EDGE_BOTTOM);
1716  }
1717 
1718  emms_c();
1719 
1720  s->last_pict_type = s->pict_type;
1722  if (s->pict_type!= AV_PICTURE_TYPE_B)
1724 
1725 #if FF_API_CODED_FRAME
1730 #endif
1731 #if FF_API_ERROR_FRAME
1734  sizeof(s->current_picture.encoding_error));
1736 #endif
1737 }
1738 
/* Refresh the per-coefficient noise-reduction DCT offsets from accumulated
 * error statistics, separately for intra and inter blocks.
 * NOTE(review): the signature line (doxygen 1739, presumably
 * "static void update_noise_reduction(MpegEncContext *s)" — TODO confirm)
 * is missing from this extraction. */
1740 {
1741  int intra, i;
1742 
1743  for (intra = 0; intra < 2; intra++) {
 /* halve the statistics periodically so they track recent content */
1744  if (s->dct_count[intra] > (1 << 16)) {
1745  for (i = 0; i < 64; i++) {
1746  s->dct_error_sum[intra][i] >>= 1;
1747  }
1748  s->dct_count[intra] >>= 1;
1749  }
1750 
 /* offset[i] ~= noise_reduction * count / error_sum[i], rounded */
1751  for (i = 0; i < 64; i++) {
1752  s->dct_offset[intra][i] = (s->noise_reduction *
1753  s->dct_count[intra] +
1754  s->dct_error_sum[intra][i] / 2) /
1755  (s->dct_error_sum[intra][i] + 1);
1756  }
1757  }
1758 }
1759 
/* Per-frame setup before encoding: rotate last/next/current picture
 * references, adjust field geometry, select the quantizer tables and refresh
 * noise-reduction offsets. Returns 0 or a negative AVERROR.
 * NOTE(review): extraction artifact — the signature line (doxygen 1760) and
 * several statement lines (1766, 1768, 1771-72, 1774, 1780, 1782, 1786, 1793,
 * 1803, 1814-15, 1817-18, 1820-21, 1825-26) are missing from this chunk. */
1761 {
1762  int ret;
1763 
1764  /* mark & release old frames */
1765  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1767  s->last_picture_ptr->f->buf[0]) {
1769  }
1770 
1773 
1775  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1776  s->current_picture_ptr)) < 0)
1777  return ret;
1778 
 /* non-B frames become the new forward references (unless droppable) */
1779  if (s->pict_type != AV_PICTURE_TYPE_B) {
1781  if (!s->droppable)
1783  }
1784 
1785  if (s->last_picture_ptr) {
1787  if (s->last_picture_ptr->f->buf[0] &&
1788  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1789  s->last_picture_ptr)) < 0)
1790  return ret;
1791  }
1792  if (s->next_picture_ptr) {
1794  if (s->next_picture_ptr->f->buf[0] &&
1795  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1796  s->next_picture_ptr)) < 0)
1797  return ret;
1798  }
1799 
 /* field pictures: skip to the requested field and double the strides */
1800  if (s->picture_structure!= PICT_FRAME) {
1801  int i;
1802  for (i = 0; i < 4; i++) {
1804  s->current_picture.f->data[i] +=
1805  s->current_picture.f->linesize[i];
1806  }
1807  s->current_picture.f->linesize[i] *= 2;
1808  s->last_picture.f->linesize[i] *= 2;
1809  s->next_picture.f->linesize[i] *= 2;
1810  }
1811  }
1812 
 /* select the (de)quantizer function set per output format */
1813  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1816  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1819  } else {
1822  }
1823 
1824  if (s->dct_error_sum) {
1827  }
1828 
1829  return 0;
1830 }
1831 
/* Public per-frame entry point (ff_mpv_encode_picture): buffers the input,
 * selects/reorders the picture to code, encodes it (re-encoding at a higher
 * lambda on VBV overflow), writes stuffing, updates vbv_delay for CBR
 * MPEG-1/2 and fills the output packet. Returns 0 or a negative error.
 * NOTE(review): extraction artifact — the first signature line (doxygen 1832,
 * presumably "int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,"
 * — TODO confirm) and numerous statement lines are missing from this chunk;
 * only the second parameter line survives below. */
1833  const AVFrame *pic_arg, int *got_packet)
1834 {
1835  MpegEncContext *s = avctx->priv_data;
1836  int i, stuffing_count, ret;
1837  int context_count = s->slice_context_count;
1838 
1839  s->vbv_ignore_qmax = 0;
1840 
1841  s->picture_in_gop_number++;
1842 
1843  if (load_input_picture(s, pic_arg) < 0)
1844  return -1;
1845 
1846  if (select_input_picture(s) < 0) {
1847  return -1;
1848  }
1849 
1850  /* output? */
1851  if (s->new_picture.f->data[0]) {
1852  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1853  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1854  :
1855  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1856  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1857  return ret;
1858  if (s->mb_info) {
1861  s->mb_width*s->mb_height*12);
1862  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1863  }
1864 
 /* partition the packet between slice threads, proportional to rows */
1865  for (i = 0; i < context_count; i++) {
1866  int start_y = s->thread_context[i]->start_mb_y;
1867  int end_y = s->thread_context[i]-> end_mb_y;
1868  int h = s->mb_height;
1869  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1870  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1871 
1872  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1873  }
1874 
1875  s->pict_type = s->new_picture.f->pict_type;
1876  //emms_c();
1877  ret = frame_start(s);
1878  if (ret < 0)
1879  return ret;
1880 vbv_retry:
1881  ret = encode_picture(s, s->picture_number);
1882  if (growing_buffer) {
1883  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1884  pkt->data = s->pb.buf;
1885  pkt->size = avctx->internal->byte_buffer_size;
1886  }
1887  if (ret < 0)
1888  return -1;
1889 
1890 #if FF_API_STAT_BITS
1892  avctx->header_bits = s->header_bits;
1893  avctx->mv_bits = s->mv_bits;
1894  avctx->misc_bits = s->misc_bits;
1895  avctx->i_tex_bits = s->i_tex_bits;
1896  avctx->p_tex_bits = s->p_tex_bits;
1897  avctx->i_count = s->i_count;
1898  // FIXME f/b_count in avctx
1899  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1900  avctx->skip_count = s->skip_count;
1902 #endif
1903 
1904  frame_end(s);
1905 
1908 
 /* VBV check: if the frame is too large, raise lambda and re-encode */
1909  if (avctx->rc_buffer_size) {
1910  RateControlContext *rcc = &s->rc_context;
1911  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1912  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1913  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1914 
1915  if (put_bits_count(&s->pb) > max_size &&
1916  s->lambda < s->lmax) {
1917  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1918  (s->qscale + 1) / s->qscale);
1919  if (s->adaptive_quant) {
1920  int i;
1921  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1922  s->lambda_table[i] =
1923  FFMAX(s->lambda_table[i] + min_step,
1924  s->lambda_table[i] * (s->qscale + 1) /
1925  s->qscale);
1926  }
1927  s->mb_skipped = 0; // done in frame_start()
1928  // done in encode_picture() so we must undo it
1929  if (s->pict_type == AV_PICTURE_TYPE_P) {
1930  if (s->flipflop_rounding ||
1931  s->codec_id == AV_CODEC_ID_H263P ||
1933  s->no_rounding ^= 1;
1934  }
1935  if (s->pict_type != AV_PICTURE_TYPE_B) {
1936  s->time_base = s->last_time_base;
1937  s->last_non_b_time = s->time - s->pp_time;
1938  }
1939  for (i = 0; i < context_count; i++) {
1940  PutBitContext *pb = &s->thread_context[i]->pb;
1941  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1942  }
1943  s->vbv_ignore_qmax = 1;
1944  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1945  goto vbv_retry;
1946  }
1947 
1949  }
1950 
1951  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1953 
1954  for (i = 0; i < 4; i++) {
1956  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1957  }
1960  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1961  s->pict_type);
1962 
1963  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1964  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1965  s->misc_bits + s->i_tex_bits +
1966  s->p_tex_bits);
1967  flush_put_bits(&s->pb);
1968  s->frame_bits = put_bits_count(&s->pb);
1969 
 /* write codec-specific stuffing demanded by the rate controller */
1970  stuffing_count = ff_vbv_update(s, s->frame_bits);
1971  s->stuffing_bits = 8*stuffing_count;
1972  if (stuffing_count) {
1973  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1974  stuffing_count + 50) {
1975  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1976  return -1;
1977  }
1978 
1979  switch (s->codec_id) {
1982  while (stuffing_count--) {
1983  put_bits(&s->pb, 8, 0);
1984  }
1985  break;
1986  case AV_CODEC_ID_MPEG4:
1987  put_bits(&s->pb, 16, 0);
1988  put_bits(&s->pb, 16, 0x1C3);
1989  stuffing_count -= 4;
1990  while (stuffing_count--) {
1991  put_bits(&s->pb, 8, 0xFF);
1992  }
1993  break;
1994  default:
1995  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1996  }
1997  flush_put_bits(&s->pb);
1998  s->frame_bits = put_bits_count(&s->pb);
1999  }
2000 
2001  /* update MPEG-1/2 vbv_delay for CBR */
2002  if (s->avctx->rc_max_rate &&
2003  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2004  s->out_format == FMT_MPEG1 &&
2005  90000LL * (avctx->rc_buffer_size - 1) <=
2006  s->avctx->rc_max_rate * 0xFFFFLL) {
2007  AVCPBProperties *props;
2008  size_t props_size;
2009 
2010  int vbv_delay, min_delay;
2011  double inbits = s->avctx->rc_max_rate *
2012  av_q2d(s->avctx->time_base);
2013  int minbits = s->frame_bits - 8 *
2014  (s->vbv_delay_ptr - s->pb.buf - 1);
2015  double bits = s->rc_context.buffer_index + minbits - inbits;
2016 
2017  if (bits < 0)
2019  "Internal error, negative bits\n");
2020 
2021  av_assert1(s->repeat_first_field == 0);
2022 
 /* vbv_delay is expressed in 90 kHz clock ticks */
2023  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2024  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2025  s->avctx->rc_max_rate;
2026 
2027  vbv_delay = FFMAX(vbv_delay, min_delay);
2028 
2029  av_assert0(vbv_delay < 0xFFFF);
2030 
 /* patch the 16-bit vbv_delay field already written in the header */
2031  s->vbv_delay_ptr[0] &= 0xF8;
2032  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2033  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2034  s->vbv_delay_ptr[2] &= 0x07;
2035  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2036 
2037  props = av_cpb_properties_alloc(&props_size);
2038  if (!props)
2039  return AVERROR(ENOMEM);
2040  props->vbv_delay = vbv_delay * 300;
2041 
2043  (uint8_t*)props, props_size);
2044  if (ret < 0) {
2045  av_freep(&props);
2046  return ret;
2047  }
2048 
2049 #if FF_API_VBV_DELAY
2051  avctx->vbv_delay = vbv_delay * 300;
2053 #endif
2054  }
2055  s->total_bits += s->frame_bits;
2056 #if FF_API_STAT_BITS
2058  avctx->frame_bits = s->frame_bits;
2060 #endif
2061 
2062 
 /* output timestamps: dts trails pts for reordered (B-frame) streams */
2063  pkt->pts = s->current_picture.f->pts;
2064  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2066  pkt->dts = pkt->pts - s->dts_delta;
2067  else
2068  pkt->dts = s->reordered_pts;
2069  s->reordered_pts = pkt->pts;
2070  } else
2071  pkt->dts = pkt->pts;
2072  if (s->current_picture.f->key_frame)
2073  pkt->flags |= AV_PKT_FLAG_KEY;
2074  if (s->mb_info)
2076  } else {
2077  s->frame_bits = 0;
2078  }
2079 
2080  /* release non-reference frames */
2081  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2082  if (!s->picture[i].reference)
2083  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2084  }
2085 
2086  av_assert1((s->frame_bits & 7) == 0);
2087 
2088  pkt->size = s->frame_bits / 8;
2089  *got_packet = !!pkt->size;
2090  return 0;
2091 }
2092 
/* Zero out a block consisting only of a few isolated +-1 coefficients when
 * its "visual importance" score stays below the threshold — such blocks cost
 * bits but contribute almost nothing visually.
 * NOTE(review): the first signature line (doxygen 2093, presumably
 * "static void dct_single_coeff_elimination(MpegEncContext *s," — TODO
 * confirm) is missing from this extraction. A negative threshold means the
 * DC coefficient participates too (and the magnitude is used). */
2094  int n, int threshold)
2095 {
 /* score contribution per preceding zero-run length; long runs score 0 */
2096  static const char tab[64] = {
2097  3, 2, 2, 1, 1, 1, 1, 1,
2098  1, 1, 1, 1, 1, 1, 1, 1,
2099  1, 1, 1, 1, 1, 1, 1, 1,
2100  0, 0, 0, 0, 0, 0, 0, 0,
2101  0, 0, 0, 0, 0, 0, 0, 0,
2102  0, 0, 0, 0, 0, 0, 0, 0,
2103  0, 0, 0, 0, 0, 0, 0, 0,
2104  0, 0, 0, 0, 0, 0, 0, 0
2105  };
2106  int score = 0;
2107  int run = 0;
2108  int i;
2109  int16_t *block = s->block[n];
2110  const int last_index = s->block_last_index[n];
2111  int skip_dc;
2112 
2113  if (threshold < 0) {
2114  skip_dc = 0;
2115  threshold = -threshold;
2116  } else
2117  skip_dc = 1;
2118 
2119  /* Are all we could set to zero already zero? */
2120  if (last_index <= skip_dc - 1)
2121  return;
2122 
 /* any |coefficient| > 1 disqualifies the block immediately */
2123  for (i = 0; i <= last_index; i++) {
2124  const int j = s->intra_scantable.permutated[i];
2125  const int level = FFABS(block[j]);
2126  if (level == 1) {
2127  if (skip_dc && i == 0)
2128  continue;
2129  score += tab[run];
2130  run = 0;
2131  } else if (level > 1) {
2132  return;
2133  } else {
2134  run++;
2135  }
2136  }
2137  if (score >= threshold)
2138  return;
 /* below threshold: clear every eligible coefficient */
2139  for (i = skip_dc; i <= last_index; i++) {
2140  const int j = s->intra_scantable.permutated[i];
2141  block[j] = 0;
2142  }
 /* keep last_index consistent with whether a DC coefficient remains */
2143  if (block[0])
2144  s->block_last_index[n] = 0;
2145  else
2146  s->block_last_index[n] = -1;
2147 }
2148 
2149 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2150  int last_index)
2151 {
2152  int i;
2153  const int maxlevel = s->max_qcoeff;
2154  const int minlevel = s->min_qcoeff;
2155  int overflow = 0;
2156 
2157  if (s->mb_intra) {
2158  i = 1; // skip clipping of intra dc
2159  } else
2160  i = 0;
2161 
2162  for (; i <= last_index; i++) {
2163  const int j = s->intra_scantable.permutated[i];
2164  int level = block[j];
2165 
2166  if (level > maxlevel) {
2167  level = maxlevel;
2168  overflow++;
2169  } else if (level < minlevel) {
2170  level = minlevel;
2171  overflow++;
2172  }
2173 
2174  block[j] = level;
2175  }
2176 
2177  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2178  av_log(s->avctx, AV_LOG_INFO,
2179  "warning, clipping %d dct coefficients to %d..%d\n",
2180  overflow, minlevel, maxlevel);
2181 }
2182 
/* Compute an 8x8 table of perceptual weights from local pixel activity:
 * each entry is derived from the standard deviation of the pixel's 3x3
 * neighbourhood (clipped to the block), scaled by 36 / count via the
 * project's integer square root ff_sqrt(). Flat areas get low weights,
 * textured areas high ones. */
2183 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2184 {
2185  int x, y;
2186  // FIXME optimize
2187  for (y = 0; y < 8; y++) {
2188  for (x = 0; x < 8; x++) {
2189  int x2, y2;
2190  int sum = 0;
2191  int sqr = 0;
2192  int count = 0;
2193 
 /* 3x3 neighbourhood, clamped to the 8x8 block boundaries */
2194  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2195  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2196  int v = ptr[x2 + y2 * stride];
2197  sum += v;
2198  sqr += v * v;
2199  count++;
2200  }
2201  }
 /* count*sqr - sum*sum == count^2 * variance (always >= 0) */
2202  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2203  }
2204  }
2205 }
2206 
2208  int motion_x, int motion_y,
2209  int mb_block_height,
2210  int mb_block_width,
2211  int mb_block_count)
2212 {
2213  int16_t weight[12][64];
2214  int16_t orig[12][64];
2215  const int mb_x = s->mb_x;
2216  const int mb_y = s->mb_y;
2217  int i;
2218  int skip_dct[12];
2219  int dct_offset = s->linesize * 8; // default for progressive frames
2220  int uv_dct_offset = s->uvlinesize * 8;
2221  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2222  ptrdiff_t wrap_y, wrap_c;
2223 
2224  for (i = 0; i < mb_block_count; i++)
2225  skip_dct[i] = s->skipdct;
2226 
2227  if (s->adaptive_quant) {
2228  const int last_qp = s->qscale;
2229  const int mb_xy = mb_x + mb_y * s->mb_stride;
2230 
2231  s->lambda = s->lambda_table[mb_xy];
2232  update_qscale(s);
2233 
2234  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2235  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2236  s->dquant = s->qscale - last_qp;
2237 
2238  if (s->out_format == FMT_H263) {
2239  s->dquant = av_clip(s->dquant, -2, 2);
2240 
2241  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2242  if (!s->mb_intra) {
2243  if (s->pict_type == AV_PICTURE_TYPE_B) {
2244  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2245  s->dquant = 0;
2246  }
2247  if (s->mv_type == MV_TYPE_8X8)
2248  s->dquant = 0;
2249  }
2250  }
2251  }
2252  }
2253  ff_set_qscale(s, last_qp + s->dquant);
2254  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2255  ff_set_qscale(s, s->qscale + s->dquant);
2256 
2257  wrap_y = s->linesize;
2258  wrap_c = s->uvlinesize;
2259  ptr_y = s->new_picture.f->data[0] +
2260  (mb_y * 16 * wrap_y) + mb_x * 16;
2261  ptr_cb = s->new_picture.f->data[1] +
2262  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2263  ptr_cr = s->new_picture.f->data[2] +
2264  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2265 
2266  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2267  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2268  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2269  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2270  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2271  wrap_y, wrap_y,
2272  16, 16, mb_x * 16, mb_y * 16,
2273  s->width, s->height);
2274  ptr_y = ebuf;
2275  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2276  wrap_c, wrap_c,
2277  mb_block_width, mb_block_height,
2278  mb_x * mb_block_width, mb_y * mb_block_height,
2279  cw, ch);
2280  ptr_cb = ebuf + 16 * wrap_y;
2281  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2282  wrap_c, wrap_c,
2283  mb_block_width, mb_block_height,
2284  mb_x * mb_block_width, mb_y * mb_block_height,
2285  cw, ch);
2286  ptr_cr = ebuf + 16 * wrap_y + 16;
2287  }
2288 
2289  if (s->mb_intra) {
2291  int progressive_score, interlaced_score;
2292 
2293  s->interlaced_dct = 0;
2294  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2295  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2296  NULL, wrap_y, 8) - 400;
2297 
2298  if (progressive_score > 0) {
2299  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2300  NULL, wrap_y * 2, 8) +
2301  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2302  NULL, wrap_y * 2, 8);
2303  if (progressive_score > interlaced_score) {
2304  s->interlaced_dct = 1;
2305 
2306  dct_offset = wrap_y;
2307  uv_dct_offset = wrap_c;
2308  wrap_y <<= 1;
2309  if (s->chroma_format == CHROMA_422 ||
2310  s->chroma_format == CHROMA_444)
2311  wrap_c <<= 1;
2312  }
2313  }
2314  }
2315 
2316  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2317  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2318  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2319  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2320 
2321  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2322  skip_dct[4] = 1;
2323  skip_dct[5] = 1;
2324  } else {
2325  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2326  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2327  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2328  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2329  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2330  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2331  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2332  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2333  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2334  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2335  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2336  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2337  }
2338  }
2339  } else {
2340  op_pixels_func (*op_pix)[4];
2341  qpel_mc_func (*op_qpix)[16];
2342  uint8_t *dest_y, *dest_cb, *dest_cr;
2343 
2344  dest_y = s->dest[0];
2345  dest_cb = s->dest[1];
2346  dest_cr = s->dest[2];
2347 
2348  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2349  op_pix = s->hdsp.put_pixels_tab;
2350  op_qpix = s->qdsp.put_qpel_pixels_tab;
2351  } else {
2352  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2353  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2354  }
2355 
2356  if (s->mv_dir & MV_DIR_FORWARD) {
2357  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2358  s->last_picture.f->data,
2359  op_pix, op_qpix);
2360  op_pix = s->hdsp.avg_pixels_tab;
2361  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2362  }
2363  if (s->mv_dir & MV_DIR_BACKWARD) {
2364  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2365  s->next_picture.f->data,
2366  op_pix, op_qpix);
2367  }
2368 
2370  int progressive_score, interlaced_score;
2371 
2372  s->interlaced_dct = 0;
2373  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2374  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2375  ptr_y + wrap_y * 8,
2376  wrap_y, 8) - 400;
2377 
2378  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2379  progressive_score -= 400;
2380 
2381  if (progressive_score > 0) {
2382  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2383  wrap_y * 2, 8) +
2384  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2385  ptr_y + wrap_y,
2386  wrap_y * 2, 8);
2387 
2388  if (progressive_score > interlaced_score) {
2389  s->interlaced_dct = 1;
2390 
2391  dct_offset = wrap_y;
2392  uv_dct_offset = wrap_c;
2393  wrap_y <<= 1;
2394  if (s->chroma_format == CHROMA_422)
2395  wrap_c <<= 1;
2396  }
2397  }
2398  }
2399 
2400  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2401  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2402  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2403  dest_y + dct_offset, wrap_y);
2404  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2405  dest_y + dct_offset + 8, wrap_y);
2406 
2407  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2408  skip_dct[4] = 1;
2409  skip_dct[5] = 1;
2410  } else {
2411  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2412  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2413  if (!s->chroma_y_shift) { /* 422 */
2414  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2415  dest_cb + uv_dct_offset, wrap_c);
2416  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2417  dest_cr + uv_dct_offset, wrap_c);
2418  }
2419  }
2420  /* pre quantization */
2421  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2422  2 * s->qscale * s->qscale) {
2423  // FIXME optimize
2424  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2425  skip_dct[0] = 1;
2426  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2427  skip_dct[1] = 1;
2428  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2429  wrap_y, 8) < 20 * s->qscale)
2430  skip_dct[2] = 1;
2431  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2432  wrap_y, 8) < 20 * s->qscale)
2433  skip_dct[3] = 1;
2434  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2435  skip_dct[4] = 1;
2436  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2437  skip_dct[5] = 1;
2438  if (!s->chroma_y_shift) { /* 422 */
2439  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2440  dest_cb + uv_dct_offset,
2441  wrap_c, 8) < 20 * s->qscale)
2442  skip_dct[6] = 1;
2443  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2444  dest_cr + uv_dct_offset,
2445  wrap_c, 8) < 20 * s->qscale)
2446  skip_dct[7] = 1;
2447  }
2448  }
2449  }
2450 
2451  if (s->quantizer_noise_shaping) {
2452  if (!skip_dct[0])
2453  get_visual_weight(weight[0], ptr_y , wrap_y);
2454  if (!skip_dct[1])
2455  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2456  if (!skip_dct[2])
2457  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2458  if (!skip_dct[3])
2459  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2460  if (!skip_dct[4])
2461  get_visual_weight(weight[4], ptr_cb , wrap_c);
2462  if (!skip_dct[5])
2463  get_visual_weight(weight[5], ptr_cr , wrap_c);
2464  if (!s->chroma_y_shift) { /* 422 */
2465  if (!skip_dct[6])
2466  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2467  wrap_c);
2468  if (!skip_dct[7])
2469  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2470  wrap_c);
2471  }
2472  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2473  }
2474 
2475  /* DCT & quantize */
2476  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2477  {
2478  for (i = 0; i < mb_block_count; i++) {
2479  if (!skip_dct[i]) {
2480  int overflow;
2481  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2482  // FIXME we could decide to change to quantizer instead of
2483  // clipping
2484  // JS: I don't think that would be a good idea it could lower
2485  // quality instead of improve it. Just INTRADC clipping
2486  // deserves changes in quantizer
2487  if (overflow)
2488  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2489  } else
2490  s->block_last_index[i] = -1;
2491  }
2492  if (s->quantizer_noise_shaping) {
2493  for (i = 0; i < mb_block_count; i++) {
2494  if (!skip_dct[i]) {
2495  s->block_last_index[i] =
2496  dct_quantize_refine(s, s->block[i], weight[i],
2497  orig[i], i, s->qscale);
2498  }
2499  }
2500  }
2501 
2502  if (s->luma_elim_threshold && !s->mb_intra)
2503  for (i = 0; i < 4; i++)
2505  if (s->chroma_elim_threshold && !s->mb_intra)
2506  for (i = 4; i < mb_block_count; i++)
2508 
2509  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2510  for (i = 0; i < mb_block_count; i++) {
2511  if (s->block_last_index[i] == -1)
2512  s->coded_score[i] = INT_MAX / 256;
2513  }
2514  }
2515  }
2516 
2517  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2518  s->block_last_index[4] =
2519  s->block_last_index[5] = 0;
2520  s->block[4][0] =
2521  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2522  if (!s->chroma_y_shift) { /* 422 / 444 */
2523  for (i=6; i<12; i++) {
2524  s->block_last_index[i] = 0;
2525  s->block[i][0] = s->block[4][0];
2526  }
2527  }
2528  }
2529 
2530  // non c quantize code returns incorrect block_last_index FIXME
2531  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2532  for (i = 0; i < mb_block_count; i++) {
2533  int j;
2534  if (s->block_last_index[i] > 0) {
2535  for (j = 63; j > 0; j--) {
2536  if (s->block[i][s->intra_scantable.permutated[j]])
2537  break;
2538  }
2539  s->block_last_index[i] = j;
2540  }
2541  }
2542  }
2543 
2544  /* huffman encode */
2545  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2549  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2550  break;
2551  case AV_CODEC_ID_MPEG4:
2553  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2554  break;
2555  case AV_CODEC_ID_MSMPEG4V2:
2556  case AV_CODEC_ID_MSMPEG4V3:
2557  case AV_CODEC_ID_WMV1:
2559  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2560  break;
2561  case AV_CODEC_ID_WMV2:
2562  if (CONFIG_WMV2_ENCODER)
2563  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2564  break;
2565  case AV_CODEC_ID_H261:
2566  if (CONFIG_H261_ENCODER)
2567  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2568  break;
2569  case AV_CODEC_ID_H263:
2570  case AV_CODEC_ID_H263P:
2571  case AV_CODEC_ID_FLV1:
2572  case AV_CODEC_ID_RV10:
2573  case AV_CODEC_ID_RV20:
2574  if (CONFIG_H263_ENCODER)
2575  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2576  break;
2577  case AV_CODEC_ID_MJPEG:
2578  case AV_CODEC_ID_AMV:
2580  ff_mjpeg_encode_mb(s, s->block);
2581  break;
2582  default:
2583  av_assert1(0);
2584  }
2585 }
2586 
2587 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2588 {
2589  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2590  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2591  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2592 }
2593 
    /* --- body of copy_context_before_encode(d, s, type) ---
     * Snapshots the per-macroblock encoder state of s into d so that
     * encode_mb_hq() can restore it before re-trying the macroblock with a
     * different coding mode.
     * NOTE(review): the function signature line (orig. line 2594) is missing
     * from this extract (doc-generator artifact) -- confirm against the
     * FFmpeg source. */
    int i;

    /* MV predictors: 2 directions x 2 fields x (x,y). */
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    /* DC predictors for the Y, Cb, Cr planes. */
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    /* Fresh trial starts counting from zero bits. */
    d->last_bits= 0;

    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    /* NOTE(review): one statement (orig. line 2619) is missing here in this
     * extract -- verify against the FFmpeg source. */
}
2621 
    /* --- body of copy_context_after_encode(d, s, type) ---
     * Copies back into d the state produced by encoding a candidate
     * macroblock in s; encode_mb_hq() uses this to remember the best
     * attempt so far.
     * NOTE(review): the function signature line (orig. line 2622) is
     * missing from this extract (doc-generator artifact). */
    int i;

    /* Current MVs (2 dirs x 4 blocks x (x,y)) and MV predictors. */
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    /* DC predictors for the Y, Cb, Cr planes. */
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    /* The bit writers themselves are part of the saved state. */
    d->pb= s->pb;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    /* NOTE(review): one statement (orig. line 2655) is missing here in
     * this extract -- verify against the FFmpeg source. */
    d->qscale= s->qscale;

    /* NOTE(review): one statement (orig. line 2658) is missing here in
     * this extract -- verify against the FFmpeg source. */
}
2660 
/* Try encoding the current macroblock with the given coding mode/MVs and
 * keep the result only if its (bit or RD) score beats *dmin. Uses a
 * double-buffer indexed by *next_block so the best attempt is preserved. */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                /* NOTE(review): a parameter line (orig. line 2662,
                                 * presumably the PutBitContext arrays pb/pb2/tex_pb)
                                 * is missing from this extract. */
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* Save state this trial may clobber. */
    copy_context_before_encode(s, backup, type);

    /* Encode into block set / bit buffer *next_block. */
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* Second candidate: reconstruct into the scratchpad so the pixels
         * of the current best attempt are not overwritten. */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* Cost in bits of this attempt. */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* NOTE(review): one statement (orig. line 2694, presumably the
         * macroblock reconstruction call needed for the SSE term) is
         * missing here in this extract. */

        /* Rate-distortion: bits * lambda^2 + distortion. */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        /* New best attempt: keep it and flip the double buffer. */
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
2711 
2712 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2713  const uint32_t *sq = ff_square_tab + 256;
2714  int acc=0;
2715  int x,y;
2716 
2717  if(w==16 && h==16)
2718  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2719  else if(w==8 && h==8)
2720  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2721 
2722  for(y=0; y<h; y++){
2723  for(x=0; x<w; x++){
2724  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2725  }
2726  }
2727 
2728  av_assert2(acc>=0);
2729 
2730  return acc;
2731 }
2732 
2733 static int sse_mb(MpegEncContext *s){
2734  int w= 16;
2735  int h= 16;
2736 
2737  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2738  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2739 
2740  if(w==16 && h==16)
2741  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2742  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2743  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2744  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2745  }else{
2746  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2747  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2748  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2749  }
2750  else
2751  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2752  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2753  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2754 }
2755 
    /* --- body of the pre-pass motion estimation slice thread ---
     * Runs a coarse motion-estimation pre-pass over this slice context's
     * macroblock rows, iterating bottom-up / right-to-left.
     * NOTE(review): the function signature line (orig. line 2756) is
     * missing from this extract (doc-generator artifact). */
    MpegEncContext *s= *(void**)arg;


    s->me.pre_pass=1;
    /* Pre-pass uses its own (usually smaller) diamond search size. */
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            /* NOTE(review): the per-macroblock estimation call (orig.
             * line 2765) is missing here in this extract. */
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2774 
    /* --- body of estimate_motion_thread() ---
     * Full motion-estimation pass over this slice context's macroblock
     * rows, storing MVs and mb_type in the context.
     * NOTE(review): the function signature line (orig. line 2775) and a
     * statement at orig. line 2778 are missing from this extract
     * (doc-generator artifact). */
    MpegEncContext *s= *(void**)arg;


    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        /* NOTE(review): a block-index initialization call (orig. line
         * 2784) is missing here in this extract. */
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* Advance the per-block indices by one macroblock. */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            /* NOTE(review): the B-frame branch (orig. lines 2792-2793)
             * and the else-branch statement (orig. line 2795) are missing
             * here, leaving a dangling "else" -- verify against the
             * FFmpeg source. */
            else
        }
        s->first_slice_line=0;
    }
    return 0;
}
2801 
/* Compute per-macroblock luma variance and mean for this slice context;
 * the results feed rate control / adaptive quantization.
 * NOTE(review): one statement (orig. line 2806) is missing from this
 * extract (doc-generator artifact). */
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;


    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            /* Top-left pixel of this macroblock in the source luma plane. */
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            /* Variance ~ E[x^2] - E[x]^2, scaled; +500+128 applies a bias
             * and rounding before the >>8 normalization over 256 pixels. */
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
2826 
    /* --- body of write_slice_end(MpegEncContext *s) ---
     * Terminates the current slice: emits codec-specific stuffing and
     * flushes the bit writer.
     * NOTE(review): several lines are missing from this extract
     * (doc-generator artifact): the signature and the opening of the
     * MPEG-4 branch (orig. 2827-2828), the partitioned-frame merge call
     * (orig. 2830), the MJPEG stuffing call (orig. 2835) and a
     * byte-alignment call (orig. 2838). The code below is kept verbatim;
     * its apparent brace imbalance is an artifact of those missing
     * lines. */
    if(s->partitioned_frame){
    }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
    }

    flush_put_bits(&s->pb);

    /* In pass-1 rate control, account the slice overhead as misc bits. */
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2844 
/* --- body of write_mb_info() ---
 * Fills the most recently reserved 12-byte macroblock-info record with the
 * current bit offset, quantizer, GOB number, macroblock address and motion
 * vector predictors (H.263 mb_info side data).
 * NOTE(review): the signature line (orig. line 2845), presumably
 * "static void write_mb_info(MpegEncContext *s)", is missing from this
 * extract (doc-generator artifact). */
{
    /* Point at the last reserved 12-byte slot in the info buffer. */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2864 
2865 static void update_mb_info(MpegEncContext *s, int startcode)
2866 {
2867  if (!s->mb_info)
2868  return;
2869  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2870  s->mb_info_size += 12;
2871  s->prev_mb_info = s->last_mb_info;
2872  }
2873  if (startcode) {
2874  s->prev_mb_info = put_bits_count(&s->pb)/8;
2875  /* This might have incremented mb_info_size above, and we return without
2876  * actually writing any info into that slot yet. But in that case,
2877  * this will be called again at the start of the after writing the
2878  * start code, actually writing the mb info. */
2879  return;
2880  }
2881 
2882  s->last_mb_info = put_bits_count(&s->pb)/8;
2883  if (!s->mb_info_size)
2884  s->mb_info_size += 12;
2885  write_mb_info(s);
2886 }
2887 
2888 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2889 {
2890  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2891  && s->slice_context_count == 1
2892  && s->pb.buf == s->avctx->internal->byte_buffer) {
2893  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2894  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2895 
2896  uint8_t *new_buffer = NULL;
2897  int new_buffer_size = 0;
2898 
2899  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2900  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2901  return AVERROR(ENOMEM);
2902  }
2903 
2904  emms_c();
2905 
2906  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2907  s->avctx->internal->byte_buffer_size + size_increase);
2908  if (!new_buffer)
2909  return AVERROR(ENOMEM);
2910 
2911  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2913  s->avctx->internal->byte_buffer = new_buffer;
2914  s->avctx->internal->byte_buffer_size = new_buffer_size;
2915  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2916  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2917  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2918  }
2919  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2920  return AVERROR(EINVAL);
2921  return 0;
2922 }
2923 
2924 static int encode_thread(AVCodecContext *c, void *arg){
2925  MpegEncContext *s= *(void**)arg;
2926  int mb_x, mb_y;
2927  int chr_h= 16>>s->chroma_y_shift;
2928  int i, j;
2929  MpegEncContext best_s = { 0 }, backup_s;
2930  uint8_t bit_buf[2][MAX_MB_BYTES];
2931  uint8_t bit_buf2[2][MAX_MB_BYTES];
2932  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2933  PutBitContext pb[2], pb2[2], tex_pb[2];
2934 
2936 
2937  for(i=0; i<2; i++){
2938  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2939  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2940  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2941  }
2942 
2943  s->last_bits= put_bits_count(&s->pb);
2944  s->mv_bits=0;
2945  s->misc_bits=0;
2946  s->i_tex_bits=0;
2947  s->p_tex_bits=0;
2948  s->i_count=0;
2949  s->f_count=0;
2950  s->b_count=0;
2951  s->skip_count=0;
2952 
2953  for(i=0; i<3; i++){
2954  /* init last dc values */
2955  /* note: quant matrix value (8) is implied here */
2956  s->last_dc[i] = 128 << s->intra_dc_precision;
2957 
2959  }
2960  if(s->codec_id==AV_CODEC_ID_AMV){
2961  s->last_dc[0] = 128*8/13;
2962  s->last_dc[1] = 128*8/14;
2963  s->last_dc[2] = 128*8/14;
2964  }
2965  s->mb_skip_run = 0;
2966  memset(s->last_mv, 0, sizeof(s->last_mv));
2967 
2968  s->last_mv_dir = 0;
2969 
2970  switch(s->codec_id){
2971  case AV_CODEC_ID_H263:
2972  case AV_CODEC_ID_H263P:
2973  case AV_CODEC_ID_FLV1:
2974  if (CONFIG_H263_ENCODER)
2975  s->gob_index = H263_GOB_HEIGHT(s->height);
2976  break;
2977  case AV_CODEC_ID_MPEG4:
2980  break;
2981  }
2982 
2983  s->resync_mb_x=0;
2984  s->resync_mb_y=0;
2985  s->first_slice_line = 1;
2986  s->ptr_lastgob = s->pb.buf;
2987  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2988  s->mb_x=0;
2989  s->mb_y= mb_y;
2990 
2991  ff_set_qscale(s, s->qscale);
2993 
2994  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2995  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2996  int mb_type= s->mb_type[xy];
2997 // int d;
2998  int dmin= INT_MAX;
2999  int dir;
3000  int size_increase = s->avctx->internal->byte_buffer_size/4
3001  + s->mb_width*MAX_MB_BYTES;
3002 
3003  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3004  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3005  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3006  return -1;
3007  }
3008  if(s->data_partitioning){
3009  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3010  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3011  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3012  return -1;
3013  }
3014  }
3015 
3016  s->mb_x = mb_x;
3017  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3019 
3022  xy= s->mb_y*s->mb_stride + s->mb_x;
3023  mb_type= s->mb_type[xy];
3024  }
3025 
3026  /* write gob / video packet header */
3027  if(s->rtp_mode){
3028  int current_packet_size, is_gob_start;
3029 
3030  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3031 
3032  is_gob_start = s->rtp_payload_size &&
3033  current_packet_size >= s->rtp_payload_size &&
3034  mb_y + mb_x > 0;
3035 
3036  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3037 
3038  switch(s->codec_id){
3039  case AV_CODEC_ID_H263:
3040  case AV_CODEC_ID_H263P:
3041  if(!s->h263_slice_structured)
3042  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3043  break;
3045  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3047  if(s->mb_skip_run) is_gob_start=0;
3048  break;
3049  case AV_CODEC_ID_MJPEG:
3050  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3051  break;
3052  }
3053 
3054  if(is_gob_start){
3055  if(s->start_mb_y != mb_y || mb_x!=0){
3056  write_slice_end(s);
3057 
3060  }
3061  }
3062 
3063  av_assert2((put_bits_count(&s->pb)&7) == 0);
3064  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3065 
3066  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3067  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3068  int d = 100 / s->error_rate;
3069  if(r % d == 0){
3070  current_packet_size=0;
3071  s->pb.buf_ptr= s->ptr_lastgob;
3072  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3073  }
3074  }
3075 
3076 #if FF_API_RTP_CALLBACK
3078  if (s->avctx->rtp_callback){
3079  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3080  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3081  }
3083 #endif
3084  update_mb_info(s, 1);
3085 
3086  switch(s->codec_id){
3087  case AV_CODEC_ID_MPEG4:
3088  if (CONFIG_MPEG4_ENCODER) {
3091  }
3092  break;
3098  }
3099  break;
3100  case AV_CODEC_ID_H263:
3101  case AV_CODEC_ID_H263P:
3102  if (CONFIG_H263_ENCODER)
3103  ff_h263_encode_gob_header(s, mb_y);
3104  break;
3105  }
3106 
3107  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3108  int bits= put_bits_count(&s->pb);
3109  s->misc_bits+= bits - s->last_bits;
3110  s->last_bits= bits;
3111  }
3112 
3113  s->ptr_lastgob += current_packet_size;
3114  s->first_slice_line=1;
3115  s->resync_mb_x=mb_x;
3116  s->resync_mb_y=mb_y;
3117  }
3118  }
3119 
3120  if( (s->resync_mb_x == s->mb_x)
3121  && s->resync_mb_y+1 == s->mb_y){
3122  s->first_slice_line=0;
3123  }
3124 
3125  s->mb_skipped=0;
3126  s->dquant=0; //only for QP_RD
3127 
3128  update_mb_info(s, 0);
3129 
3130  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3131  int next_block=0;
3132  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3133 
3134  copy_context_before_encode(&backup_s, s, -1);
3135  backup_s.pb= s->pb;
3138  if(s->data_partitioning){
3139  backup_s.pb2= s->pb2;
3140  backup_s.tex_pb= s->tex_pb;
3141  }
3142 
3143  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3144  s->mv_dir = MV_DIR_FORWARD;
3145  s->mv_type = MV_TYPE_16X16;
3146  s->mb_intra= 0;
3147  s->mv[0][0][0] = s->p_mv_table[xy][0];
3148  s->mv[0][0][1] = s->p_mv_table[xy][1];
3149  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3150  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3151  }
3152  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3153  s->mv_dir = MV_DIR_FORWARD;
3154  s->mv_type = MV_TYPE_FIELD;
3155  s->mb_intra= 0;
3156  for(i=0; i<2; i++){
3157  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3158  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3159  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3160  }
3161  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3162  &dmin, &next_block, 0, 0);
3163  }
3164  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3165  s->mv_dir = MV_DIR_FORWARD;
3166  s->mv_type = MV_TYPE_16X16;
3167  s->mb_intra= 0;
3168  s->mv[0][0][0] = 0;
3169  s->mv[0][0][1] = 0;
3170  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3171  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3172  }
3173  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3174  s->mv_dir = MV_DIR_FORWARD;
3175  s->mv_type = MV_TYPE_8X8;
3176  s->mb_intra= 0;
3177  for(i=0; i<4; i++){
3178  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3179  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3180  }
3181  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3182  &dmin, &next_block, 0, 0);
3183  }
3184  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3185  s->mv_dir = MV_DIR_FORWARD;
3186  s->mv_type = MV_TYPE_16X16;
3187  s->mb_intra= 0;
3188  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3189  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3190  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3191  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3192  }
3193  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3194  s->mv_dir = MV_DIR_BACKWARD;
3195  s->mv_type = MV_TYPE_16X16;
3196  s->mb_intra= 0;
3197  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3198  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3199  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3200  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3201  }
3202  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3204  s->mv_type = MV_TYPE_16X16;
3205  s->mb_intra= 0;
3206  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3207  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3208  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3209  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3210  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3211  &dmin, &next_block, 0, 0);
3212  }
3213  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3214  s->mv_dir = MV_DIR_FORWARD;
3215  s->mv_type = MV_TYPE_FIELD;
3216  s->mb_intra= 0;
3217  for(i=0; i<2; i++){
3218  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3219  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3220  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3221  }
3222  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3223  &dmin, &next_block, 0, 0);
3224  }
3225  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3226  s->mv_dir = MV_DIR_BACKWARD;
3227  s->mv_type = MV_TYPE_FIELD;
3228  s->mb_intra= 0;
3229  for(i=0; i<2; i++){
3230  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3231  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3232  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3233  }
3234  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3235  &dmin, &next_block, 0, 0);
3236  }
3237  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3239  s->mv_type = MV_TYPE_FIELD;
3240  s->mb_intra= 0;
3241  for(dir=0; dir<2; dir++){
3242  for(i=0; i<2; i++){
3243  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3244  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3245  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3246  }
3247  }
3248  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3249  &dmin, &next_block, 0, 0);
3250  }
3251  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3252  s->mv_dir = 0;
3253  s->mv_type = MV_TYPE_16X16;
3254  s->mb_intra= 1;
3255  s->mv[0][0][0] = 0;
3256  s->mv[0][0][1] = 0;
3257  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3258  &dmin, &next_block, 0, 0);
3259  if(s->h263_pred || s->h263_aic){
3260  if(best_s.mb_intra)
3261  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3262  else
3263  ff_clean_intra_table_entries(s); //old mode?
3264  }
3265  }
3266 
3267  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3268  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3269  const int last_qp= backup_s.qscale;
3270  int qpi, qp, dc[6];
3271  int16_t ac[6][16];
3272  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3273  static const int dquant_tab[4]={-1,1,-2,2};
3274  int storecoefs = s->mb_intra && s->dc_val[0];
3275 
3276  av_assert2(backup_s.dquant == 0);
3277 
3278  //FIXME intra
3279  s->mv_dir= best_s.mv_dir;
3280  s->mv_type = MV_TYPE_16X16;
3281  s->mb_intra= best_s.mb_intra;
3282  s->mv[0][0][0] = best_s.mv[0][0][0];
3283  s->mv[0][0][1] = best_s.mv[0][0][1];
3284  s->mv[1][0][0] = best_s.mv[1][0][0];
3285  s->mv[1][0][1] = best_s.mv[1][0][1];
3286 
3287  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3288  for(; qpi<4; qpi++){
3289  int dquant= dquant_tab[qpi];
3290  qp= last_qp + dquant;
3291  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3292  continue;
3293  backup_s.dquant= dquant;
3294  if(storecoefs){
3295  for(i=0; i<6; i++){
3296  dc[i]= s->dc_val[0][ s->block_index[i] ];
3297  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3298  }
3299  }
3300 
3301  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3302  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3303  if(best_s.qscale != qp){
3304  if(storecoefs){
3305  for(i=0; i<6; i++){
3306  s->dc_val[0][ s->block_index[i] ]= dc[i];
3307  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3308  }
3309  }
3310  }
3311  }
3312  }
3313  }
3315  int mx= s->b_direct_mv_table[xy][0];
3316  int my= s->b_direct_mv_table[xy][1];
3317 
3318  backup_s.dquant = 0;
3320  s->mb_intra= 0;
3321  ff_mpeg4_set_direct_mv(s, mx, my);
3322  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3323  &dmin, &next_block, mx, my);
3324  }
3326  backup_s.dquant = 0;
3328  s->mb_intra= 0;
3329  ff_mpeg4_set_direct_mv(s, 0, 0);
3330  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3331  &dmin, &next_block, 0, 0);
3332  }
3333  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3334  int coded=0;
3335  for(i=0; i<6; i++)
3336  coded |= s->block_last_index[i];
3337  if(coded){
3338  int mx,my;
3339  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3340  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3341  mx=my=0; //FIXME find the one we actually used
3342  ff_mpeg4_set_direct_mv(s, mx, my);
3343  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3344  mx= s->mv[1][0][0];
3345  my= s->mv[1][0][1];
3346  }else{
3347  mx= s->mv[0][0][0];
3348  my= s->mv[0][0][1];
3349  }
3350 
3351  s->mv_dir= best_s.mv_dir;
3352  s->mv_type = best_s.mv_type;
3353  s->mb_intra= 0;
3354 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3355  s->mv[0][0][1] = best_s.mv[0][0][1];
3356  s->mv[1][0][0] = best_s.mv[1][0][0];
3357  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3358  backup_s.dquant= 0;
3359  s->skipdct=1;
3360  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3361  &dmin, &next_block, mx, my);
3362  s->skipdct=0;
3363  }
3364  }
3365 
3366  s->current_picture.qscale_table[xy] = best_s.qscale;
3367 
3368  copy_context_after_encode(s, &best_s, -1);
3369 
3370  pb_bits_count= put_bits_count(&s->pb);
3371  flush_put_bits(&s->pb);
3372  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3373  s->pb= backup_s.pb;
3374 
3375  if(s->data_partitioning){
3376  pb2_bits_count= put_bits_count(&s->pb2);
3377  flush_put_bits(&s->pb2);
3378  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3379  s->pb2= backup_s.pb2;
3380 
3381  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3382  flush_put_bits(&s->tex_pb);
3383  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3384  s->tex_pb= backup_s.tex_pb;
3385  }
3386  s->last_bits= put_bits_count(&s->pb);
3387 
3388  if (CONFIG_H263_ENCODER &&
3391 
3392  if(next_block==0){ //FIXME 16 vs linesize16
3393  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3394  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3395  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3396  }
3397 
3400  } else {
3401  int motion_x = 0, motion_y = 0;
3403  // only one MB-Type possible
3404 
3405  switch(mb_type){
3407  s->mv_dir = 0;
3408  s->mb_intra= 1;
3409  motion_x= s->mv[0][0][0] = 0;
3410  motion_y= s->mv[0][0][1] = 0;
3411  break;
3413  s->mv_dir = MV_DIR_FORWARD;
3414  s->mb_intra= 0;
3415  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3416  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3417  break;
3419  s->mv_dir = MV_DIR_FORWARD;
3420  s->mv_type = MV_TYPE_FIELD;
3421  s->mb_intra= 0;
3422  for(i=0; i<2; i++){
3423  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3424  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3425  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3426  }
3427  break;
3429  s->mv_dir = MV_DIR_FORWARD;
3430  s->mv_type = MV_TYPE_8X8;
3431  s->mb_intra= 0;
3432  for(i=0; i<4; i++){
3433  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3434  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3435  }
3436  break;
3438  if (CONFIG_MPEG4_ENCODER) {
3440  s->mb_intra= 0;
3441  motion_x=s->b_direct_mv_table[xy][0];
3442  motion_y=s->b_direct_mv_table[xy][1];
3443  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3444  }
3445  break;
3447  if (CONFIG_MPEG4_ENCODER) {
3449  s->mb_intra= 0;
3450  ff_mpeg4_set_direct_mv(s, 0, 0);
3451  }
3452  break;
3455  s->mb_intra= 0;
3456  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3457  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3458  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3459  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3460  break;
3462  s->mv_dir = MV_DIR_BACKWARD;
3463  s->mb_intra= 0;
3464  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3465  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3466  break;
3468  s->mv_dir = MV_DIR_FORWARD;
3469  s->mb_intra= 0;
3470  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3471  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3472  break;
3474  s->mv_dir = MV_DIR_FORWARD;
3475  s->mv_type = MV_TYPE_FIELD;
3476  s->mb_intra= 0;
3477  for(i=0; i<2; i++){
3478  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3479  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3480  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3481  }
3482  break;
3484  s->mv_dir = MV_DIR_BACKWARD;
3485  s->mv_type = MV_TYPE_FIELD;
3486  s->mb_intra= 0;
3487  for(i=0; i<2; i++){
3488  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3489  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3490  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3491  }
3492  break;
3495  s->mv_type = MV_TYPE_FIELD;
3496  s->mb_intra= 0;
3497  for(dir=0; dir<2; dir++){
3498  for(i=0; i<2; i++){
3499  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3500  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3501  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3502  }
3503  }
3504  break;
3505  default:
3506  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3507  }
3508 
3509  encode_mb(s, motion_x, motion_y);
3510 
3511  // RAL: Update last macroblock type
3512  s->last_mv_dir = s->mv_dir;
3513 
3514  if (CONFIG_H263_ENCODER &&
3517 
3519  }
3520 
3521  /* clean the MV table in IPS frames for direct mode in B-frames */
3522  if(s->mb_intra /* && I,P,S_TYPE */){
3523  s->p_mv_table[xy][0]=0;
3524  s->p_mv_table[xy][1]=0;
3525  }
3526 
3527  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3528  int w= 16;
3529  int h= 16;
3530 
3531  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3532  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3533 
3535  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3536  s->dest[0], w, h, s->linesize);
3538  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3539  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3541  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3542  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3543  }
3544  if(s->loop_filter){
3547  }
3548  ff_dlog(s->avctx, "MB %d %d bits\n",
3549  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3550  }
3551  }
3552 
3553  //not beautiful here but we must write it before flushing so it has to be here
3556 
3557  write_slice_end(s);
3558 
3559 #if FF_API_RTP_CALLBACK
3561  /* Send the last GOB if RTP */
3562  if (s->avctx->rtp_callback) {
3563  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3564  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3565  /* Call the RTP callback to send the last GOB */
3566  emms_c();
3567  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3568  }
3570 #endif
3571 
3572  return 0;
3573 }
3574 
/* Accumulate a per-slice statistic into dst and clear it in src.
 * NOTE(review): deliberately two statements and not do{}while(0)-wrapped,
 * so it is only safe as a stand-alone statement -- which matches every
 * use below. */
3575 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the merge_context_after_me() signature line (orig. 3576)
 * appears to have been lost in extraction. The body below folds the
 * motion-estimation totals of a slice context (src) into the main
 * context (dst) after the parallel ME pass. */
3577  MERGE(me.scene_change_score);
3578  MERGE(me.mc_mb_var_sum_temp);
3579  MERGE(me.mb_var_sum_temp);
3580 }
3581 
/* merge_context_after_encode(): fold the per-slice bit/statistics counters
 * of src into dst and append the slice's bitstream to dst's PutBitContext.
 * NOTE(review): the function signature (orig. line 3582) and lines
 * 3596-3599 appear to have been dropped by the extraction. */
3583  int i;
3584 
3585  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3586  MERGE(dct_count[1]);
3587  MERGE(mv_bits);
3588  MERGE(i_tex_bits);
3589  MERGE(p_tex_bits);
3590  MERGE(i_count);
3591  MERGE(f_count);
3592  MERGE(b_count);
3593  MERGE(skip_count);
3594  MERGE(misc_bits);
3595  MERGE(er.error_count);
3600 
3601  if (dst->noise_reduction){
 /* noise reduction keeps a per-coefficient error accumulator (intra/inter) */
3602  for(i=0; i<64; i++){
3603  MERGE(dct_error_sum[0][i]);
3604  MERGE(dct_error_sum[1][i]);
3605  }
3606  }
3607 
 /* slice bitstreams must be byte-aligned before concatenation */
3608  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3609  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3610  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3611  flush_put_bits(&dst->pb);
3612 }
3613 
/* estimate_qp(): choose the quantizer/lambda for the current picture.
 * dry_run != 0 means "estimate only, do not consume next_lambda".
 * Returns 0 on success, -1 if rate control yields a negative quality.
 * NOTE(review): orig. lines 3616-3617, 3621, 3630-3631 and 3637 appear to
 * be missing from this extraction (the next_lambda -> quality assignment,
 * a quality clip, and the codec-specific qscale-table setup calls inside
 * the switch). Code below is kept byte-identical to what is visible. */
3614 static int estimate_qp(MpegEncContext *s, int dry_run){
3615  if (s->next_lambda){
3618  if(!dry_run) s->next_lambda= 0;
3619  } else if (!s->fixed_qscale) {
3620  int quality = ff_rate_estimate_qscale(s, dry_run);
3622  s->current_picture.f->quality = quality;
3623  if (s->current_picture.f->quality < 0)
3624  return -1;
3625  }
3626 
3627  if(s->adaptive_quant){
 /* per-codec initialization of the per-MB qscale table */
3628  switch(s->codec_id){
3629  case AV_CODEC_ID_MPEG4:
3632  break;
3633  case AV_CODEC_ID_H263:
3634  case AV_CODEC_ID_H263P:
3635  case AV_CODEC_ID_FLV1:
3636  if (CONFIG_H263_ENCODER)
3638  break;
3639  default:
3640  ff_init_qscale_tab(s);
3641  }
3642 
3643  s->lambda= s->lambda_table[0];
3644  //FIXME broken
3645  }else
3646  s->lambda = s->current_picture.f->quality;
3647  update_qscale(s);
3648  return 0;
3649 }
3650 
3651 /* must be called before writing the header */
/* NOTE(review): the set_frame_distances() signature (orig. lines
 * 3652-3653) appears to be missing from this extraction. The body below
 * derives the temporal distances used for B-frame prediction from the
 * picture pts: pp_time (distance between the two surrounding non-B
 * frames) and pb_time (distance from the previous non-B frame to the
 * current B frame), and tracks last_non_b_time. */
3654  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3655 
3656  if(s->pict_type==AV_PICTURE_TYPE_B){
3657  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3658  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3659  }else{
3660  s->pp_time= s->time - s->last_non_b_time;
3661  s->last_non_b_time= s->time;
3662  av_assert1(s->picture_number==0 || s->pp_time > 0);
3663  }
3664 }
3665 
/* encode_picture(): top-level per-picture encoding driver -- motion
 * estimation, f_code/b_code selection, qscale decision, matrix setup,
 * header writing and the parallel slice-encoding pass.
 * NOTE(review): the function signature (orig. line 3666) and numerous
 * interior lines (conditions and call lines) appear to have been dropped
 * by the extraction; several if/for bodies below are visibly missing
 * their contents. Code is kept byte-identical to what is visible. */
3667 {
3668  int i, ret;
3669  int bits;
3670  int context_count = s->slice_context_count;
3671 
3673 
3674  /* Reset the average MB variance */
3675  s->me.mb_var_sum_temp =
3676  s->me.mc_mb_var_sum_temp = 0;
3677 
3678  /* we need to initialize some time vars before we can encode B-frames */
3679  // RAL: Condition added for MPEG1VIDEO
3683  ff_set_mpeg4_time(s);
3684 
3685  s->me.scene_change_score=0;
3686 
3687 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3688 
3689  if(s->pict_type==AV_PICTURE_TYPE_I){
3690  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3691  else s->no_rounding=0;
3692  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3694  s->no_rounding ^= 1;
3695  }
3696 
3697  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3698  if (estimate_qp(s,1) < 0)
3699  return -1;
3700  ff_get_2pass_fcode(s);
3701  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3703  s->lambda= s->last_lambda_for[s->pict_type];
3704  else
3706  update_qscale(s);
3707  }
3708 
3714  }
3715 
3716  s->mb_intra=0; //for the rate distortion & bit compare functions
 /* propagate state into the per-slice thread contexts */
3717  for(i=1; i<context_count; i++){
3719  if (ret < 0)
3720  return ret;
3721  }
3722 
3723  if(ff_init_me(s)<0)
3724  return -1;
3725 
3726  /* Estimate motion for every MB */
3727  if(s->pict_type != AV_PICTURE_TYPE_I){
3728  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3729  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3730  if (s->pict_type != AV_PICTURE_TYPE_B) {
3731  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3732  s->me_pre == 2) {
3733  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3734  }
3735  }
3736 
3737  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3738  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3739  /* I-Frame */
3740  for(i=0; i<s->mb_stride*s->mb_height; i++)
3742 
3743  if(!s->fixed_qscale){
3744  /* finding spatial complexity for I-frame rate control */
3745  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3746  }
3747  }
3748  for(i=1; i<context_count; i++){
3750  }
3752  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3753  emms_c();
3754 
 /* scene-change handling: re-encode as I frame (condition line lost) */
3756  s->pict_type == AV_PICTURE_TYPE_P) {
3758  for(i=0; i<s->mb_stride*s->mb_height; i++)
3760  if(s->msmpeg4_version >= 3)
3761  s->no_rounding=1;
3762  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3764  }
3765 
3766  if(!s->umvplus){
3769 
3771  int a,b;
3772  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3774  s->f_code= FFMAX3(s->f_code, a, b);
3775  }
3776 
3780  int j;
3781  for(i=0; i<2; i++){
3782  for(j=0; j<2; j++)
3785  }
3786  }
3787  }
3788 
3789  if(s->pict_type==AV_PICTURE_TYPE_B){
3790  int a, b;
3791 
 /* choose forward/backward f_codes from the B-frame MV tables
  * (the ff_get_best_fcode() call lines appear lost) */
3794  s->f_code = FFMAX(a, b);
3795 
3798  s->b_code = FFMAX(a, b);
3799 
3805  int dir, j;
3806  for(dir=0; dir<2; dir++){
3807  for(i=0; i<2; i++){
3808  for(j=0; j<2; j++){
3811  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3812  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3813  }
3814  }
3815  }
3816  }
3817  }
3818  }
3819 
3820  if (estimate_qp(s, 0) < 0)
3821  return -1;
3822 
3823  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3824  s->pict_type == AV_PICTURE_TYPE_I &&
3825  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3826  s->qscale= 3; //reduce clipping problems
3827 
3828  if (s->out_format == FMT_MJPEG) {
 /* for MJPEG the quantizer is baked into the matrices */
3829  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3830  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3831 
3832  if (s->avctx->intra_matrix) {
3833  chroma_matrix =
3834  luma_matrix = s->avctx->intra_matrix;
3835  }
3836  if (s->avctx->chroma_intra_matrix)
3837  chroma_matrix = s->avctx->chroma_intra_matrix;
3838 
3839  /* for mjpeg, we do include qscale in the matrix */
3840  for(i=1;i<64;i++){
3841  int j = s->idsp.idct_permutation[i];
3842 
3843  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3844  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3845  }
3846  s->y_dc_scale_table=
3848  s->chroma_intra_matrix[0] =
3851  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3853  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3854  s->qscale= 8;
3855  }
3856  if(s->codec_id == AV_CODEC_ID_AMV){
 /* AMV uses fixed sp5x quantization tables and DC scales */
3857  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3858  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3859  for(i=1;i<64;i++){
3861 
3862  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3863  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3864  }
3865  s->y_dc_scale_table= y;
3866  s->c_dc_scale_table= c;
3867  s->intra_matrix[0] = 13;
3868  s->chroma_intra_matrix[0] = 14;
3870  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3872  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3873  s->qscale= 8;
3874  }
3875 
3876  //FIXME var duplication
3878  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3881 
3882  if (s->current_picture.f->key_frame)
3883  s->picture_in_gop_number=0;
3884 
3885  s->mb_x = s->mb_y = 0;
3886  s->last_bits= put_bits_count(&s->pb);
 /* write the per-format picture header */
3887  switch(s->out_format) {
3888  case FMT_MJPEG:
3892  break;
3893  case FMT_H261:
3894  if (CONFIG_H261_ENCODER)
3895  ff_h261_encode_picture_header(s, picture_number);
3896  break;
3897  case FMT_H263:
3899  ff_wmv2_encode_picture_header(s, picture_number);
3900  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3901  ff_msmpeg4_encode_picture_header(s, picture_number);
3902  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3903  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3904  if (ret < 0)
3905  return ret;
3906  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3907  ret = ff_rv10_encode_picture_header(s, picture_number);
3908  if (ret < 0)
3909  return ret;
3910  }
3911  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3912  ff_rv20_encode_picture_header(s, picture_number);
3913  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3914  ff_flv_encode_picture_header(s, picture_number);
3915  else if (CONFIG_H263_ENCODER)
3916  ff_h263_encode_picture_header(s, picture_number);
3917  break;
3918  case FMT_MPEG1:
3920  ff_mpeg1_encode_picture_header(s, picture_number);
3921  break;
3922  default:
3923  av_assert0(0);
3924  }
3925  bits= put_bits_count(&s->pb);
3926  s->header_bits= bits - s->last_bits;
3927 
3928  for(i=1; i<context_count; i++){
3930  }
 /* encode all slices in parallel, then merge their stats and bitstreams */
3931  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3932  for(i=1; i<context_count; i++){
3933  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3934  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3936  }
3937  emms_c();
3938  return 0;
3939 }
3940 
3941 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3942  const int intra= s->mb_intra;
3943  int i;
3944 
3945  s->dct_count[intra]++;
3946 
3947  for(i=0; i<64; i++){
3948  int level= block[i];
3949 
3950  if(level){
3951  if(level>0){
3952  s->dct_error_sum[intra][i] += level;
3953  level -= s->dct_offset[intra][i];
3954  if(level<0) level=0;
3955  }else{
3956  s->dct_error_sum[intra][i] -= level;
3957  level += s->dct_offset[intra][i];
3958  if(level>0) level=0;
3959  }
3960  block[i]= level;
3961  }
3962  }
3963 }
3964 
3966  int16_t *block, int n,
3967  int qscale, int *overflow){
3968  const int *qmat;
3969  const uint16_t *matrix;
3970  const uint8_t *scantable;
3971  const uint8_t *perm_scantable;
3972  int max=0;
3973  unsigned int threshold1, threshold2;
3974  int bias=0;
3975  int run_tab[65];
3976  int level_tab[65];
3977  int score_tab[65];
3978  int survivor[65];
3979  int survivor_count;
3980  int last_run=0;
3981  int last_level=0;
3982  int last_score= 0;
3983  int last_i;
3984  int coeff[2][64];
3985  int coeff_count[64];
3986  int qmul, qadd, start_i, last_non_zero, i, dc;
3987  const int esc_length= s->ac_esc_length;
3988  uint8_t * length;
3989  uint8_t * last_length;
3990  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3991  int mpeg2_qscale;
3992 
3993  s->fdsp.fdct(block);
3994 
3995  if(s->dct_error_sum)
3996  s->denoise_dct(s, block);
3997  qmul= qscale*16;
3998  qadd= ((qscale-1)|1)*8;
3999 
4000  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4001  else mpeg2_qscale = qscale << 1;
4002 
4003  if (s->mb_intra) {
4004  int q;
4005  scantable= s->intra_scantable.scantable;
4006  perm_scantable= s->intra_scantable.permutated;
4007  if (!s->h263_aic) {
4008  if (n < 4)
4009  q = s->y_dc_scale;
4010  else
4011  q = s->c_dc_scale;
4012  q = q << 3;
4013  } else{
4014  /* For AIC we skip quant/dequant of INTRADC */
4015  q = 1 << 3;
4016  qadd=0;
4017  }
4018 
4019  /* note: block[0] is assumed to be positive */
4020  block[0] = (block[0] + (q >> 1)) / q;
4021  start_i = 1;
4022  last_non_zero = 0;
4023  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4024  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4025  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4026  bias= 1<<(QMAT_SHIFT-1);
4027 
4028  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4029  length = s->intra_chroma_ac_vlc_length;
4030  last_length= s->intra_chroma_ac_vlc_last_length;
4031  } else {
4032  length = s->intra_ac_vlc_length;
4033  last_length= s->intra_ac_vlc_last_length;
4034  }
4035  } else {
4036  scantable= s->inter_scantable.scantable;
4037  perm_scantable= s->inter_scantable.permutated;
4038  start_i = 0;
4039  last_non_zero = -1;
4040  qmat = s->q_inter_matrix[qscale];
4041  matrix = s->inter_matrix;
4042  length = s->inter_ac_vlc_length;
4043  last_length= s->inter_ac_vlc_last_length;
4044  }
4045  last_i= start_i;
4046 
4047  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4048  threshold2= (threshold1<<1);
4049 
4050  for(i=63; i>=start_i; i--) {
4051  const int j = scantable[i];
4052  int level = block[j] * qmat[j];
4053 
4054  if(((unsigned)(level+threshold1))>threshold2){
4055  last_non_zero = i;
4056  break;
4057  }
4058  }
4059 
4060  for(i=start_i; i<=last_non_zero; i++) {
4061  const int j = scantable[i];
4062  int level = block[j] * qmat[j];
4063 
4064 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4065 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4066  if(((unsigned)(level+threshold1))>threshold2){
4067  if(level>0){
4068  level= (bias + level)>>QMAT_SHIFT;
4069  coeff[0][i]= level;
4070  coeff[1][i]= level-1;
4071 // coeff[2][k]= level-2;
4072  }else{
4073  level= (bias - level)>>QMAT_SHIFT;
4074  coeff[0][i]= -level;
4075  coeff[1][i]= -level+1;
4076 // coeff[2][k]= -level+2;
4077  }
4078  coeff_count[i]= FFMIN(level, 2);
4079  av_assert2(coeff_count[i]);
4080  max |=level;
4081  }else{
4082  coeff[0][i]= (level>>31)|1;
4083  coeff_count[i]= 1;
4084  }
4085  }
4086 
4087  *overflow= s->max_qcoeff < max; //overflow might have happened
4088 
4089  if(last_non_zero < start_i){
4090  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4091  return last_non_zero;
4092  }
4093 
4094  score_tab[start_i]= 0;
4095  survivor[0]= start_i;
4096  survivor_count= 1;
4097 
4098  for(i=start_i; i<=last_non_zero; i++){
4099  int level_index, j, zero_distortion;
4100  int dct_coeff= FFABS(block[ scantable[i] ]);
4101  int best_score=256*256*256*120;
4102 
4103  if (s->fdsp.fdct == ff_fdct_ifast)
4104  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4105  zero_distortion= dct_coeff*dct_coeff;
4106 
4107  for(level_index=0; level_index < coeff_count[i]; level_index++){
4108  int distortion;
4109  int level= coeff[level_index][i];
4110  const int alevel= FFABS(level);
4111  int unquant_coeff;
4112 
4113  av_assert2(level);
4114 
4115  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4116  unquant_coeff= alevel*qmul + qadd;
4117  } else if(s->out_format == FMT_MJPEG) {
4118  j = s->idsp.idct_permutation[scantable[i]];
4119  unquant_coeff = alevel * matrix[j] * 8;
4120  }else{ // MPEG-1
4121  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4122  if(s->mb_intra){
4123  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4124  unquant_coeff = (unquant_coeff - 1) | 1;
4125  }else{
4126  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4127  unquant_coeff = (unquant_coeff - 1) | 1;
4128  }
4129  unquant_coeff<<= 3;
4130  }
4131 
4132  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4133  level+=64;
4134  if((level&(~127)) == 0){
4135  for(j=survivor_count-1; j>=0; j--){
4136  int run= i - survivor[j];
4137  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4138  score += score_tab[i-run];
4139 
4140  if(score < best_score){
4141  best_score= score;
4142  run_tab[i+1]= run;
4143  level_tab[i+1]= level-64;
4144  }
4145  }
4146 
4147  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4148  for(j=survivor_count-1; j>=0; j--){
4149  int run= i - survivor[j];
4150  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4151  score += score_tab[i-run];
4152  if(score < last_score){
4153  last_score= score;
4154  last_run= run;
4155  last_level= level-64;
4156  last_i= i+1;
4157  }
4158  }
4159  }
4160  }else{
4161  distortion += esc_length*lambda;
4162  for(j=survivor_count-1; j>=0; j--){
4163  int run= i - survivor[j];
4164  int score= distortion + score_tab[i-run];
4165 
4166  if(score < best_score){
4167  best_score= score;
4168  run_tab[i+1]= run;
4169  level_tab[i+1]= level-64;
4170  }
4171  }
4172 
4173  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4174  for(j=survivor_count-1; j>=0; j--){
4175  int run= i - survivor[j];
4176  int score= distortion + score_tab[i-run];
4177  if(score < last_score){
4178  last_score= score;
4179  last_run= run;
4180  last_level= level-64;
4181  last_i= i+1;
4182  }
4183  }
4184  }
4185  }
4186  }
4187 
4188  score_tab[i+1]= best_score;
4189 
4190  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4191  if(last_non_zero <= 27){
4192  for(; survivor_count; survivor_count--){
4193  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4194  break;
4195  }
4196  }else{
4197  for(; survivor_count; survivor_count--){
4198  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4199  break;
4200  }
4201  }
4202 
4203  survivor[ survivor_count++ ]= i+1;
4204  }
4205 
4206  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4207  last_score= 256*256*256*120;
4208  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4209  int score= score_tab[i];
4210  if (i)
4211  score += lambda * 2; // FIXME more exact?
4212 
4213  if(score < last_score){
4214  last_score= score;
4215  last_i= i;
4216  last_level= level_tab[i];
4217  last_run= run_tab[i];
4218  }
4219  }
4220  }
4221 
4222  s->coded_score[n] = last_score;
4223 
4224  dc= FFABS(block[0]);
4225  last_non_zero= last_i - 1;
4226  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4227 
4228  if(last_non_zero < start_i)
4229  return last_non_zero;
4230 
4231  if(last_non_zero == 0 && start_i == 0){
4232  int best_level= 0;
4233  int best_score= dc * dc;
4234 
4235  for(i=0; i<coeff_count[0]; i++){
4236  int level= coeff[i][0];
4237  int alevel= FFABS(level);
4238  int unquant_coeff, score, distortion;
4239 
4240  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4241  unquant_coeff= (alevel*qmul + qadd)>>3;
4242  } else{ // MPEG-1
4243  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4244  unquant_coeff = (unquant_coeff - 1) | 1;
4245  }
4246  unquant_coeff = (unquant_coeff + 4) >> 3;
4247  unquant_coeff<<= 3 + 3;
4248 
4249  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4250  level+=64;
4251  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4252  else score= distortion + esc_length*lambda;
4253 
4254  if(score < best_score){
4255  best_score= score;
4256  best_level= level - 64;
4257  }
4258  }
4259  block[0]= best_level;
4260  s->coded_score[n] = best_score - dc*dc;
4261  if(best_level == 0) return -1;
4262  else return last_non_zero;
4263  }
4264 
4265  i= last_i;
4266  av_assert2(last_level);
4267 
4268  block[ perm_scantable[last_non_zero] ]= last_level;
4269  i -= last_run + 1;
4270 
4271  for(; i>start_i; i -= run_tab[i] + 1){
4272  block[ perm_scantable[i-1] ]= level_tab[i];
4273  }
4274 
4275  return last_non_zero;
4276 }
4277 
4278 static int16_t basis[64][64];
4279 
4280 static void build_basis(uint8_t *perm){
4281  int i, j, x, y;
4282  emms_c();
4283  for(i=0; i<8; i++){
4284  for(j=0; j<8; j++){
4285  for(y=0; y<8; y++){
4286  for(x=0; x<8; x++){
4287  double s= 0.25*(1<<BASIS_SHIFT);
4288  int index= 8*i + j;
4289  int perm_index= perm[index];
4290  if(i==0) s*= sqrt(0.5);
4291  if(j==0) s*= sqrt(0.5);
4292  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4293  }
4294  }
4295  }
4296  }
4297 }
4298 
4299 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4300  int16_t *block, int16_t *weight, int16_t *orig,
4301  int n, int qscale){
4302  int16_t rem[64];
4303  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4304  const uint8_t *scantable;
4305  const uint8_t *perm_scantable;
4306 // unsigned int threshold1, threshold2;
4307 // int bias=0;
4308  int run_tab[65];
4309  int prev_run=0;
4310  int prev_level=0;
4311  int qmul, qadd, start_i, last_non_zero, i, dc;
4312  uint8_t * length;
4313  uint8_t * last_length;
4314  int lambda;
4315  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4316 
4317  if(basis[0][0] == 0)
4319 
4320  qmul= qscale*2;
4321  qadd= (qscale-1)|1;
4322  if (s->mb_intra) {
4323  scantable= s->intra_scantable.scantable;
4324  perm_scantable= s->intra_scantable.permutated;
4325  if (!s->h263_aic) {
4326  if (n < 4)
4327  q = s->y_dc_scale;
4328  else
4329  q = s->c_dc_scale;
4330  } else{
4331  /* For AIC we skip quant/dequant of INTRADC */
4332  q = 1;
4333  qadd=0;
4334  }
4335  q <<= RECON_SHIFT-3;
4336  /* note: block[0] is assumed to be positive */
4337  dc= block[0]*q;
4338 // block[0] = (block[0] + (q >> 1)) / q;
4339  start_i = 1;
4340 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4341 // bias= 1<<(QMAT_SHIFT-1);
4342  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4343  length = s->intra_chroma_ac_vlc_length;
4344  last_length= s->intra_chroma_ac_vlc_last_length;
4345  } else {
4346  length = s->intra_ac_vlc_length;
4347  last_length= s->intra_ac_vlc_last_length;
4348  }
4349  } else {
4350  scantable= s->inter_scantable.scantable;
4351  perm_scantable= s->inter_scantable.permutated;
4352  dc= 0;
4353  start_i = 0;
4354  length = s->inter_ac_vlc_length;
4355  last_length= s->inter_ac_vlc_last_length;
4356  }
4357  last_non_zero = s->block_last_index[n];
4358 
4359  dc += (1<<(RECON_SHIFT-1));
4360  for(i=0; i<64; i++){
4361  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4362  }
4363 
4364  sum=0;
4365  for(i=0; i<64; i++){
4366  int one= 36;
4367  int qns=4;
4368  int w;
4369 
4370  w= FFABS(weight[i]) + qns*one;
4371  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4372 
4373  weight[i] = w;
4374 // w=weight[i] = (63*qns + (w/2)) / w;
4375 
4376  av_assert2(w>0);
4377  av_assert2(w<(1<<6));
4378  sum += w*w;
4379  }
4380  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4381 
4382  run=0;
4383  rle_index=0;
4384  for(i=start_i; i<=last_non_zero; i++){
4385  int j= perm_scantable[i];
4386  const int level= block[j];
4387  int coeff;
4388 
4389  if(level){
4390  if(level<0) coeff= qmul*level - qadd;
4391  else coeff= qmul*level + qadd;
4392  run_tab[rle_index++]=run;
4393  run=0;
4394 
4395  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4396  }else{
4397  run++;
4398  }
4399  }
4400 
4401  for(;;){
4402  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4403  int best_coeff=0;
4404  int best_change=0;
4405  int run2, best_unquant_change=0, analyze_gradient;
4406  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4407 
4408  if(analyze_gradient){
4409  for(i=0; i<64; i++){
4410  int w= weight[i];
4411 
4412  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4413  }
4414  s->fdsp.fdct(d1);
4415  }
4416 
4417  if(start_i){
4418  const int level= block[0];
4419  int change, old_coeff;
4420 
4421  av_assert2(s->mb_intra);
4422 
4423  old_coeff= q*level;
4424 
4425  for(change=-1; change<=1; change+=2){
4426  int new_level= level + change;
4427  int score, new_coeff;
4428 
4429  new_coeff= q*new_level;
4430  if(new_coeff >= 2048 || new_coeff < 0)
4431  continue;
4432 
4433  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4434  new_coeff - old_coeff);
4435  if(score<best_score){
4436  best_score= score;
4437  best_coeff= 0;
4438  best_change= change;
4439  best_unquant_change= new_coeff - old_coeff;
4440  }
4441  }
4442  }
4443 
4444  run=0;
4445  rle_index=0;
4446  run2= run_tab[rle_index++];
4447  prev_level=0;
4448  prev_run=0;
4449 
4450  for(i=start_i; i<64; i++){
4451  int j= perm_scantable[i];
4452  const int level= block[j];
4453  int change, old_coeff;
4454 
4455  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4456  break;
4457 
4458  if(level){
4459  if(level<0) old_coeff= qmul*level - qadd;
4460  else old_coeff= qmul*level + qadd;
4461  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4462  }else{
4463  old_coeff=0;
4464  run2--;
4465  av_assert2(run2>=0 || i >= last_non_zero );
4466  }
4467 
4468  for(change=-1; change<=1; change+=2){
4469  int new_level= level + change;
4470  int score, new_coeff, unquant_change;
4471 
4472  score=0;
4473  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4474  continue;
4475 
4476  if(new_level){
4477  if(new_level<0) new_coeff= qmul*new_level - qadd;
4478  else new_coeff= qmul*new_level + qadd;
4479  if(new_coeff >= 2048 || new_coeff <= -2048)
4480  continue;
4481  //FIXME check for overflow
4482 
4483  if(level){
4484  if(level < 63 && level > -63){
4485  if(i < last_non_zero)
4486  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4487  - length[UNI_AC_ENC_INDEX(run, level+64)];
4488  else
4489  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4490  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4491  }
4492  }else{
4493  av_assert2(FFABS(new_level)==1);
4494 
4495  if(analyze_gradient){
4496  int g= d1[ scantable[i] ];
4497  if(g && (g^new_level) >= 0)
4498  continue;
4499  }
4500 
4501  if(i < last_non_zero){
4502  int next_i= i + run2 + 1;
4503  int next_level= block[ perm_scantable[next_i] ] + 64;
4504 
4505  if(next_level&(~127))
4506  next_level= 0;
4507 
4508  if(next_i < last_non_zero)
4509  score += length[UNI_AC_ENC_INDEX(run, 65)]
4510  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4511  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4512  else
4513  score += length[UNI_AC_ENC_INDEX(run, 65)]
4514  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4515  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4516  }else{
4517  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4518  if(prev_level){
4519  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4520  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4521  }
4522  }
4523  }
4524  }else{
4525  new_coeff=0;
4526  av_assert2(FFABS(level)==1);
4527 
4528  if(i < last_non_zero){
4529  int next_i= i + run2 + 1;
4530  int next_level= block[ perm_scantable[next_i] ] + 64;
4531 
4532  if(next_level&(~127))
4533  next_level= 0;
4534 
4535  if(next_i < last_non_zero)
4536  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4537  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4538  - length[UNI_AC_ENC_INDEX(run, 65)];
4539  else
4540  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4541  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4542  - length[UNI_AC_ENC_INDEX(run, 65)];
4543  }else{
4544  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4545  if(prev_level){
4546  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4547  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4548  }
4549  }
4550  }
4551 
4552  score *= lambda;
4553 
4554  unquant_change= new_coeff - old_coeff;
4555  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4556 
4557  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4558  unquant_change);
4559  if(score<best_score){
4560  best_score= score;
4561  best_coeff= i;
4562  best_change= change;
4563  best_unquant_change= unquant_change;
4564  }
4565  }
4566  if(level){
4567  prev_level= level + 64;
4568  if(prev_level&(~127))
4569  prev_level= 0;
4570  prev_run= run;
4571  run=0;
4572  }else{
4573  run++;
4574  }
4575  }
4576 
4577  if(best_change){
4578  int j= perm_scantable[ best_coeff ];
4579 
4580  block[j] += best_change;
4581 
4582  if(best_coeff > last_non_zero){
4583  last_non_zero= best_coeff;
4584  av_assert2(block[j]);
4585  }else{
4586  for(; last_non_zero>=start_i; last_non_zero--){
4587  if(block[perm_scantable[last_non_zero]])
4588  break;
4589  }
4590  }
4591 
4592  run=0;
4593  rle_index=0;
4594  for(i=start_i; i<=last_non_zero; i++){
4595  int j= perm_scantable[i];
4596  const int level= block[j];
4597 
4598  if(level){
4599  run_tab[rle_index++]=run;
4600  run=0;
4601  }else{
4602  run++;
4603  }
4604  }
4605 
4606  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4607  }else{
4608  break;
4609  }
4610  }
4611 
4612  return last_non_zero;
4613 }
4614 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t stash[64];
    int n;

    /* only the DC coefficient (or nothing) is set -> nothing to move */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* First pass: pull the (up to last+1) potentially nonzero coefficients
     * out of the block and clear their slots, so that any position that
     * receives no coefficient in the second pass ends up zero. */
    for (n = 0; n <= last; n++) {
        const int pos = scantable[n];
        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* Second pass: drop each saved coefficient into its permuted slot. */
    for (n = 0; n <= last; n++) {
        const int pos = scantable[n];
        block[permutation[pos]] = stash[pos];
    }
}
4650 
4652  int16_t *block, int n,
4653  int qscale, int *overflow)
4654 {
4655  int i, j, level, last_non_zero, q, start_i;
4656  const int *qmat;
4657  const uint8_t *scantable;
4658  int bias;
4659  int max=0;
4660  unsigned int threshold1, threshold2;
4661 
4662  s->fdsp.fdct(block);
4663 
4664  if(s->dct_error_sum)
4665  s->denoise_dct(s, block);
4666 
4667  if (s->mb_intra) {
4668  scantable= s->intra_scantable.scantable;
4669  if (!s->h263_aic) {
4670  if (n < 4)
4671  q = s->y_dc_scale;
4672  else
4673  q = s->c_dc_scale;
4674  q = q << 3;
4675  } else
4676  /* For AIC we skip quant/dequant of INTRADC */
4677  q = 1 << 3;
4678 
4679  /* note: block[0] is assumed to be positive */
4680  block[0] = (block[0] + (q >> 1)) / q;
4681  start_i = 1;
4682  last_non_zero = 0;
4683  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4684  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4685  } else {
4686  scantable= s->inter_scantable.scantable;
4687  start_i = 0;
4688  last_non_zero = -1;
4689  qmat = s->q_inter_matrix[qscale];
4690  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4691  }
4692  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4693  threshold2= (threshold1<<1);
4694  for(i=63;i>=start_i;i--) {
4695  j = scantable[i];
4696  level = block[j] * qmat[j];
4697 
4698  if(((unsigned)(level+threshold1))>threshold2){
4699  last_non_zero = i;
4700  break;
4701  }else{
4702  block[j]=0;
4703  }
4704  }
4705  for(i=start_i; i<=last_non_zero; i++) {
4706  j = scantable[i];
4707  level = block[j] * qmat[j];
4708 
4709 // if( bias+level >= (1<<QMAT_SHIFT)
4710 // || bias-level >= (1<<QMAT_SHIFT)){
4711  if(((unsigned)(level+threshold1))>threshold2){
4712  if(level>0){
4713  level= (bias + level)>>QMAT_SHIFT;
4714  block[j]= level;
4715  }else{
4716  level= (bias - level)>>QMAT_SHIFT;
4717  block[j]= -level;
4718  }
4719  max |=level;
4720  }else{
4721  block[j]=0;
4722  }
4723  }
4724  *overflow= s->max_qcoeff < max; //overflow might have happened
4725 
4726  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4727  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4729  scantable, last_non_zero);
4730 
4731  return last_non_zero;
4732 }
4733 
4734 #define OFFSET(x) offsetof(MpegEncContext, x)
4735 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4736 static const AVOption h263_options[] = {
4737  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4738  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4740  { NULL },
4741 };
4742 
4743 static const AVClass h263_class = {
4744  .class_name = "H.263 encoder",
4745  .item_name = av_default_item_name,
4746  .option = h263_options,
4747  .version = LIBAVUTIL_VERSION_INT,
4748 };
4749 
4751  .name = "h263",
4752  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4753  .type = AVMEDIA_TYPE_VIDEO,
4754  .id = AV_CODEC_ID_H263,
4755  .priv_data_size = sizeof(MpegEncContext),
4757  .encode2 = ff_mpv_encode_picture,
4758  .close = ff_mpv_encode_end,
4760  .priv_class = &h263_class,
4761 };
4762 
4763 static const AVOption h263p_options[] = {
4764  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4765  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4766  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4767  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4769  { NULL },
4770 };
4771 static const AVClass h263p_class = {
4772  .class_name = "H.263p encoder",
4773  .item_name = av_default_item_name,
4774  .option = h263p_options,
4775  .version = LIBAVUTIL_VERSION_INT,
4776 };
4777 
4779  .name = "h263p",
4780  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4781  .type = AVMEDIA_TYPE_VIDEO,
4782  .id = AV_CODEC_ID_H263P,
4783  .priv_data_size = sizeof(MpegEncContext),
4785  .encode2 = ff_mpv_encode_picture,
4786  .close = ff_mpv_encode_end,
4787  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4789  .priv_class = &h263p_class,
4790 };
4791 
4792 static const AVClass msmpeg4v2_class = {
4793  .class_name = "msmpeg4v2 encoder",
4794  .item_name = av_default_item_name,
4795  .option = ff_mpv_generic_options,
4796  .version = LIBAVUTIL_VERSION_INT,
4797 };
4798 
4800  .name = "msmpeg4v2",
4801  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4802  .type = AVMEDIA_TYPE_VIDEO,
4803  .id = AV_CODEC_ID_MSMPEG4V2,
4804  .priv_data_size = sizeof(MpegEncContext),
4806  .encode2 = ff_mpv_encode_picture,
4807  .close = ff_mpv_encode_end,
4809  .priv_class = &msmpeg4v2_class,
4810 };
4811 
4812 static const AVClass msmpeg4v3_class = {
4813  .class_name = "msmpeg4v3 encoder",
4814  .item_name = av_default_item_name,
4815  .option = ff_mpv_generic_options,
4816  .version = LIBAVUTIL_VERSION_INT,
4817 };
4818 
4820  .name = "msmpeg4",
4821  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4822  .type = AVMEDIA_TYPE_VIDEO,
4823  .id = AV_CODEC_ID_MSMPEG4V3,
4824  .priv_data_size = sizeof(MpegEncContext),
4826  .encode2 = ff_mpv_encode_picture,
4827  .close = ff_mpv_encode_end,
4829  .priv_class = &msmpeg4v3_class,
4830 };
4831 
4832 static const AVClass wmv1_class = {
4833  .class_name = "wmv1 encoder",
4834  .item_name = av_default_item_name,
4835  .option = ff_mpv_generic_options,
4836  .version = LIBAVUTIL_VERSION_INT,
4837 };
4838 
4840  .name = "wmv1",
4841  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4842  .type = AVMEDIA_TYPE_VIDEO,
4843  .id = AV_CODEC_ID_WMV1,
4844  .priv_data_size = sizeof(MpegEncContext),
4846  .encode2 = ff_mpv_encode_picture,
4847  .close = ff_mpv_encode_end,
4849  .priv_class = &wmv1_class,
4850 };
int last_time_base
Definition: mpegvideo.h:388
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1598
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1035
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int chroma_elim_threshold
Definition: mpegvideo.h:117
#define INPLACE_OFFSET
Definition: mpegutils.h:121
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:230
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:535
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:589
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2279
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:127
#define RECON_SHIFT
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
#define CONFIG_WMV2_ENCODER
Definition: config.h:1341
int size
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1037
int esc3_level_length
Definition: mpegvideo.h:440
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
Definition: mpegvideo.h:387
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:248
#define FF_CMP_DCTMAX
Definition: avcodec.h:944
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1016
AVOption.
Definition: opt.h:246
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:728
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:153
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:188
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1713
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define CONFIG_RV10_ENCODER
Definition: config.h:1324
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:588
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:531
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:571
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
int64_t bit_rate
the average bitrate
Definition: avcodec.h:576
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
else temp
Definition: vf_mcdeint.c:256
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
const char * g
Definition: vf_curves.c:115
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:79
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:154
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:555
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:454
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:786
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:194
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:132
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1471
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:616
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
#define me
int frame_skip_cmp
Definition: mpegvideo.h:579
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:438
int b_frame_strategy
Definition: mpegvideo.h:572
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: packet.h:356
enum AVCodecID codec_id
Definition: mpegvideo.h:112
const char * b
Definition: vf_curves.c:116
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1302
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
int frame_skip_exp
Definition: mpegvideo.h:578
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:254
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:930
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:541
uint8_t permutated[64]
Definition: idctdsp.h:33
static const AVClass h263_class
uint8_t run
Definition: svq3.c:209
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1761
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:411
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:366
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:133
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:235
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
int stride
Definition: mace.c:144
AVCodec.
Definition: codec.h:190
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:389
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:204
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:87
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:250
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:463
int chroma_x_shift
Definition: mpegvideo.h:486
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:114
int field_select[2][2]
Definition: mpegvideo.h:277
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1467
#define CONFIG_RV20_ENCODER
Definition: config.h:1325
int quant_precision
Definition: mpegvideo.h:400
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1916
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
int modified_quant
Definition: mpegvideo.h:379
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:591
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:220
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:490
static int16_t block[64]
Definition: dct.c:115
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
attribute_deprecated int mv_bits
Definition: avcodec.h:1523
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:866
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:128
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:412
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:503
int64_t time
time of current frame
Definition: mpegvideo.h:390
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:584
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:202
uint8_t
#define av_cold
Definition: attributes.h:88
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:137
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
AVOptions.
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:409
enum OutputFormat out_format
output format
Definition: mpegvideo.h:104
attribute_deprecated int i_count
Definition: avcodec.h:1531
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
#define CONFIG_FAANDCT
Definition: config.h:627
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:582
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
uint16_t * chroma_intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2172
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
Definition: pixblockdsp.h:35
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:33
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:981
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:576
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:940
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:465
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
#define emms_c()
Definition: internal.h:55
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
int interlaced_dct
Definition: mpegvideo.h:491
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:912
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
#define CHROMA_420
Definition: mpegvideo.h:483
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:451
int intra_dc_precision
Definition: mpegvideo.h:464
int repeat_first_field
Definition: mpegvideo.h:480
static AVFrame * frame
quarterpel DSP functions
const char data[16]
Definition: mxf.c:91
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1303
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:251
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: packet.h:355
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AVERROR_EOF
End of file.
Definition: error.h:55
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:392
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
const uint8_t * scantable
Definition: idctdsp.h:32
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:129
#define max(a, b)
Definition: cuda_runtime.h:33
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:79
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:481
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:845
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1549
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:590
int scenechange_threshold
Definition: mpegvideo.h:581
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:2032
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:1028
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:435
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1459
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
int flipflop_rounding
Definition: mpegvideo.h:437
#define CHROMA_444
Definition: mpegvideo.h:485
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:451
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:740
#define ff_sqrt
Definition: mathops.h:206
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2343
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1597
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:755
attribute_deprecated int skip_count
Definition: avcodec.h:1535
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:102
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
#define src
Definition: vp8dsp.c:254
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:187
enum AVCodecID id
Definition: codec.h:204
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:109
H263DSPContext h263dsp
Definition: mpegvideo.h:237
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:156
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:215
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
int width
Definition: frame.h:358
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
#define ARCH_X86
Definition: config.h:38
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:487
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:118
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:405
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:918
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2031
int qmax
maximum quantizer
Definition: avcodec.h:1379
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:223
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:188
ERContext er
Definition: mpegvideo.h:566
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1808
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:219
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
PixblockDSPContext pdsp
Definition: mpegvideo.h:234
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:914
uint8_t * buf
Definition: put_bits.h:38
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:233
const char * name
Name of the codec implementation.
Definition: codec.h:197
uint8_t bits
Definition: vp3data.h:202
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:401
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:555
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:406
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:257
static const uint8_t offset[127][2]
Definition: vf_spp.c:93
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1138
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:123
int64_t mb_var_sum_temp
Definition: motion_est.h:86
int(* pix_norm1)(uint8_t *pix, int line_size)
int(* pix_sum)(uint8_t *pix, int line_size)
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1132
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:67
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1393
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:126
int * lambda_table
Definition: mpegvideo.h:208
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1052
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1415
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
#define CHROMA_422
Definition: mpegvideo.h:484
float border_masking
Definition: mpegvideo.h:553
int progressive_frame
Definition: mpegvideo.h:489
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:418
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:453
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:113
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:174
#define width
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:521
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:1044
int alternate_scan
Definition: mpegvideo.h:471
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:1422
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1463
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1015
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1801
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:535
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:423
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:443
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:533
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:577
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:1014
#define CONFIG_FLV_ENCODER
Definition: config.h:1291
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:198
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:527
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1789
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
#define av_log2
Definition: intmath.h:83
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:373
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1527
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:436
int frame_pred_frame_dct
Definition: mpegvideo.h:465
attribute_deprecated int misc_bits
Definition: avcodec.h:1537
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:448
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:414
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:391
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:155
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:207
void ff_faandct(int16_t *data)
Definition: faandct.c:114
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:821
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:110
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1040
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:134
enum AVCodecID codec_id
Definition: avcodec.h:536
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
attribute_deprecated int prediction_method
Definition: avcodec.h:885
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:800
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:474
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
main external API structure.
Definition: avcodec.h:526
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:236
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
long long int64_t
Definition: coverity.c:34
ScanTable intra_scantable
Definition: mpegvideo.h:91
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int qmin
minimum quantizer
Definition: avcodec.h:1372
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:100
#define FF_CMP_NSSE
Definition: avcodec.h:941
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:144
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:140
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:525
FDCTDSPContext fdsp
Definition: mpegvideo.h:227
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:859
int luma_elim_threshold
Definition: mpegvideo.h:116
attribute_deprecated int header_bits
Definition: avcodec.h:1525
Picture * picture
main picture buffer
Definition: mpegvideo.h:136
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:404
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:456
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions...
Definition: avcodec.h:1026
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:255
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1596
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1017
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:529
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:125
cl_device_type type
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:135
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:1099
#define STRIDE_ALIGN
Definition: internal.h:108
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:126
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:574
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:537
int f_code
forward MV resolution
Definition: mpegvideo.h:238
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:539
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1529
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1560
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions...
Definition: avcodec.h:1035
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:115
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:452
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:105
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:252
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:873
static int64_t pts
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:852
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
Definition: pixblockdsp.h:29
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:256
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:253
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:189
uint8_t level
Definition: svq3.c:210
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:249
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:131
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:555
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:81
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:183
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:721
PutBitContext pb
bit output
Definition: mpegvideo.h:151
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:299
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1305
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1304
int
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:924
int quantizer_noise_shaping
Definition: mpegvideo.h:542
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:538
MECmpContext mecc
Definition: mpegvideo.h:231
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:130
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:279
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1512
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:214
#define CONFIG_H261_ENCODER
Definition: config.h:1293
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:209
static int16_t basis[64][64]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:162
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:182
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there&#39;s a delay
Definition: mpegvideo.h:148
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:1780
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
#define H263_GOB_HEIGHT(h)
Definition: h263.h:42
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
#define CONFIG_H263_ENCODER
Definition: config.h:1294
#define CONFIG_H263P_ENCODER
Definition: config.h:1295
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:190
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:466
int trellis
trellis RD quantization
Definition: avcodec.h:1479
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:2046
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:426
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:509
int slices
Number of slices.
Definition: avcodec.h:1177
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:553
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:890
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:461
int dia_size
ME diamond size & shape.
Definition: avcodec.h:954
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:1541
VideoDSPContext vdsp
Definition: mpegvideo.h:236
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1087
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2260
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:472
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:508
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:283
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:103
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:408
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:168
attribute_deprecated int p_count
Definition: avcodec.h:1533
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:1506
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:138
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:150
atomic_int error_count
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:354
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:644
int height
Definition: frame.h:358
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:131
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:90
#define av_always_inline
Definition: attributes.h:45
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:498
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1829
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:523
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:332
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
int delay
Codec delay.
Definition: avcodec.h:682
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1593
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
int ff_check_alignment(void)
Definition: me_cmp.c:1014
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:149
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1850
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:275
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:206
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1408
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:393
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
enum idct_permutation_type perm_type
Definition: idctdsp.h:97
attribute_deprecated int pre_me
Definition: avcodec.h:966
HpelDSPContext hdsp
Definition: mpegvideo.h:229
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340