FFmpeg 4.3.9
vp9recon.c
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "libavutil/avassert.h"
25 
26 #include "avcodec.h"
27 #include "internal.h"
28 #include "videodsp.h"
29 #include "vp9data.h"
30 #include "vp9dec.h"
31 
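/* Editorial note (not in the upstream file): check_intra_mode() builds the
 * top ("a") and left ("l") predictor edge arrays for one intra-coded
 * transform block. Neighbour samples that are unavailable are replaced by
 * the fixed 127/128/129 DC values (and the prediction mode is remapped via
 * mode_conv below); partially available edges are padded by replicating the
 * last valid pixel. It returns the mode to pass to the intra-pred DSP code. */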
32 static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t **a,
33  uint8_t *dst_edge, ptrdiff_t stride_edge,
34  uint8_t *dst_inner, ptrdiff_t stride_inner,
35  uint8_t *l, int col, int x, int w,
36  int row, int y, enum TxfmMode tx,
37  int p, int ss_h, int ss_v, int bytesperpixel)
38 {
39  VP9Context *s = td->s;
40  int have_top = row > 0 || y > 0;
41  int have_left = col > td->tile_col_start || x > 0;
42  int have_right = x < w - 1;
43  int bpp = s->s.h.bpp;
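 /* Remaps the requested prediction mode when the top and/or left neighbours
  * are missing, e.g. VERT_PRED with no decoded row above becomes DC_127_PRED.
  * Indexed as mode_conv[mode][have_left][have_top]. */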
44  static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
45  [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
46  { DC_127_PRED, VERT_PRED } },
47  [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
48  { HOR_PRED, HOR_PRED } },
49  [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
50  { LEFT_DC_PRED, DC_PRED } },
51  [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
52  { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
53  [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
54  { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
55  [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
56  { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
57  [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
58  { HOR_DOWN_PRED, HOR_DOWN_PRED } },
59  [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
60  { DC_127_PRED, VERT_LEFT_PRED } },
61  [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
62  { HOR_UP_PRED, HOR_UP_PRED } },
63  [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
64  { HOR_PRED, TM_VP8_PRED } },
65  };
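 /* Per-mode description of which neighbour samples each predictor reads:
  * the left and/or top edge, the top-left corner sample, the 4 top-right
  * samples (only relevant for 4x4 transforms), and whether the left edge
  * is stored top-to-bottom (invert_left) rather than bottom-up. */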
66  static const struct {
67  uint8_t needs_left:1;
68  uint8_t needs_top:1;
69  uint8_t needs_topleft:1;
70  uint8_t needs_topright:1;
71  uint8_t invert_left:1;
72  } edges[N_INTRA_PRED_MODES] = {
73  [VERT_PRED] = { .needs_top = 1 },
74  [HOR_PRED] = { .needs_left = 1 },
75  [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
76  [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
77  [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
78  .needs_topleft = 1 },
79  [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
80  .needs_topleft = 1 },
81  [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1,
82  .needs_topleft = 1 },
83  [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
84  [HOR_UP_PRED] = { .needs_left = 1, .invert_left = 1 },
85  [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1,
86  .needs_topleft = 1 },
87  [LEFT_DC_PRED] = { .needs_left = 1 },
88  [TOP_DC_PRED] = { .needs_top = 1 },
89  [DC_128_PRED] = { 0 },
90  [DC_127_PRED] = { 0 },
91  [DC_129_PRED] = { 0 }
92  };
93 
94  av_assert2(mode >= 0 && mode < 10);
95  mode = mode_conv[mode][have_left][have_top];
96  if (edges[mode].needs_top) {
97  uint8_t *top, *topleft;
98  int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
99  int n_px_need_tr = 0;
100 
101  if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
102  n_px_need_tr = 4;
103 
104  // if top of sb64-row, use s->intra_pred_data[] instead of
105  // dst[-stride] for intra prediction (it contains pre- instead of
106  // post-loopfilter data)
107  if (have_top) {
108  top = !(row & 7) && !y ?
109  s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
110  y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
111  if (have_left)
112  topleft = !(row & 7) && !y ?
113  s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
114  y == 0 || x == 0 ? &dst_edge[-stride_edge] :
115  &dst_inner[-stride_inner];
116  }
117 
118  if (have_top &&
119  (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
120  (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
121  n_px_need + n_px_need_tr <= n_px_have) {
122  *a = top;
123  } else {
124  if (have_top) {
125  if (n_px_need <= n_px_have) {
126  memcpy(*a, top, n_px_need * bytesperpixel);
127  } else {
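/* The memset_bpp/memset_val/assign_bpp/assign_val macros defined below write
 * pixels for either bitstream depth: one byte per pixel at 8 bpp, two bytes
 * (via AV_WN16A/AV_COPY16) for the 10/12 bpp profiles. */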
128 #define memset_bpp(c, i1, v, i2, num) do { \
129  if (bytesperpixel == 1) { \
130  memset(&(c)[(i1)], (v)[(i2)], (num)); \
131  } else { \
132  int n, val = AV_RN16A(&(v)[(i2) * 2]); \
133  for (n = 0; n < (num); n++) { \
134  AV_WN16A(&(c)[((i1) + n) * 2], val); \
135  } \
136  } \
137 } while (0)
138  memcpy(*a, top, n_px_have * bytesperpixel);
139  memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
140  }
141  } else {
142 #define memset_val(c, val, num) do { \
143  if (bytesperpixel == 1) { \
144  memset((c), (val), (num)); \
145  } else { \
146  int n; \
147  for (n = 0; n < (num); n++) { \
148  AV_WN16A(&(c)[n * 2], (val)); \
149  } \
150  } \
151 } while (0)
152  memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
153  }
154  if (edges[mode].needs_topleft) {
155  if (have_left && have_top) {
156 #define assign_bpp(c, i1, v, i2) do { \
157  if (bytesperpixel == 1) { \
158  (c)[(i1)] = (v)[(i2)]; \
159  } else { \
160  AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
161  } \
162 } while (0)
163  assign_bpp(*a, -1, topleft, -1);
164  } else {
165 #define assign_val(c, i, v) do { \
166  if (bytesperpixel == 1) { \
167  (c)[(i)] = (v); \
168  } else { \
169  AV_WN16A(&(c)[(i) * 2], (v)); \
170  } \
171 } while (0)
172  assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
173  }
174  }
175  if (tx == TX_4X4 && edges[mode].needs_topright) {
176  if (have_top && have_right &&
177  n_px_need + n_px_need_tr <= n_px_have) {
178  memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
179  } else {
180  memset_bpp(*a, 4, *a, 3, 4);
181  }
182  }
183  }
184  }
185  if (edges[mode].needs_left) {
186  if (have_left) {
187  int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
188  uint8_t *dst = x == 0 ? dst_edge : dst_inner;
189  ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
190 
191  if (edges[mode].invert_left) {
192  if (n_px_need <= n_px_have) {
193  for (i = 0; i < n_px_need; i++)
194  assign_bpp(l, i, &dst[i * stride], -1);
195  } else {
196  for (i = 0; i < n_px_have; i++)
197  assign_bpp(l, i, &dst[i * stride], -1);
198  memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
199  }
200  } else {
201  if (n_px_need <= n_px_have) {
202  for (i = 0; i < n_px_need; i++)
203  assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
204  } else {
205  for (i = 0; i < n_px_have; i++)
206  assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
207  memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
208  }
209  }
210  } else {
211  memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
212  }
213  }
214 
215  return mode;
216 }
217 
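/* Reconstruct one intra-coded block: for every transform block inside it
 * (luma first, then both chroma planes) build the prediction edges with
 * check_intra_mode(), run the intra prediction DSP routine, and add the
 * inverse transform whenever the block has coded coefficients (eob != 0). */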
218 static av_always_inline void intra_recon(VP9TileData *td, ptrdiff_t y_off,
219  ptrdiff_t uv_off, int bytesperpixel)
220 {
221  VP9Context *s = td->s;
222  VP9Block *b = td->b;
223  int row = td->row, col = td->col;
224  int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
225  int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
226  int end_x = FFMIN(2 * (s->cols - col), w4);
227  int end_y = FFMIN(2 * (s->rows - row), h4);
228  int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
229  int uvstep1d = 1 << b->uvtx, p;
230  uint8_t *dst = td->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
231  LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
232  LOCAL_ALIGNED_32(uint8_t, l, [64]);
233 
234  for (n = 0, y = 0; y < end_y; y += step1d) {
235  uint8_t *ptr = dst, *ptr_r = dst_r;
236  for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
237  ptr_r += 4 * step1d * bytesperpixel, n += step) {
238  int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
239  y * 2 + x : 0];
240  uint8_t *a = &a_buf[32];
241  enum TxfmType txtp = ff_vp9_intra_txfm_type[mode];
242  int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];
243 
244  mode = check_intra_mode(td, mode, &a, ptr_r,
245  s->s.frames[CUR_FRAME].tf.f->linesize[0],
246  ptr, td->y_stride, l,
247  col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
248  s->dsp.intra_pred[b->tx][mode](ptr, td->y_stride, l, a);
249  if (eob)
250  s->dsp.itxfm_add[tx][txtp](ptr, td->y_stride,
251  td->block + 16 * n * bytesperpixel, eob);
252  }
253  dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
254  dst += 4 * step1d * td->y_stride;
255  }
256 
257  // U/V
258  w4 >>= s->ss_h;
259  end_x >>= s->ss_h;
260  end_y >>= s->ss_v;
261  step = 1 << (b->uvtx * 2);
262  for (p = 0; p < 2; p++) {
263  dst = td->dst[1 + p];
264  dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
265  for (n = 0, y = 0; y < end_y; y += uvstep1d) {
266  uint8_t *ptr = dst, *ptr_r = dst_r;
267  for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
268  ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
269  int mode = b->uvmode;
270  uint8_t *a = &a_buf[32];
271  int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];
272 
273  mode = check_intra_mode(td, mode, &a, ptr_r,
274  s->s.frames[CUR_FRAME].tf.f->linesize[1],
275  ptr, td->uv_stride, l, col, x, w4, row, y,
276  b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
277  s->dsp.intra_pred[b->uvtx][mode](ptr, td->uv_stride, l, a);
278  if (eob)
279  s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
280  td->uvblock[p] + 16 * n * bytesperpixel, eob);
281  }
282  dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
283  dst += 4 * uvstep1d * td->uv_stride;
284  }
285  }
286 }
287 
288 void ff_vp9_intra_recon_8bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
289 {
290  intra_recon(td, y_off, uv_off, 1);
291 }
292 
293 void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off)
294 {
295  intra_recon(td, y_off, uv_off, 2);
296 }
297 
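/* Luma motion compensation for references that have the same dimensions as
 * the current frame. The integer part of the MV is folded into the ref
 * pointer, decoding progress on the reference thread is awaited, and
 * emulated_edge_mc() is used whenever the subpel filter (3 pixels of context
 * before the block, 4 after, plus the extra ARM/AArch64 slack described in
 * the comment below) would read outside the reference frame. */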
298 static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
299  uint8_t *dst, ptrdiff_t dst_stride,
300  const uint8_t *ref, ptrdiff_t ref_stride,
301  ThreadFrame *ref_frame,
302  ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
303  int bw, int bh, int w, int h, int bytesperpixel)
304 {
305  VP9Context *s = td->s;
306  int mx = mv->x, my = mv->y, th;
307 
308  y += my >> 3;
309  x += mx >> 3;
310  ref += y * ref_stride + x * bytesperpixel;
311  mx &= 7;
312  my &= 7;
313  // FIXME bilinear filter only needs 0/1 pixels, not 3/4
314  // we use +7 because the last 7 pixels of each sbrow can be changed in
315  // the longest loopfilter of the next sbrow
316  th = (y + bh + 4 * !!my + 7) >> 6;
317  ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
318  // The arm/aarch64 _hv filters read one more row than what actually is
319  // needed, so switch to emulated edge one pixel sooner vertically
320  // (!!my * 5) than horizontally (!!mx * 4).
321  // The arm/aarch64 _h filters read one more pixel than what actually is
322  // needed, so switch to emulated edge if that would read beyond the bottom
323  // right block.
324  if (x < !!mx * 3 || y < !!my * 3 ||
325  ((ARCH_AARCH64 || ARCH_ARM) && (x + !!mx * 5 > w - bw) && (y + !!my * 5 + 1 > h - bh)) ||
326  x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
327  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
328  ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
329  160, ref_stride,
330  bw + !!mx * 7, bh + !!my * 7,
331  x - !!mx * 3, y - !!my * 3, w, h);
332  ref = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
333  ref_stride = 160;
334  }
335  mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
336 }
337 
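/* Same as mc_luma_unscaled(), but for both chroma planes at once: the
 * 1/8-pel luma MV is converted to 1/16-pel units on the chroma grid
 * (doubled along any axis that is not subsampled), and each plane gets its
 * own edge emulation when needed. */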
338 static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
339  uint8_t *dst_u, uint8_t *dst_v,
340  ptrdiff_t dst_stride,
341  const uint8_t *ref_u, ptrdiff_t src_stride_u,
342  const uint8_t *ref_v, ptrdiff_t src_stride_v,
343  ThreadFrame *ref_frame,
344  ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
345  int bw, int bh, int w, int h, int bytesperpixel)
346 {
347  VP9Context *s = td->s;
348  int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;
349 
350  y += my >> 4;
351  x += mx >> 4;
352  ref_u += y * src_stride_u + x * bytesperpixel;
353  ref_v += y * src_stride_v + x * bytesperpixel;
354  mx &= 15;
355  my &= 15;
356  // FIXME bilinear filter only needs 0/1 pixels, not 3/4
357  // we use +7 because the last 7 pixels of each sbrow can be changed in
358  // the longest loopfilter of the next sbrow
359  th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
360  ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
361  // The arm/aarch64 _hv filters read one more row than what actually is
362  // needed, so switch to emulated edge one pixel sooner vertically
363  // (!!my * 5) than horizontally (!!mx * 4).
364  // The arm/aarch64 _h filters read one more pixel than what actually is
365  // needed, so switch to emulated edge if that would read beyond the bottom
366  // right block.
367  if (x < !!mx * 3 || y < !!my * 3 ||
368  ((ARCH_AARCH64 || ARCH_ARM) && (x + !!mx * 5 > w - bw) && (y + !!my * 5 + 1 > h - bh)) ||
369  x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
370  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
371  ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
372  160, src_stride_u,
373  bw + !!mx * 7, bh + !!my * 7,
374  x - !!mx * 3, y - !!my * 3, w, h);
375  ref_u = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
376  mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);
377 
378  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
379  ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
380  160, src_stride_v,
381  bw + !!mx * 7, bh + !!my * 7,
382  x - !!mx * 3, y - !!my * 3, w, h);
383  ref_v = td->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
384  mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
385  } else {
386  mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
387  mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
388  }
389 }
390 
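/* Instantiate the block-level unscaled inter prediction code from
 * vp9_mc_template.c for both bitstream depths. With these macros the
 * template's FN() names expand to inter_pred_8bpp / inter_pred_16bpp (the
 * functions inter_recon() calls below); mc_luma_dir/mc_chroma_dir simply
 * forward to the unscaled helpers above, ignoring the px/py/pw/ph hints. */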
391 #define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
392  px, py, pw, ph, bw, bh, w, h, i) \
393  mc_luma_unscaled(td, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
394  mv, bw, bh, w, h, bytesperpixel)
395 #define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
396  row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
397  mc_chroma_unscaled(td, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
398  row, col, mv, bw, bh, w, h, bytesperpixel)
399 #define SCALED 0
400 #define FN(x) x##_8bpp
401 #define BYTES_PER_PIXEL 1
402 #include "vp9_mc_template.c"
403 #undef FN
404 #undef BYTES_PER_PIXEL
405 #define FN(x) x##_16bpp
406 #define BYTES_PER_PIXEL 2
407 #include "vp9_mc_template.c"
408 #undef mc_luma_dir
409 #undef mc_chroma_dir
410 #undef FN
411 #undef BYTES_PER_PIXEL
412 #undef SCALED
413 
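/* Luma motion compensation when the reference frame has different
 * dimensions than the current frame. The MV is clipped around the
 * prediction block, scaled with the per-reference scale factors
 * (reproducing libvpx's component-wise rounding, see the BUG note below),
 * and the scaled MC DSP routine is used with per-axis subpel step sizes. */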
414 static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
415  vp9_mc_func (*mc)[2],
416  uint8_t *dst, ptrdiff_t dst_stride,
417  const uint8_t *ref, ptrdiff_t ref_stride,
418  ThreadFrame *ref_frame,
419  ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
420  int px, int py, int pw, int ph,
421  int bw, int bh, int w, int h, int bytesperpixel,
422  const uint16_t *scale, const uint8_t *step)
423 {
424  VP9Context *s = td->s;
425  if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
426  s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
427  mc_luma_unscaled(td, mc, dst, dst_stride, ref, ref_stride, ref_frame,
428  y, x, in_mv, bw, bh, w, h, bytesperpixel);
429  } else {
430 #define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
431  int mx, my;
432  int refbw_m1, refbh_m1;
433  int th;
434  VP56mv mv;
435 
436  mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
437  mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
438  // BUG libvpx seems to scale the two components separately. This introduces
439  // rounding errors but we have to reproduce them to be exactly compatible
440  // with the output from libvpx...
441  mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
442  my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
443 
444  y = my >> 4;
445  x = mx >> 4;
446  ref += y * ref_stride + x * bytesperpixel;
447  mx &= 15;
448  my &= 15;
449  refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
450  refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
451  // FIXME bilinear filter only needs 0/1 pixels, not 3/4
452  // we use +7 because the last 7 pixels of each sbrow can be changed in
453  // the longest loopfilter of the next sbrow
454  th = (y + refbh_m1 + 4 + 7) >> 6;
455  ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
456  // The arm/aarch64 _hv filters read one more row than what actually is
457  // needed, so switch to emulated edge one pixel sooner vertically
458  // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
459  if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
460  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
461  ref - 3 * ref_stride - 3 * bytesperpixel,
462  288, ref_stride,
463  refbw_m1 + 8, refbh_m1 + 8,
464  x - 3, y - 3, w, h);
465  ref = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
466  ref_stride = 288;
467  }
468  smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
469  }
470 }
471 
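/* Chroma counterpart of mc_luma_scaled(). Subsampled axes use the
 * bug-compatible clipping and scaling from libvpx (see the webm issue 820
 * links below); both planes share the computed offsets and subpel phase. */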
472 static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
473  vp9_mc_func (*mc)[2],
474  uint8_t *dst_u, uint8_t *dst_v,
475  ptrdiff_t dst_stride,
476  const uint8_t *ref_u, ptrdiff_t src_stride_u,
477  const uint8_t *ref_v, ptrdiff_t src_stride_v,
478  ThreadFrame *ref_frame,
479  ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
480  int px, int py, int pw, int ph,
481  int bw, int bh, int w, int h, int bytesperpixel,
482  const uint16_t *scale, const uint8_t *step)
483 {
484  VP9Context *s = td->s;
485  if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
486  s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
487  mc_chroma_unscaled(td, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
488  ref_v, src_stride_v, ref_frame,
489  y, x, in_mv, bw, bh, w, h, bytesperpixel);
490  } else {
491  int mx, my;
492  int refbw_m1, refbh_m1;
493  int th;
494  VP56mv mv;
495 
496  if (s->ss_h) {
497  // BUG https://code.google.com/p/webm/issues/detail?id=820
498  mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
499  mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
500  } else {
501  mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
502  mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
503  }
504  if (s->ss_v) {
505  // BUG https://code.google.com/p/webm/issues/detail?id=820
506  mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
507  my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
508  } else {
509  mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
510  my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
511  }
512 #undef scale_mv
513  y = my >> 4;
514  x = mx >> 4;
515  ref_u += y * src_stride_u + x * bytesperpixel;
516  ref_v += y * src_stride_v + x * bytesperpixel;
517  mx &= 15;
518  my &= 15;
519  refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
520  refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
521  // FIXME bilinear filter only needs 0/1 pixels, not 3/4
522  // we use +7 because the last 7 pixels of each sbrow can be changed in
523  // the longest loopfilter of the next sbrow
524  th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
525  ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
526  // The arm/aarch64 _hv filters read one more row than what actually is
527  // needed, so switch to emulated edge one pixel sooner vertically
528  // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
529  if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
530  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
531  ref_u - 3 * src_stride_u - 3 * bytesperpixel,
532  288, src_stride_u,
533  refbw_m1 + 8, refbh_m1 + 8,
534  x - 3, y - 3, w, h);
535  ref_u = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
536  smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);
537 
538  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
539  ref_v - 3 * src_stride_v - 3 * bytesperpixel,
540  288, src_stride_v,
541  refbw_m1 + 8, refbh_m1 + 8,
542  x - 3, y - 3, w, h);
543  ref_v = td->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
544  smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
545  } else {
546  smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
547  smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
548  }
549  }
550 }
551 
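/* Second instantiation of vp9_mc_template.c, this time wired to the scaled
 * helpers, producing inter_pred_scaled_8bpp / inter_pred_scaled_16bpp for
 * references whose dimensions differ from the current frame. */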
552 #define mc_luma_dir(td, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
553  px, py, pw, ph, bw, bh, w, h, i) \
554  mc_luma_scaled(td, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
555  mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
556  s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
557 #define mc_chroma_dir(td, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
558  row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
559  mc_chroma_scaled(td, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
560  row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
561  s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
562 #define SCALED 1
563 #define FN(x) x##_scaled_8bpp
564 #define BYTES_PER_PIXEL 1
565 #include "vp9_mc_template.c"
566 #undef FN
567 #undef BYTES_PER_PIXEL
568 #define FN(x) x##_scaled_16bpp
569 #define BYTES_PER_PIXEL 2
570 #include "vp9_mc_template.c"
571 #undef mc_luma_dir
572 #undef mc_chroma_dir
573 #undef FN
574 #undef BYTES_PER_PIXEL
575 #undef SCALED
576 
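/* Reconstruct one inter-coded block: run motion compensation (scaled or
 * unscaled depending on the per-reference mvscale factors, after rejecting
 * references with invalid dimensions and setting error_info), then, unless
 * the block is skipped, add the residual inverse transforms for luma and
 * both chroma planes (inter residuals always use the DCT_DCT type). */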
577 static av_always_inline void inter_recon(VP9TileData *td, int bytesperpixel)
578 {
579  VP9Context *s = td->s;
580  VP9Block *b = td->b;
581  int row = td->row, col = td->col;
582 
583  if (s->mvscale[b->ref[0]][0] == REF_INVALID_SCALE ||
584  (b->comp && s->mvscale[b->ref[1]][0] == REF_INVALID_SCALE)) {
585  if (!s->td->error_info) {
586  s->td->error_info = AVERROR_INVALIDDATA;
587  av_log(NULL, AV_LOG_ERROR, "Bitstream not supported, "
588  "reference frame has invalid dimensions\n");
589  }
590  return;
591  }
592 
593  if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
594  if (bytesperpixel == 1) {
595  inter_pred_scaled_8bpp(td);
596  } else {
597  inter_pred_scaled_16bpp(td);
598  }
599  } else {
600  if (bytesperpixel == 1) {
601  inter_pred_8bpp(td);
602  } else {
603  inter_pred_16bpp(td);
604  }
605  }
606 
607  if (!b->skip) {
608  /* mostly copied intra_recon() */
609 
610  int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
611  int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
612  int end_x = FFMIN(2 * (s->cols - col), w4);
613  int end_y = FFMIN(2 * (s->rows - row), h4);
614  int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
615  int uvstep1d = 1 << b->uvtx, p;
616  uint8_t *dst = td->dst[0];
617 
618  // y itxfm add
619  for (n = 0, y = 0; y < end_y; y += step1d) {
620  uint8_t *ptr = dst;
621  for (x = 0; x < end_x; x += step1d,
622  ptr += 4 * step1d * bytesperpixel, n += step) {
623  int eob = b->tx > TX_8X8 ? AV_RN16A(&td->eob[n]) : td->eob[n];
624 
625  if (eob)
626  s->dsp.itxfm_add[tx][DCT_DCT](ptr, td->y_stride,
627  td->block + 16 * n * bytesperpixel, eob);
628  }
629  dst += 4 * td->y_stride * step1d;
630  }
631 
632  // uv itxfm add
633  end_x >>= s->ss_h;
634  end_y >>= s->ss_v;
635  step = 1 << (b->uvtx * 2);
636  for (p = 0; p < 2; p++) {
637  dst = td->dst[p + 1];
638  for (n = 0, y = 0; y < end_y; y += uvstep1d) {
639  uint8_t *ptr = dst;
640  for (x = 0; x < end_x; x += uvstep1d,
641  ptr += 4 * uvstep1d * bytesperpixel, n += step) {
642  int eob = b->uvtx > TX_8X8 ? AV_RN16A(&td->uveob[p][n]) : td->uveob[p][n];
643 
644  if (eob)
645  s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, td->uv_stride,
646  td->uvblock[p] + 16 * n * bytesperpixel, eob);
647  }
648  dst += 4 * uvstep1d * td->uv_stride;
649  }
650  }
651  }
652 }
653 
654 void ff_vp9_inter_recon_8bpp(VP9TileData *td)
655 {
656  inter_recon(td, 1);
657 }
658 
659 void ff_vp9_inter_recon_16bpp(VP9TileData *td)
660 {
661  inter_recon(td, 2);
662 }