FFmpeg 4.3.9
vdpau_vp9.c
/*
 * VP9 HW decode acceleration through VDPAU
 *
 * Copyright (c) 2019 Manoj Gupta Bonda
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <vdpau/vdpau.h>
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "internal.h"
#include "vp9data.h"
#include "vp9dec.h"
#include "hwconfig.h"
#include "vdpau.h"
#include "vdpau_internal.h"

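/* Build the VdpPictureInfoVP9 picture-parameter structure from the parsed
 * VP9 bitstream header and hand it to the common VDPAU start-frame helper. */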
static int vdpau_vp9_start_frame(AVCodecContext *avctx,
                                 const uint8_t *buffer, uint32_t size)
{
    VP9Context *s = avctx->priv_data;
    VP9SharedContext *h = &(s->s);
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
    int i;

    VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!pixdesc) {
        return AV_PIX_FMT_NONE;
    }

    info->width = avctx->width;
    info->height = avctx->height;
    /* fill the VdpPictureInfoVP9 struct */
    info->lastReference = VDP_INVALID_HANDLE;
    info->goldenReference = VDP_INVALID_HANDLE;
    info->altReference = VDP_INVALID_HANDLE;

    /* Map the last/golden/altref reference slots to the VDPAU surfaces
     * of the frames they point at, when those frames exist. */
    if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
        info->lastReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
    }
    if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
        info->goldenReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
    }
    if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
        info->altReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
    }

    info->profile = h->h.profile;
    info->frameContextIdx = h->h.framectxid;
    info->keyFrame = h->h.keyframe;
    info->showFrame = !h->h.invisible;
    info->errorResilient = h->h.errorres;
    info->frameParallelDecoding = h->h.parallelmode;

    info->subSamplingX = pixdesc->log2_chroma_w;
    info->subSamplingY = pixdesc->log2_chroma_h;

    info->intraOnly = h->h.intraonly;
    info->allowHighPrecisionMv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
    info->refreshEntropyProbs = h->h.refreshctx;

    info->bitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
    info->bitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;

    info->loopFilterLevel = h->h.filter.level;
    info->loopFilterSharpness = h->h.filter.sharpness;
    info->modeRefLfEnabled = h->h.lf_delta.enabled;

    info->log2TileColumns = h->h.tiling.log2_tile_cols;
    info->log2TileRows = h->h.tiling.log2_tile_rows;

    info->segmentEnabled = h->h.segmentation.enabled;
    info->segmentMapUpdate = h->h.segmentation.update_map;
    info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
    info->segmentFeatureMode = h->h.segmentation.absolute_vals;

    info->qpYAc = h->h.yac_qi;
    info->qpYDc = h->h.ydc_qdelta;
    info->qpChDc = h->h.uvdc_qdelta;
    info->qpChAc = h->h.uvac_qdelta;

    info->resetFrameContext = h->h.resetctx;
    info->mcompFilterType = h->h.filtermode ^ (h->h.filtermode <= 1);
    info->uncompressedHeaderSize = h->h.uncompressed_header_size;
    info->compressedHeaderSize = h->h.compressed_header_size;
    info->refFrameSignBias[0] = 0;

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
        info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
        info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
        info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
        info->activeRefIdx[i] = h->h.refidx[i];
        info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
        info->refFrameSignBias[i + 1] = h->h.signbias[i];
    }

    for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
        info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
        info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
        info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
        info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;

        info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
        info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
        info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
        info->segmentFeatureData[i][3] = 0;
    }

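    /* Map the AVColorSpace reported by the decoder onto the numeric
     * colorSpace codes expected in VdpPictureInfoVP9. */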
    switch (avctx->colorspace) {
    default:
    case AVCOL_SPC_UNSPECIFIED:
        info->colorSpace = 0;
        break;
    case AVCOL_SPC_BT470BG:
        info->colorSpace = 1;
        break;
    case AVCOL_SPC_BT709:
        info->colorSpace = 2;
        break;
    case AVCOL_SPC_SMPTE170M:
        info->colorSpace = 3;
        break;
    case AVCOL_SPC_SMPTE240M:
        info->colorSpace = 4;
        break;
    case AVCOL_SPC_BT2020_NCL:
        info->colorSpace = 5;
        break;
    case AVCOL_SPC_RESERVED:
        info->colorSpace = 6;
        break;
    case AVCOL_SPC_RGB:
        info->colorSpace = 7;
        break;
    }

    return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
}

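/* Three-byte prefix that vdpau_vp9_decode_slice() places in front of each
 * frame's compressed data before it is queued for the VDPAU decoder. */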
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };

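/* Queue one frame's bitstream data: the start-code prefix followed by the
 * compressed frame, appended to the picture's buffer list. */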
static int vdpau_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3);
    if (val)
        return val;

    val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
    if (val)
        return val;

    return 0;
}

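/* Pass the current frame and its accumulated bitstream buffers to the common
 * VDPAU end-frame helper, which runs the hardware decode. */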
static int vdpau_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_common_end_frame(avctx, pic.tf.f, pic_ctx);
    if (val < 0)
        return val;

    return 0;
}

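/* Map the stream's VP9 profile to the matching VDPAU decoder profile and
 * run the common VDPAU initialisation. */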
static int vdpau_vp9_init(AVCodecContext *avctx)
{
    VdpDecoderProfile profile;
    uint32_t level = avctx->level;

    switch (avctx->profile) {
    case FF_PROFILE_VP9_0:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
        break;
    case FF_PROFILE_VP9_1:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
        break;
    case FF_PROFILE_VP9_2:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
        break;
    case FF_PROFILE_VP9_3:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
        break;
    default:
        return AVERROR(ENOTSUP);
    }

    return ff_vdpau_common_init(avctx, profile, level);
}

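/* Hardware-acceleration descriptor registering these callbacks for
 * VP9 decoding through VDPAU. */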
const AVHWAccel ff_vp9_vdpau_hwaccel = {
    .name           = "vp9_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_VDPAU,
    .start_frame    = vdpau_vp9_start_frame,
    .end_frame      = vdpau_vp9_end_frame,
    .decode_slice   = vdpau_vp9_decode_slice,
    .frame_priv_data_size = sizeof(struct vdpau_picture_context),
    .init           = vdpau_vp9_init,
    .uninit         = ff_vdpau_common_uninit,
    .frame_params   = ff_vdpau_common_frame_params,
    .priv_data_size = sizeof(VDPAUContext),
    .caps_internal  = HWACCEL_CAP_ASYNC_SAFE,
};