
avcodec/libdav1d: properly free all output picture references

Dav1dPictures contain more than one buffer reference, so we're forced to use the
API properly to free them all.

Signed-off-by: James Almer <jamrial@gmail.com>
Author: James Almer <jamrial@gmail.com>
Date:   2019-01-23 17:39:20 -03:00
Commit: ca44fa5d7f
Parent: 90adbf4abf
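
For context on the pattern the diff below adopts: a Dav1dPicture returned by dav1d_get_picture() holds more than one reference-counted buffer (the data planes plus references such as the sequence header, frame header and metadata), and dav1d_picture_unref() is the call that releases all of them. The following sketch, written only against the public dav1d and libavutil APIs, shows the heap-allocated-picture-plus-free-callback ownership scheme in isolation; wrap_picture() and picture_free() are illustrative names, not FFmpeg functions.

/*
 * Illustrative sketch, not the FFmpeg code itself: heap-allocate the
 * Dav1dPicture, hand it to av_buffer_create() as the opaque pointer, and
 * release it in the free callback with dav1d_picture_unref(), which drops
 * every buffer reference the picture holds, followed by av_free() for the
 * struct allocated here.
 */
#include <stdint.h>
#include <dav1d/dav1d.h>
#include <libavutil/buffer.h>
#include <libavutil/mem.h>

static void picture_free(void *opaque, uint8_t *data)
{
    Dav1dPicture *p = opaque;

    dav1d_picture_unref(p); /* releases data planes, header and metadata refs */
    av_free(p);             /* releases the struct allocated in wrap_picture() */
}

/* Tie a decoded picture's lifetime to the returned AVBufferRef. */
static AVBufferRef *wrap_picture(Dav1dContext *ctx)
{
    AVBufferRef *buf;
    Dav1dPicture *p = av_mallocz(sizeof(*p));

    if (!p)
        return NULL;

    if (dav1d_get_picture(ctx, p) < 0) {
        av_free(p);             /* nothing to unref yet */
        return NULL;
    }

    buf = av_buffer_create(NULL, 0, picture_free, p, AV_BUFFER_FLAG_READONLY);
    if (!buf) {
        dav1d_picture_unref(p); /* undo the reference we just took */
        av_free(p);
    }
    return buf;
}

Once the last reference to the returned AVBufferRef is released, picture_free() runs and every decoder-side buffer the picture held is dropped along with the struct itself.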

libavcodec/libdav1d.c

@@ -74,11 +74,10 @@ static void libdav1d_data_free(const uint8_t *data, void *opaque) {
 }
 
 static void libdav1d_frame_free(void *opaque, uint8_t *data) {
-    Dav1dPicture p = { 0 };
+    Dav1dPicture *p = opaque;
 
-    p.ref = opaque;
-    p.data[0] = (void *) 0x1; // this has to be non-NULL
-    dav1d_picture_unref(&p);
+    dav1d_picture_unref(p);
+    av_free(p);
 }
 
 static const enum AVPixelFormat pix_fmt[][3] = {
@@ -92,7 +91,7 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
 {
     Libdav1dContext *dav1d = c->priv_data;
     Dav1dData *data = &dav1d->data;
-    Dav1dPicture p = { 0 };
+    Dav1dPicture *p;
     int res;
 
     if (!data->sz) {
@@ -124,43 +123,49 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
             return res;
     }
 
-    res = dav1d_get_picture(dav1d->c, &p);
+    p = av_mallocz(sizeof(*p));
+    if (!p)
+        return AVERROR(ENOMEM);
+
+    res = dav1d_get_picture(dav1d->c, p);
     if (res < 0) {
         if (res == -EINVAL)
             res = AVERROR_INVALIDDATA;
         else if (res == -EAGAIN && c->internal->draining)
             res = AVERROR_EOF;
 
+        av_free(p);
         return res;
     }
 
-    av_assert0(p.data[0] != NULL);
+    av_assert0(p->data[0] != NULL);
 
     frame->buf[0] = av_buffer_create(NULL, 0, libdav1d_frame_free,
-                                     p.ref, AV_BUFFER_FLAG_READONLY);
+                                     p, AV_BUFFER_FLAG_READONLY);
     if (!frame->buf[0]) {
-        dav1d_picture_unref(&p);
+        dav1d_picture_unref(p);
+        av_free(p);
         return AVERROR(ENOMEM);
     }
 
-    frame->data[0] = p.data[0];
-    frame->data[1] = p.data[1];
-    frame->data[2] = p.data[2];
-    frame->linesize[0] = p.stride[0];
-    frame->linesize[1] = p.stride[1];
-    frame->linesize[2] = p.stride[1];
+    frame->data[0] = p->data[0];
+    frame->data[1] = p->data[1];
+    frame->data[2] = p->data[2];
+    frame->linesize[0] = p->stride[0];
+    frame->linesize[1] = p->stride[1];
+    frame->linesize[2] = p->stride[1];
 
-    c->profile = p.seq_hdr->profile;
-    frame->format = c->pix_fmt = pix_fmt[p.p.layout][p.seq_hdr->hbd];
-    frame->width = p.p.w;
-    frame->height = p.p.h;
-    if (c->width != p.p.w || c->height != p.p.h) {
-        res = ff_set_dimensions(c, p.p.w, p.p.h);
+    c->profile = p->seq_hdr->profile;
+    frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
+    frame->width = p->p.w;
+    frame->height = p->p.h;
+    if (c->width != p->p.w || c->height != p->p.h) {
+        res = ff_set_dimensions(c, p->p.w, p->p.h);
         if (res < 0)
             return res;
     }
 
-    switch (p.seq_hdr->chr) {
+    switch (p->seq_hdr->chr) {
     case DAV1D_CHR_VERTICAL:
         frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
         break;
@@ -168,22 +173,22 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
         frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
         break;
     }
-    frame->colorspace = c->colorspace = (enum AVColorSpace) p.seq_hdr->mtrx;
-    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p.seq_hdr->pri;
-    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p.seq_hdr->trc;
-    frame->color_range = c->color_range = p.seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+    frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
+    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
+    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
+    frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
 
     // match timestamps and packet size
-    frame->pts = p.m.timestamp;
+    frame->pts = p->m.timestamp;
 #if FF_API_PKT_PTS
 FF_DISABLE_DEPRECATION_WARNINGS
-    frame->pkt_pts = p.m.timestamp;
+    frame->pkt_pts = p->m.timestamp;
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif
-    frame->pkt_dts = p.m.timestamp;
-    frame->key_frame = p.frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
+    frame->pkt_dts = p->m.timestamp;
+    frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
 
-    switch (p.frame_hdr->frame_type) {
+    switch (p->frame_hdr->frame_type) {
     case DAV1D_FRAME_TYPE_KEY:
     case DAV1D_FRAME_TYPE_INTRA:
         frame->pict_type = AV_PICTURE_TYPE_I;