Merge remote-tracking branch 'cigaes/master'

* cigaes/master:
  lavf/lavd: version bump and APIchanges for uncoded frames.
  tools: add uncoded_frame test program.
  lavf: add uncodedframecrc test muxer.
  lavd/xv: preliminary support of uncoded frame.
  lavd/alsa: preliminary support of uncoded frame.
  lavf: add write_uncoded_frame() API.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
Michael Niedermayer 2014-02-12 00:37:51 +01:00
commit 058a3d6542
13 changed files with 640 additions and 13 deletions

View File

@ -31,7 +31,7 @@ OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher
TOOLS = qt-faststart trasher uncoded_frame
TOOLS-$(CONFIG_ZLIB) += cws2fws
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
@ -61,6 +61,8 @@ $(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
config.h: .config
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))

View File

@ -15,6 +15,9 @@ libavutil: 2012-10-22
API changes, most recent first:
2014-02-11 - 1b05ac2 - lavf 55.32.100 - avformat.h
Add av_write_uncoded_frame() and av_interleaved_write_uncoded_frame().
2014-02-04 - 3adb5f8 / d9ae103 - lavf 55.30.100 / 55.11.0 - avformat.h
Add AVFormatContext.max_interleave_delta for controlling amount of buffering
when interleaving.

View File

@ -113,6 +113,24 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
return 0;
}
/* Write one uncoded audio frame to the ALSA device by wrapping it in a
 * minimal AVPacket and reusing the regular packet path.
 * In query mode (AV_WRITE_UNCODED_FRAME_QUERY) only reports whether the
 * stream's sample format can be handled: planar formats are rejected with
 * AVERROR(EINVAL), everything else accepted. */
static int audio_write_frame(AVFormatContext *s1, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AlsaData *s = s1->priv_data;
    AVPacket pkt;

    /* ff_alsa_open() should have accepted only supported formats */
    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return av_sample_fmt_is_planar(s1->streams[stream_index]->codec->sample_fmt) ?
               AVERROR(EINVAL) : 0;

    /* set only used fields */
    /* NOTE(review): pkt is deliberately not av_init_packet()ed — this
     * assumes audio_write_packet() reads nothing but data/size/dts/duration;
     * confirm before routing this packet anywhere else. */
    pkt.data     = (*frame)->data[0];
    pkt.size     = (*frame)->nb_samples * s->frame_size;
    pkt.dts      = (*frame)->pkt_dts;
    pkt.duration = av_frame_get_pkt_duration(*frame);
    return audio_write_packet(s1, &pkt);
}
static void
audio_get_output_timestamp(AVFormatContext *s1, int stream,
int64_t *dts, int64_t *wall)
@ -133,6 +151,7 @@ AVOutputFormat ff_alsa_muxer = {
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = ff_alsa_close,
.write_uncoded_frame = audio_write_frame,
.get_output_timestamp = audio_get_output_timestamp,
.flags = AVFMT_NOFILE,
};

View File

@ -28,7 +28,7 @@
#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 55
#define LIBAVDEVICE_VERSION_MINOR 8
#define LIBAVDEVICE_VERSION_MINOR 9
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \

View File

@ -36,6 +36,7 @@
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavformat/internal.h"
#include "avdevice.h"
typedef struct {
@ -197,23 +198,19 @@ static int xv_write_header(AVFormatContext *s)
return ret;
}
static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
static int write_picture(AVFormatContext *s, AVPicture *pict)
{
XVContext *xv = s->priv_data;
XvImage *img = xv->yuv_image;
XWindowAttributes window_attrs;
AVPicture pict;
AVCodecContext *ctx = s->streams[0]->codec;
uint8_t *data[3] = {
img->data + img->offsets[0],
img->data + img->offsets[1],
img->data + img->offsets[2]
};
avpicture_fill(&pict, pkt->data, ctx->pix_fmt, ctx->width, ctx->height);
av_image_copy(data, img->pitches, (const uint8_t **)pict.data, pict.linesize,
av_image_copy(data, img->pitches, (const uint8_t **)pict->data, pict->linesize,
xv->image_format, img->width, img->height);
XGetWindowAttributes(xv->display, xv->window, &window_attrs);
if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
xv->yuv_image, 0, 0, xv->image_width, xv->image_height, 0, 0,
@ -224,6 +221,24 @@ static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
return 0;
}
/* Regular packet path of the xv muxer: interpret the packet payload as a raw
 * picture in the stream's pixel format/dimensions and display it. */
static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec_ctx = s->streams[0]->codec;
    AVPicture picture;

    avpicture_fill(&picture, pkt->data, codec_ctx->pix_fmt,
                   codec_ctx->width, codec_ctx->height);
    return write_picture(s, &picture);
}
/* Uncoded-frame callback of the xv muxer.
 * In query mode, always reports support: xv_write_header() has already
 * rejected any pixel format the device cannot display.
 * Otherwise displays the frame directly; the AVFrame -> AVPicture cast
 * relies on data[]/linesize[] being the leading members of AVFrame. */
static int xv_write_frame(AVFormatContext *s, int stream_index, AVFrame **frame,
                          unsigned flags)
{
    /* xv_write_header() should have accepted only supported formats */
    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return 0;
    return write_picture(s, (AVPicture *)*frame);
}
#define OFFSET(x) offsetof(XVContext, x)
static const AVOption options[] = {
{ "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
@ -250,6 +265,7 @@ AVOutputFormat ff_xv_muxer = {
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = xv_write_header,
.write_packet = xv_write_packet,
.write_uncoded_frame = xv_write_frame,
.write_trailer = xv_write_trailer,
.flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
.priv_class = &xv_class,

View File

@ -388,6 +388,7 @@ OBJS-$(CONFIG_TRUEHD_MUXER) += rawenc.o
OBJS-$(CONFIG_TTA_DEMUXER) += tta.o apetag.o img2.o
OBJS-$(CONFIG_TTY_DEMUXER) += tty.o sauce.o
OBJS-$(CONFIG_TXD_DEMUXER) += txd.o
OBJS-$(CONFIG_UNCODEDFRAMECRC_MUXER) += uncodedframecrcenc.o framehash.o
OBJS-$(CONFIG_VC1_DEMUXER) += rawdec.o
OBJS-$(CONFIG_VC1_MUXER) += rawenc.o
OBJS-$(CONFIG_VC1T_DEMUXER) += vc1test.o

View File

@ -284,6 +284,7 @@ void av_register_all(void)
REGISTER_DEMUXER (TTA, tta);
REGISTER_DEMUXER (TXD, txd);
REGISTER_DEMUXER (TTY, tty);
REGISTER_MUXER (UNCODEDFRAMECRC, uncodedframecrc);
REGISTER_MUXDEMUX(VC1, vc1);
REGISTER_MUXDEMUX(VC1T, vc1t);
REGISTER_DEMUXER (VIVO, vivo);

View File

@ -512,6 +512,17 @@ typedef struct AVOutputFormat {
*/
int (*control_message)(struct AVFormatContext *s, int type,
void *data, size_t data_size);
/**
* Write an uncoded AVFrame.
*
* See av_write_uncoded_frame() for details.
*
* The library will free *frame afterwards, but the muxer can prevent it
* by setting the pointer to NULL.
*/
int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index,
AVFrame **frame, unsigned flags);
} AVOutputFormat;
/**
* @}
@ -2092,6 +2103,44 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt);
*/
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
/**
 * Write an uncoded frame to an output media file.
*
* The frame must be correctly interleaved according to the container
* specification; if not, then av_interleaved_write_frame() must be used.
*
* See av_interleaved_write_frame() for details.
*/
int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
AVFrame *frame);
/**
 * Write an uncoded frame to an output media file.
*
 * If the muxer supports it, this function makes it possible to write an AVFrame
* structure directly, without encoding it into a packet.
* It is mostly useful for devices and similar special muxers that use raw
* video or PCM data and will not serialize it into a byte stream.
*
* To test whether it is possible to use it with a given muxer and stream,
* use av_write_uncoded_frame_query().
*
* The caller gives up ownership of the frame and must not access it
* afterwards.
*
* @return >=0 for success, a negative code on error
*/
int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
AVFrame *frame);
/**
 * Test whether a muxer supports uncoded frames.
*
* @return >=0 if an uncoded frame can be written to that muxer and stream,
* <0 if not
*/
int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index);
/**
* Write the stream trailer to an output media file and free the
* file private data.

View File

@ -398,4 +398,18 @@ int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t dts);
void ff_rfps_calculate(AVFormatContext *ic);
/**
 * Flags for AVOutputFormat.write_uncoded_frame()
*/
/* Bit flags passed to the write_uncoded_frame() muxer callback; values are
 * single bits so they can be combined in the future. */
enum AVWriteUncodedFrameFlags {

    /**
     * Query whether the feature is possible on this stream.
     * The frame argument is ignored.
     */
    AV_WRITE_UNCODED_FRAME_QUERY = 0x0001,

};
#endif /* AVFORMAT_INTERNAL_H */

View File

@ -417,6 +417,15 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options)
return 0;
}
#define AV_PKT_FLAG_UNCODED_FRAME 0x2000
/* Note: using sizeof(AVFrame) from outside lavu is unsafe in general, but
it is only being used internally to this file as a consistency check.
The value is chosen to be very unlikely to appear on its own and to cause
immediate failure if used anywhere as a real size. */
#define UNCODED_FRAME_PACKET_SIZE (INT_MIN / 3 * 2 + (int)sizeof(AVFrame))
//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
@ -482,7 +491,9 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
/* update pts */
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 1);
frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
((AVFrame *)pkt->data)->nb_samples :
ff_get_audio_frame_size(st->codec, pkt->size, 1);
/* HACK/FIXME, we skip the initial 0 size packets as they are most
* likely equal to the encoder delay, but it would be better if we
@ -549,7 +560,14 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
}
did_split = av_packet_split_side_data(pkt);
ret = s->oformat->write_packet(s, pkt);
if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
AVFrame *frame = (AVFrame *)pkt->data;
av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, &frame, 0);
av_frame_free(&frame);
} else {
ret = s->oformat->write_packet(s, pkt);
}
if (s->flush_packets && s->pb && ret >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
avio_flush(s->pb);
@ -632,8 +650,13 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->buf = NULL;
av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
av_copy_packet_side_data(&this_pktl->pkt, &this_pktl->pkt); // copy side data
if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
av_assert0(((AVFrame *)pkt->data)->buf);
} else {
av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
av_copy_packet_side_data(&this_pktl->pkt, &this_pktl->pkt); // copy side data
}
if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
next_point = &(st->last_in_packet_buffer->next);
@ -932,3 +955,51 @@ int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
dst->streams[dst_stream]->time_base);
return av_write_frame(dst, &local_pkt);
}
/* Common implementation behind av_write_uncoded_frame() and
 * av_interleaved_write_uncoded_frame(): smuggle the AVFrame through the
 * regular packet pipeline inside a fake AVPacket.
 * pkt.data points at the AVFrame itself (not a byte buffer) and pkt.size is
 * the magic UNCODED_FRAME_PACKET_SIZE so downstream code can sanity-check
 * such packets; AV_PKT_FLAG_UNCODED_FRAME marks them explicitly.
 * A NULL frame is forwarded as a NULL packet (interleaving flush).
 * @param interleaved 0: write directly, 1: go through the interleaving queue
 * @return 0 on success, a negative AVERROR code on failure;
 *         AVERROR(ENOSYS) if the muxer has no write_uncoded_frame callback */
static int av_write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
                                           AVFrame *frame, int interleaved)
{
    AVPacket pkt, *pktp;

    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);

    if (!frame) {
        pktp = NULL;
    } else {
        pktp = &pkt;
        av_init_packet(&pkt);

        /* the frame pointer is carried in the data field */
        pkt.data         = (void *)frame;
        pkt.size         = UNCODED_FRAME_PACKET_SIZE;
        pkt.pts          =
        pkt.dts          = frame->pts;
        pkt.duration     = av_frame_get_pkt_duration(frame);
        pkt.stream_index = stream_index;
        pkt.flags       |= AV_PKT_FLAG_UNCODED_FRAME;
    }

    return interleaved ? av_interleaved_write_frame(s, pktp) :
                         av_write_frame(s, pktp);
}
/* Public entry point: write an uncoded frame without interleaving.
 * Thin wrapper over av_write_uncoded_frame_internal(); see avformat.h for
 * the full contract (caller gives up ownership of the frame). */
int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
                           AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 0);
}
/* Public entry point: write an uncoded frame through the interleaving queue.
 * Thin wrapper over av_write_uncoded_frame_internal(); see avformat.h for
 * the full contract (caller gives up ownership of the frame). */
int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
                                       AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 1);
}
/* Ask the muxer whether the given stream accepts uncoded frames by invoking
 * its write_uncoded_frame() callback in query mode (NULL frame argument,
 * AV_WRITE_UNCODED_FRAME_QUERY flag).
 * @return >= 0 if uncoded frames are supported, a negative AVERROR otherwise;
 *         AVERROR(ENOSYS) when the muxer provides no callback at all */
int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
{
    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);
    return s->oformat->write_uncoded_frame(s, stream_index, NULL,
                                           AV_WRITE_UNCODED_FRAME_QUERY);
}

View File

@ -0,0 +1,172 @@
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/adler32.h"
#include "libavutil/avassert.h"
#include "libavutil/bprint.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "avformat.h"
#include "internal.h"
/* Identical to Adler32 when the type is uint8_t. */
/*
 * Generate one checksum routine per sample type.  The checksum is a
 * fixed-point variant of Adler-32: "conv" first maps each sample to an
 * unsigned value, so for uint8_t input it matches plain Adler-32 given the
 * same seed.  The accumulator packs the low sum in bits 0-15 and the high
 * sum in bits 16-31, both reduced mod 65521 (the largest prime < 2^16).
 */
#define DEFINE_CKSUM_LINE(name, type, conv)                                 \
static void cksum_line_ ## name(unsigned *cksum, void *data, unsigned size) \
{                                                                           \
    type *sample = data;                                                    \
    unsigned lo = *cksum & 0xFFFF;                                          \
    unsigned hi = *cksum >> 16;                                             \
                                                                            \
    while (size-- > 0) {                                                    \
        lo = (lo + (unsigned)(conv)) % 65521;                               \
        hi = (hi + lo) % 65521;                                             \
        sample++;                                                           \
    }                                                                       \
    *cksum = lo | (hi << 16);                                               \
}

DEFINE_CKSUM_LINE(u8,  uint8_t,  *sample)
DEFINE_CKSUM_LINE(s16, int16_t,  *sample + 0x8000)
DEFINE_CKSUM_LINE(s32, int32_t,  *sample + 0x80000000)
DEFINE_CKSUM_LINE(flt, float,    *sample * 0x80000000 + 0x80000000)
DEFINE_CKSUM_LINE(dbl, double,   *sample * 0x80000000 + 0x80000000)
/* Append per-plane Adler-32 checksums of a video frame to bp, preceded by
 * "width x height" and the pixel format name ("unknown" if the format has
 * no descriptor).
 * av_image_fill_linesizes() provides the number of *significant* bytes per
 * row; frame->linesize[] may contain padding that must be stepped over but
 * not checksummed.  Iteration stops at the first zero linesize entry. */
static void video_frame_cksum(AVBPrint *bp, AVFrame *frame)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int i, y;
    uint8_t *data;
    int linesize[5] = { 0 };

    av_bprintf(bp, ", %d x %d", frame->width, frame->height);
    if (!desc) {
        av_bprintf(bp, ", unknown");
        return;
    }
    if (av_image_fill_linesizes(linesize, frame->format, frame->width) < 0)
        return;
    av_bprintf(bp, ", %s", desc->name);
    for (i = 0; linesize[i]; i++) {
        unsigned cksum = 0;
        int h = frame->height;

        /* chroma planes use the subsampled height; -((-h) >> s) rounds up */
        if ((i == 1 || i == 2) && desc->nb_components >= 3)
            h = -((-h) >> desc->log2_chroma_h);
        data = frame->data[i];
        for (y = 0; y < h; y++) {
            cksum = av_adler32_update(cksum, data, linesize[i]);
            data += frame->linesize[i];
        }
        av_bprintf(bp, ", 0x%08x", cksum);
    }
}
/* Append per-plane checksums of an audio frame to bp, preceded by the
 * sample count and sample format name.
 * Packed (non-planar) formats are treated as a single plane holding
 * nb_samples * channels samples.  The per-type cksum_line_* helpers
 * normalize each sample to unsigned before accumulating, so the result is
 * comparable across signed/unsigned and planar/packed layouts.
 * Unknown sample formats abort via av_assert0. */
static void audio_frame_cksum(AVBPrint *bp, AVFrame *frame)
{
    int nb_planes, nb_samples, p;
    const char *name;

    nb_planes  = av_frame_get_channels(frame);
    nb_samples = frame->nb_samples;
    if (!av_sample_fmt_is_planar(frame->format)) {
        /* packed: all channels interleaved in one plane */
        nb_samples *= nb_planes;
        nb_planes = 1;
    }
    name = av_get_sample_fmt_name(frame->format);
    av_bprintf(bp, ", %d samples", frame->nb_samples);
    av_bprintf(bp, ", %s", name ? name : "unknown");
    for (p = 0; p < nb_planes; p++) {
        uint32_t cksum = 0;
        void *d = frame->extended_data[p];

        /* NOTE(review): uint32_t* is passed where unsigned* is expected;
         * identical on supported platforms but worth confirming. */
        switch (frame->format) {
        case AV_SAMPLE_FMT_U8:
        case AV_SAMPLE_FMT_U8P:
            cksum_line_u8(&cksum, d, nb_samples);
            break;
        case AV_SAMPLE_FMT_S16:
        case AV_SAMPLE_FMT_S16P:
            cksum_line_s16(&cksum, d, nb_samples);
            break;
        case AV_SAMPLE_FMT_S32:
        case AV_SAMPLE_FMT_S32P:
            cksum_line_s32(&cksum, d, nb_samples);
            break;
        case AV_SAMPLE_FMT_FLT:
        case AV_SAMPLE_FMT_FLTP:
            cksum_line_flt(&cksum, d, nb_samples);
            break;
        case AV_SAMPLE_FMT_DBL:
        case AV_SAMPLE_FMT_DBLP:
            cksum_line_dbl(&cksum, d, nb_samples);
            break;
        default:
            av_assert0(!"reached");
        }
        av_bprintf(bp, ", 0x%08x", cksum);
    }
}
/* Uncoded-frame callback of the uncodedframecrc muxer: emit one text line
 * per frame of the form
 *   "<stream>, <pts>, <type>[, <type-specific checksums>]\n".
 * In query mode, reports support for every stream.
 * Frames of types other than audio/video get no checksum columns.
 * Returns 0 on success or AVERROR(ENOMEM) if building the line failed. */
static int write_frame(struct AVFormatContext *s, int stream_index,
                       AVFrame **frame, unsigned flags)
{
    AVBPrint bp;
    int ret = 0;
    enum AVMediaType type;
    const char *type_name;

    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return 0;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprintf(&bp, "%d, %10"PRId64"",
               stream_index, (*frame)->pts);
    type      = s->streams[stream_index]->codec->codec_type;
    type_name = av_get_media_type_string(type);
    av_bprintf(&bp, ", %s", type_name ? type_name : "unknown");
    switch (type) {
    case AVMEDIA_TYPE_VIDEO:
        video_frame_cksum(&bp, *frame);
        break;
    case AVMEDIA_TYPE_AUDIO:
        audio_frame_cksum(&bp, *frame);
        break;
    }
    av_bprint_chars(&bp, '\n', 1);
    /* only write the line if the bprint buffer did not truncate it */
    if (av_bprint_is_complete(&bp))
        avio_write(s->pb, bp.str, bp.len);
    else
        ret = AVERROR(ENOMEM);
    av_bprint_finalize(&bp, NULL);
    return ret;
}
/* This muxer only accepts uncoded frames; regular (encoded) packets are
 * unconditionally rejected. */
static int write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR(ENOSYS);
}
/* Testing muxer: instead of muxing, writes one checksum line per uncoded
 * frame (see write_frame above).  The advertised codec ids describe the raw
 * data the frames carry; encoded packets are rejected by write_packet. */
AVOutputFormat ff_uncodedframecrc_muxer = {
    .name                = "uncodedframecrc",
    .long_name           = NULL_IF_CONFIG_SMALL("uncoded framecrc testing"),
    .audio_codec         = AV_CODEC_ID_PCM_S16LE,
    .video_codec         = AV_CODEC_ID_RAWVIDEO,
    .write_header        = ff_framehash_write_header,
    .write_packet        = write_packet,
    .write_uncoded_frame = write_frame,
    .flags               = AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT |
                           AVFMT_TS_NEGATIVE,
};

View File

@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFORMAT_VERSION_MAJOR 55
#define LIBAVFORMAT_VERSION_MINOR 31
#define LIBAVFORMAT_VERSION_MINOR 32
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \

279
tools/uncoded_frame.c Normal file
View File

@ -0,0 +1,279 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/avassert.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavformat/avformat.h"
typedef struct {
AVFormatContext *mux;
AVStream *stream;
AVFilterContext *sink;
AVFilterLink *link;
} Stream;
/* Attach a buffersink (video) or abuffersink (audio) to output pad idx of
 * filter f and record it in st.
 * On success st->sink is the sink filter and st->link the connecting
 * AVFilterLink (used later for stream parameters and time base).
 * @return 0 on success, a negative AVERROR code on failure;
 *         AVERROR(EINVAL) for pad types other than audio/video */
static int create_sink(Stream *st, AVFilterGraph *graph,
                       AVFilterContext *f, int idx)
{
    enum AVMediaType type = avfilter_pad_get_type(f->output_pads, idx);
    const char *sink_name;
    int ret;

    switch (type) {
    case AVMEDIA_TYPE_VIDEO: sink_name = "buffersink";  break;
    case AVMEDIA_TYPE_AUDIO: sink_name = "abuffersink"; break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Stream type not supported\n");
        return AVERROR(EINVAL);
    }
    ret = avfilter_graph_create_filter(&st->sink,
                                       avfilter_get_by_name(sink_name),
                                       NULL, NULL, NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_link(f, idx, st->sink, 0);
    if (ret < 0)
        return ret;
    st->link = st->sink->inputs[0];
    return 0;
}
/**
 * uncoded_frame test program: run a libavfilter graph and push every frame
 * it produces, uncoded, into one or more output devices/muxers via
 * av_interleaved_write_uncoded_frame().
 *
 * Usage: uncoded_frame filter_graph dev:out [dev2:out2 ...]
 * Either exactly one output device (shared by all streams) or one device
 * per dangling graph output must be given.
 *
 * Returns 0 on success, 1 on any failure.
 */
int main(int argc, char **argv)
{
    char *in_graph_desc, **out_dev_name;
    int nb_out_dev = 0, nb_streams = 0;
    AVFilterGraph *in_graph = NULL;
    Stream *streams = NULL, *st;
    AVFrame *frame = NULL;
    int i, j, run = 1, ret;

    //av_log_set_level(AV_LOG_DEBUG);

    if (argc < 3) {
        av_log(NULL, AV_LOG_ERROR,
               "Usage: %s filter_graph dev:out [dev2:out2...]\n\n"
               "Examples:\n"
               "%s movie=file.nut:s=v+a xv:- alsa:default\n"
               "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n",
               argv[0], argv[0], argv[0]);
        exit(1);
    }
    in_graph_desc = argv[1];
    out_dev_name  = argv + 2;
    nb_out_dev    = argc - 2;

    av_register_all();
    avdevice_register_all();
    avfilter_register_all();

    /* Create input graph */
    if (!(in_graph = avfilter_graph_alloc())) {
        ret = AVERROR(ENOMEM);
        /* fixed duplicated word in the message ("graph graph") */
        av_log(NULL, AV_LOG_ERROR, "Unable to alloc graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n",
               av_err2str(ret));
        goto fail;
    }

    /* Count dangling outputs: each one becomes an output stream.
     * Dangling inputs are an error. */
    nb_streams = 0;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_inputs; j++) {
            if (!f->inputs[j]) {
                av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }
        for (j = 0; j < f->nb_outputs; j++)
            if (!f->outputs[j])
                nb_streams++;
    }
    if (!nb_streams) {
        av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (nb_out_dev != 1 && nb_out_dev != nb_streams) {
        av_log(NULL, AV_LOG_ERROR,
               "Graph has %d output streams, %d devices given\n",
               nb_streams, nb_out_dev);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n");
        /* FIX: this goto was missing; execution previously fell through and
         * dereferenced the NULL streams pointer below. */
        goto fail;
    }

    /* Attach one sink to every dangling output. */
    st = streams;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_outputs; j++) {
            if (!f->outputs[j]) {
                if ((ret = create_sink(st++, in_graph, f, j)) < 0)
                    goto fail;
            }
        }
    }
    av_assert0(st - streams == nb_streams);
    if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n");
        goto fail;
    }

    /* Create output devices: "fmt:target" splits on the first colon,
     * otherwise the whole argument is passed as the target. */
    for (i = 0; i < nb_out_dev; i++) {
        char *fmt = NULL, *dev = out_dev_name[i];
        st = &streams[i];
        if ((dev = strchr(dev, ':'))) {
            *(dev++) = 0;
            fmt = out_dev_name[i];
        }
        ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
        if (!(st->mux->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE,
                             NULL, NULL);
            if (ret < 0) {
                av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                       av_err2str(ret));
                goto fail;
            }
        }
    }
    /* With a single device, all remaining streams share it. */
    for (; i < nb_streams; i++)
        streams[i].mux = streams[0].mux;

    /* Create output device streams from the sink link parameters. */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        if (!(st->stream = avformat_new_stream(st->mux, NULL))) {
            ret = AVERROR(ENOMEM);
            av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n");
            goto fail;
        }
        st->stream->codec->codec_type = st->link->type;
        st->stream->time_base = st->stream->codec->time_base =
            st->link->time_base;
        switch (st->link->type) {
        case AVMEDIA_TYPE_VIDEO:
            st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            st->stream->avg_frame_rate =
            st->stream-> r_frame_rate = av_buffersink_get_frame_rate(st->sink);
            st->stream->codec->width               = st->link->w;
            st->stream->codec->height              = st->link->h;
            st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio;
            st->stream->codec->pix_fmt             = st->link->format;
            break;
        case AVMEDIA_TYPE_AUDIO:
            st->stream->codec->codec_id =
                av_get_pcm_codec(st->stream->codec->sample_fmt, -1);
            st->stream->codec->channel_layout = st->link->channel_layout;
            st->stream->codec->channels       = avfilter_link_get_channels(st->link);
            st->stream->codec->sample_rate    = st->link->sample_rate;
            st->stream->codec->sample_fmt     = st->link->format;
            break;
        default:
            av_assert0(!"reached");
        }
    }

    /* Init output devices */
    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        if ((ret = avformat_write_header(st->mux, NULL)) < 0) {
            av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
    }

    /* Check that every output stream accepts uncoded frames. */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        ret = av_write_uncoded_frame_query(st->mux, st->stream->index);
        if (ret < 0) {
            av_log(st->mux, AV_LOG_ERROR,
                   "Uncoded frames not supported on stream #%d: %s\n",
                   i, av_err2str(ret));
            goto fail;
        }
    }

    /* Main loop: request data from the graph, then drain every sink without
     * requesting (AV_BUFFERSINK_FLAG_NO_REQUEST) until it runs dry. */
    while (run) {
        ret = avfilter_graph_request_oldest(in_graph);
        if (ret < 0) {
            if (ret == AVERROR_EOF) {
                run = 0;
            } else {
                av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n",
                       av_err2str(ret));
                break;
            }
        }
        for (i = 0; i < nb_streams; i++) {
            st = &streams[i];
            while (1) {
                if (!frame && !(frame = av_frame_alloc())) {
                    ret = AVERROR(ENOMEM);
                    av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n");
                    goto fail;
                }
                ret = av_buffersink_get_frame_flags(st->sink, frame,
                                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                        av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n",
                               av_err2str(ret));
                    break;
                }
                /* rescale from the filter link time base to the stream's */
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts,
                                              st->link ->time_base,
                                              st->stream->time_base);
                ret = av_interleaved_write_uncoded_frame(st->mux,
                                                         st->stream->index,
                                                         frame);
                if (ret < 0) {
                    av_log(st->stream->codec, AV_LOG_ERROR,
                           "Error writing frame: %s\n", av_err2str(ret));
                    goto fail;
                }
                /* ownership passed to the muxer; allocate a fresh one */
                frame = NULL;
            }
        }
    }
    ret = 0;

    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        av_write_trailer(st->mux);
    }

fail:
    av_frame_free(&frame);
    avfilter_graph_free(&in_graph);
    if (streams) {
        for (i = 0; i < nb_out_dev; i++) {
            st = &streams[i];
            if (st->mux) {
                if (st->mux->pb)
                    avio_close(st->mux->pb);
                avformat_free_context(st->mux);
            }
        }
    }
    av_freep(&streams);
    return ret < 0;
}