Merge remote-tracking branch 'qatar/master'

* qatar/master: (32 commits)
  doc: create separate section for audio encoders
  swscale: Remove orphaned, commented-out function declaration.
  swscale: Eliminate rgb24toyv12_c() duplication.
  Remove h263_msmpeg4 from MpegEncContext.
  APIchanges: Fill in git hash for fps_probe_size (30315a8)
  avformat: Add fpsprobesize as an AVOption.
  avoptions: Return explicitly NAN or {0,0} if the option isn't found
  rtmp: Reindent
  rtmp: Don't try to do av_malloc(0)
  tty: replace AVFormatParameters.sample_rate abuse with a private option.
  Fix end time of last chapter in compute_chapters_end
  ffmpeg: get rid of useless AVInputStream.nb_streams.
  ffmpeg: simplify managing input files and streams
  ffmpeg: purge redundant AVInputStream.index.
  lavf: deprecate AVFormatParameters.channel.
  libdc1394: add a private option for channel.
  dv1394: add a private option for channel.
  v4l2: reindent.
  v4l2: add a private option for channel.
  lavf: deprecate AVFormatParameters.standard.
  ...

Conflicts:
	doc/APIchanges
	doc/encoders.texi
	ffmpeg.c
	libavdevice/alsa-audio.h
	libavformat/version.h
	libavutil/opt.c
	libswscale/rgb2rgb.h
	libswscale/rgb2rgb_template.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2011-05-26 03:00:37 +02:00
commit 39e4206dc6
31 changed files with 455 additions and 241 deletions
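
Most of the work in this merge follows a single pattern: parameters that used to travel through AVFormatParameters (sample_rate, channels, channel, standard, mpeg2ts_compute_pcr) become private AVOptions on the individual demuxers, and the old fields are kept only behind FF_API_FORMAT_PARAMETERS until the next major bump. The sketch below condenses that pattern into one place; the FooData struct and the option values are illustrative, not taken from any single file changed here.

    /* Sketch of the private-option pattern used by the alsa/oss/sndio/v4l/v4l2/
     * dv1394/bktr/rawdec/tty changes in this merge. FooData is hypothetical. */
    #include <limits.h>
    #include <stddef.h>
    #include "libavformat/avformat.h"
    #include "libavutil/opt.h"

    typedef struct {
        AVClass *class;     /* must come first so the option system can find the class */
        int sample_rate;    /* filled in from the "sample_rate" private option */
        int channels;       /* filled in from the "channels" private option */
    } FooData;

    static const AVOption foo_options[] = {
        { "sample_rate", "", offsetof(FooData, sample_rate), FF_OPT_TYPE_INT,
          {.dbl = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
        { "channels",    "", offsetof(FooData, channels),    FF_OPT_TYPE_INT,
          {.dbl = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
        { NULL },
    };

    static const AVClass foo_demuxer_class = {
        .class_name = "foo demuxer",
        .item_name  = av_default_item_name,
        .option     = foo_options,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    /* The demuxer exposes the class via .priv_class (and sizes priv_data to
     * sizeof(FooData)); read_header() then uses the values from its priv_data
     * instead of the deprecated AVFormatParameters fields. */

The deprecated fields stay functional behind #if FF_API_FORMAT_PARAMETERS, so existing callers keep working until LIBAVFORMAT_VERSION_MAJOR reaches 54 (see the libavformat/version.h hunk further down).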


@ -26,6 +26,9 @@ API changes, most recent first:
2011-05-XX - XXXXXX - lavfi 2.6.0 - avcodec.h
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
2011-05-25 - 30315a8 - lavf 53.1.0 - avformat.h
Add fps_probe_size to AVFormatContext.
2011-05-18 - 64150ff - lavc 53.4.0 - AVCodecContext.request_sample_fmt
Add request_sample_fmt field to AVCodecContext.
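
For the fps_probe_size entry above (30315a8), a minimal usage sketch with the av_open_input_file()/av_find_stream_info() API of this period; error handling is reduced to the bare minimum.

    #include "libavformat/avformat.h"

    static int open_and_probe(const char *filename)
    {
        AVFormatContext *ic = NULL;

        if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
            return -1;
        /* cap frame-rate probing at 30 frames; the default of -1 keeps the old heuristic */
        ic->fps_probe_size = 30;
        if (av_find_stream_info(ic) < 0) {
            av_close_input_file(ic);
            return -1;
        }
        av_close_input_file(ic);
        return 0;
    }

The same field should also be reachable by name (fpsprobesize) through the generic AVOption mechanism, as added in the libavformat/options.c hunk below.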


@ -477,5 +477,4 @@ Selected by Encoder (default)
@end table
@c man end ENCODERS
@c man end AUDIO ENCODERS

ffmpeg.c

@ -114,11 +114,9 @@ static const OptionDef options[];
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
static const char *last_asked_format = NULL;
static AVFormatContext *input_files[MAX_FILES];
static int64_t input_files_ts_offset[MAX_FILES];
static double *input_files_ts_scale[MAX_FILES] = {NULL};
static AVCodec **input_codecs = NULL;
static int nb_input_files = 0;
static int nb_input_codecs = 0;
static int nb_input_files_ts_scale[MAX_FILES] = {0};
@ -319,7 +317,6 @@ static int nb_output_streams_for_file[MAX_FILES] = { 0 };
typedef struct AVInputStream {
int file_index;
int index;
AVStream *st;
int discard; /* true if stream data should be discarded */
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
@ -339,10 +336,10 @@ typedef struct AVInputStream {
} AVInputStream;
typedef struct AVInputFile {
AVFormatContext *ctx;
int eof_reached; /* true if eof reached */
int ist_index; /* index of first stream in ist_table */
int buffer_size; /* current total buffer size */
int nb_streams; /* nb streams we are aware of */
} AVInputFile;
#if HAVE_TERMIOS_H
@ -351,6 +348,11 @@ typedef struct AVInputFile {
static struct termios oldtty;
#endif
static AVInputStream *input_streams = NULL;
static int nb_input_streams = 0;
static AVInputFile *input_files = NULL;
static int nb_input_files = 0;
#if CONFIG_AVFILTER
static int configure_video_filters(AVInputStream *ist, AVOutputStream *ost)
@ -537,7 +539,7 @@ static int ffmpeg_exit(int ret)
av_free(output_streams_for_file[i]);
}
for(i=0;i<nb_input_files;i++) {
av_close_input_file(input_files[i]);
av_close_input_file(input_files[i].ctx);
av_free(input_files_ts_scale[i]);
}
@ -554,6 +556,9 @@ static int ffmpeg_exit(int ret)
av_free(stream_maps);
av_free(meta_data_maps);
av_freep(&input_streams);
av_freep(&input_files);
av_free(video_codec_name);
av_free(audio_codec_name);
av_free(subtitle_codec_name);
@ -839,7 +844,7 @@ need_realloc:
if ((ost->audio_resample && !ost->resample) || resample_changed) {
if (resample_changed) {
av_log(NULL, AV_LOG_INFO, "Input stream #%d.%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
ist->file_index, ist->index,
ist->file_index, ist->st->index,
ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels,
dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels);
ost->resample_sample_fmt = dec->sample_fmt;
@ -1202,7 +1207,7 @@ static void do_video_out(AVFormatContext *s,
if (resample_changed) {
av_log(NULL, AV_LOG_INFO,
"Input stream #%d.%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
ist->file_index, ist->index,
ist->file_index, ist->st->index,
ost->resample_width, ost->resample_height, avcodec_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , avcodec_get_pix_fmt_name(dec->pix_fmt));
ost->resample_width = dec->width;
@ -1903,7 +1908,7 @@ static void print_sdp(AVFormatContext **avc, int n)
static int copy_chapters(int infile, int outfile)
{
AVFormatContext *is = input_files[infile];
AVFormatContext *is = input_files[infile].ctx;
AVFormatContext *os = output_files[outfile];
int i;
@ -1969,16 +1974,16 @@ static void parse_forced_key_frames(char *kf, AVOutputStream *ost,
*/
static int transcode(AVFormatContext **output_files,
int nb_output_files,
AVFormatContext **input_files,
AVInputFile *input_files,
int nb_input_files,
AVStreamMap *stream_maps, int nb_stream_maps)
{
int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0, step;
int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0,step;
AVFormatContext *is, *os;
AVCodecContext *codec, *icodec;
AVOutputStream *ost, **ost_table = NULL;
AVInputStream *ist, **ist_table = NULL;
AVInputFile *file_table;
AVInputStream *ist;
char error[1024];
int key;
int want_sdp = 1;
@ -1987,46 +1992,9 @@ static int transcode(AVFormatContext **output_files,
int nb_frame_threshold[AVMEDIA_TYPE_NB]={0};
int nb_streams[AVMEDIA_TYPE_NB]={0};
file_table= av_mallocz(nb_input_files * sizeof(AVInputFile));
if (!file_table)
goto fail;
/* input stream init */
j = 0;
for(i=0;i<nb_input_files;i++) {
is = input_files[i];
file_table[i].ist_index = j;
file_table[i].nb_streams = is->nb_streams;
j += is->nb_streams;
}
nb_istreams = j;
ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
if (!ist_table)
goto fail;
for(i=0;i<nb_istreams;i++) {
ist = av_mallocz(sizeof(AVInputStream));
if (!ist)
goto fail;
ist_table[i] = ist;
}
j = 0;
for(i=0;i<nb_input_files;i++) {
is = input_files[i];
for(k=0;k<is->nb_streams;k++) {
ist = ist_table[j++];
ist->st = is->streams[k];
ist->file_index = i;
ist->index = k;
ist->discard = 1; /* the stream is discarded by default
(changed later) */
if (rate_emu) {
ist->start = av_gettime();
}
}
}
if (rate_emu)
for (i = 0; i < nb_input_streams; i++)
input_streams[i].start = av_gettime();
/* output stream init */
nb_ostreams = 0;
@ -2052,7 +2020,7 @@ static int transcode(AVFormatContext **output_files,
int si = stream_maps[i].stream_index;
if (fi < 0 || fi > nb_input_files - 1 ||
si < 0 || si > file_table[fi].nb_streams - 1) {
si < 0 || si > input_files[fi].ctx->nb_streams - 1) {
fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
ret = AVERROR(EINVAL);
goto fail;
@ -2060,7 +2028,7 @@ static int transcode(AVFormatContext **output_files,
fi = stream_maps[i].sync_file_index;
si = stream_maps[i].sync_stream_index;
if (fi < 0 || fi > nb_input_files - 1 ||
si < 0 || si > file_table[fi].nb_streams - 1) {
si < 0 || si > input_files[fi].ctx->nb_streams - 1) {
fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
ret = AVERROR(EINVAL);
goto fail;
@ -2082,12 +2050,12 @@ static int transcode(AVFormatContext **output_files,
for(j=0; j<AVMEDIA_TYPE_NB; j++)
nb_frame_threshold[j] += step;
for(j=0; j<nb_istreams; j++) {
for(j=0; j<nb_input_streams; j++) {
int skip=0;
ist = ist_table[j];
ist = &input_streams[j];
if(opt_programid){
int pi,si;
AVFormatContext *f= input_files[ ist->file_index ];
AVFormatContext *f= input_files[ ist->file_index ].ctx;
skip=1;
for(pi=0; pi<f->nb_programs; pi++){
AVProgram *p= f->programs[pi];
@ -2115,11 +2083,11 @@ static int transcode(AVFormatContext **output_files,
ost = ost_table[n] = output_streams_for_file[k][i];
ost->st = os->streams[i];
if (nb_stream_maps > 0) {
ost->source_index = file_table[stream_maps[n].file_index].ist_index +
ost->source_index = input_files[stream_maps[n].file_index].ist_index +
stream_maps[n].stream_index;
/* Sanity check that the stream types match */
if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) {
if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) {
int i= ost->file_index;
av_dump_format(output_files[i], i, output_files[i]->filename, 1);
fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
@ -2131,12 +2099,12 @@ static int transcode(AVFormatContext **output_files,
} else {
/* get corresponding input stream index : we select the first one with the right type */
found = 0;
for(j=0;j<nb_istreams;j++) {
for (j = 0; j < nb_input_streams; j++) {
int skip=0;
ist = ist_table[j];
ist = &input_streams[j];
if(opt_programid){
int pi,si;
AVFormatContext *f= input_files[ ist->file_index ];
AVFormatContext *f = input_files[ist->file_index].ctx;
skip=1;
for(pi=0; pi<f->nb_programs; pi++){
AVProgram *p= f->programs[pi];
@ -2159,8 +2127,8 @@ static int transcode(AVFormatContext **output_files,
if (!found) {
if(! opt_programid) {
/* try again and reuse existing stream */
for(j=0;j<nb_istreams;j++) {
ist = ist_table[j];
for (j = 0; j < nb_input_streams; j++) {
ist = &input_streams[j];
if ( ist->st->codec->codec_type == ost->st->codec->codec_type
&& ist->st->discard != AVDISCARD_ALL) {
ost->source_index = j;
@ -2177,10 +2145,10 @@ static int transcode(AVFormatContext **output_files,
}
}
}
ist = ist_table[ost->source_index];
ist = &input_streams[ost->source_index];
ist->discard = 0;
ost->sync_ist = (nb_stream_maps > 0) ?
ist_table[file_table[stream_maps[n].sync_file_index].ist_index +
&input_streams[input_files[stream_maps[n].sync_file_index].ist_index +
stream_maps[n].sync_stream_index] : ist;
}
}
@ -2189,7 +2157,7 @@ static int transcode(AVFormatContext **output_files,
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
os = output_files[ost->file_index];
ist = ist_table[ost->source_index];
ist = &input_streams[ost->source_index];
codec = ost->st->codec;
icodec = ist->st->codec;
@ -2368,7 +2336,7 @@ static int transcode(AVFormatContext **output_files,
ost = ost_table[i];
if (ost->encoding_needed) {
AVCodec *codec = i < nb_output_codecs ? output_codecs[i] : NULL;
AVCodecContext *dec = ist_table[ost->source_index]->st->codec;
AVCodecContext *dec = input_streams[ost->source_index].st->codec;
if (!codec)
codec = avcodec_find_encoder(ost->st->codec->codec_id);
if (!codec) {
@ -2397,21 +2365,21 @@ static int transcode(AVFormatContext **output_files,
}
/* open each decoder */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
for (i = 0; i < nb_input_streams; i++) {
ist = &input_streams[i];
if (ist->decoding_needed) {
AVCodec *codec = i < nb_input_codecs ? input_codecs[i] : NULL;
if (!codec)
codec = avcodec_find_decoder(ist->st->codec->codec_id);
if (!codec) {
snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
ist->st->codec->codec_id, ist->file_index, ist->index);
ist->st->codec->codec_id, ist->file_index, ist->st->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (avcodec_open(ist->st->codec, codec) < 0) {
snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
ist->file_index, ist->index);
ist->file_index, ist->st->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
@ -2421,9 +2389,9 @@ static int transcode(AVFormatContext **output_files,
}
/* init pts */
for(i=0;i<nb_istreams;i++) {
for (i = 0; i < nb_input_streams; i++) {
AVStream *st;
ist = ist_table[i];
ist = &input_streams[i];
st= ist->st;
ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0;
ist->next_pts = AV_NOPTS_VALUE;
@ -2452,7 +2420,7 @@ static int transcode(AVFormatContext **output_files,
METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file")
files[0] = output_files[out_file_index];
files[1] = input_files[in_file_index];
files[1] = input_files[in_file_index].ctx;
for (j = 0; j < 2; j++) {
AVMetaDataMap *map = &meta_data_maps[i][j];
@ -2483,7 +2451,7 @@ static int transcode(AVFormatContext **output_files,
if (metadata_global_autocopy) {
for (i = 0; i < nb_output_files; i++)
av_metadata_copy(&output_files[i]->metadata, input_files[0]->metadata,
av_metadata_copy(&output_files[i]->metadata, input_files[0].ctx->metadata,
AV_METADATA_DONT_OVERWRITE);
}
@ -2510,7 +2478,7 @@ static int transcode(AVFormatContext **output_files,
/* copy chapters from the first input file that has them*/
if (!nb_chapter_maps)
for (i = 0; i < nb_input_files; i++) {
if (!input_files[i]->nb_chapters)
if (!input_files[i].ctx->nb_chapters)
continue;
for (j = 0; j < nb_output_files; j++)
@ -2545,14 +2513,14 @@ static int transcode(AVFormatContext **output_files,
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
fprintf(stderr, " Stream #%d.%d -> #%d.%d",
ist_table[ost->source_index]->file_index,
ist_table[ost->source_index]->index,
input_streams[ost->source_index].file_index,
input_streams[ost->source_index].st->index,
ost->file_index,
ost->index);
if (ost->sync_ist != ist_table[ost->source_index])
if (ost->sync_ist != &input_streams[ost->source_index])
fprintf(stderr, " [sync #%d.%d]",
ost->sync_ist->file_index,
ost->sync_ist->index);
ost->sync_ist->st->index);
fprintf(stderr, "\n");
}
}
@ -2607,16 +2575,14 @@ static int transcode(AVFormatContext **output_files,
if (key == 'd' || key == 'D'){
int debug=0;
if(key == 'D') {
ist = ist_table[0];
debug = ist->st->codec->debug<<1;
debug = input_streams[0].st->codec->debug<<1;
if(!debug) debug = 1;
while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
debug += debug;
}else
scanf("%d", &debug);
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
ist->st->codec->debug = debug;
for(i=0;i<nb_input_streams;i++) {
input_streams[i].st->codec->debug = debug;
}
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
@ -2645,12 +2611,12 @@ static int transcode(AVFormatContext **output_files,
double ipts, opts;
ost = ost_table[i];
os = output_files[ost->file_index];
ist = ist_table[ost->source_index];
ist = &input_streams[ost->source_index];
if(ist->is_past_recording_time || no_packet[ist->file_index])
continue;
opts = ost->st->pts.val * av_q2d(ost->st->time_base);
ipts = (double)ist->pts;
if (!file_table[ist->file_index].eof_reached){
if (!input_files[ist->file_index].eof_reached){
if(ipts < ipts_min) {
ipts_min = ipts;
if(input_sync ) file_index = ist->file_index;
@ -2681,7 +2647,7 @@ static int transcode(AVFormatContext **output_files,
break;
/* read a frame from it and output it in the fifo */
is = input_files[file_index];
is = input_files[file_index].ctx;
ret= av_read_frame(is, &pkt);
if(ret == AVERROR(EAGAIN)){
no_packet[file_index]=1;
@ -2689,7 +2655,7 @@ static int transcode(AVFormatContext **output_files,
continue;
}
if (ret < 0) {
file_table[file_index].eof_reached = 1;
input_files[file_index].eof_reached = 1;
if (opt_shortest)
break;
else
@ -2705,10 +2671,10 @@ static int transcode(AVFormatContext **output_files,
}
/* the following test is needed in case new streams appear
dynamically in stream : we ignore them */
if (pkt.stream_index >= file_table[file_index].nb_streams)
if (pkt.stream_index >= input_files[file_index].ctx->nb_streams)
goto discard_packet;
ist_index = file_table[file_index].ist_index + pkt.stream_index;
ist = ist_table[ist_index];
ist_index = input_files[file_index].ist_index + pkt.stream_index;
ist = &input_streams[ist_index];
if (ist->discard)
goto discard_packet;
@ -2751,12 +2717,12 @@ static int transcode(AVFormatContext **output_files,
goto discard_packet;
}
//fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
//fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
if (verbose >= 0)
fprintf(stderr, "Error while decoding stream #%d.%d\n",
ist->file_index, ist->index);
ist->file_index, ist->st->index);
if (exit_on_error)
ffmpeg_exit(1);
av_free_packet(&pkt);
@ -2771,8 +2737,8 @@ static int transcode(AVFormatContext **output_files,
}
/* at the end of stream, we must flush the decoder buffers */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
for (i = 0; i < nb_input_streams; i++) {
ist = &input_streams[i];
if (ist->decoding_needed) {
output_packet(ist, i, ost_table, nb_ostreams, NULL);
}
@ -2802,8 +2768,8 @@ static int transcode(AVFormatContext **output_files,
}
/* close each decoder */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
for (i = 0; i < nb_input_streams; i++) {
ist = &input_streams[i];
if (ist->decoding_needed) {
avcodec_close(ist->st->codec);
}
@ -2814,15 +2780,7 @@ static int transcode(AVFormatContext **output_files,
fail:
av_freep(&bit_buffer);
av_free(file_table);
if (ist_table) {
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
av_free(ist);
}
av_free(ist_table);
}
if (ost_table) {
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
@ -3375,8 +3333,17 @@ static void opt_input_file(const char *filename)
for(i=0;i<ic->nb_streams;i++) {
AVStream *st = ic->streams[i];
AVCodecContext *dec = st->codec;
AVInputStream *ist;
dec->thread_count = thread_count;
input_codecs = grow_array(input_codecs, sizeof(*input_codecs), &nb_input_codecs, nb_input_codecs + 1);
input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
ist = &input_streams[nb_input_streams - 1];
ist->st = st;
ist->file_index = nb_input_files;
ist->discard = 1;
switch (dec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(audio_codec_name);
@ -3449,13 +3416,14 @@ static void opt_input_file(const char *filename)
}
}
input_files[nb_input_files] = ic;
input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
/* dump the file content */
if (verbose >= 0)
av_dump_format(ic, nb_input_files, filename, 0);
nb_input_files++;
input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
input_files[nb_input_files - 1].ctx = ic;
input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
video_channel = 0;
top_field_first = -1;
@ -3481,7 +3449,7 @@ static void check_inputs(int *has_video_ptr,
has_data = 0;
for(j=0;j<nb_input_files;j++) {
ic = input_files[j];
ic = input_files[j].ctx;
for(i=0;i<ic->nb_streams;i++) {
AVCodecContext *enc = ic->streams[i]->codec;
switch(enc->codec_type) {
@ -4159,9 +4127,9 @@ static void opt_target(const char *arg)
/* Try to determine PAL/NTSC by peeking in the input files */
if(nb_input_files) {
int i, j;
for(j = 0; j < nb_input_files; j++) {
for(i = 0; i < input_files[j]->nb_streams; i++) {
AVCodecContext *c = input_files[j]->streams[i]->codec;
for (j = 0; j < nb_input_files; j++) {
for (i = 0; i < input_files[j].ctx->nb_streams; i++) {
AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
if(c->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
fr = c->time_base.den * 1000 / c->time_base.num;
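
The ffmpeg.c changes above replace the per-file stream tables with two flat arrays, input_streams and input_files, that are extended one element at a time via the grow_array() helper. The helper's definition is not part of this diff; the sketch below only approximates its behaviour (the real helper in ffmpeg.c aborts via ffmpeg_exit() on allocation failure rather than returning NULL).

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/mem.h"

    /* Approximate sketch of a grow_array()-style helper: enlarge the array to
     * new_size elements and zero the newly added tail, matching call sites like
     *   input_streams = grow_array(input_streams, sizeof(*input_streams),
     *                              &nb_input_streams, nb_input_streams + 1);
     */
    static void *grow_array_sketch(void *array, int elem_size, int *size, int new_size)
    {
        if (new_size > *size) {
            uint8_t *tmp = av_realloc(array, new_size * elem_size);
            if (!tmp)
                return NULL;   /* assumption: the real helper exits here instead */
            memset(tmp + *size * elem_size, 0, (new_size - *size) * elem_size);
            *size = new_size;
            return tmp;
        }
        return array;
    }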


@ -70,33 +70,27 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
case CODEC_ID_MPEG4:
break;
case CODEC_ID_MSMPEG4V1:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=1;
break;
case CODEC_ID_MSMPEG4V2:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=2;
break;
case CODEC_ID_MSMPEG4V3:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=3;
break;
case CODEC_ID_WMV1:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=4;
break;
case CODEC_ID_WMV2:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=5;
break;
case CODEC_ID_VC1:
case CODEC_ID_WMV3:
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->msmpeg4_version=6;
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
@ -672,7 +666,7 @@ retry:
decode_slice(s);
}
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I)
if(!CONFIG_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
}


@ -2155,7 +2155,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}
/* add dct residue */
if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
|| (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);


@ -209,7 +209,6 @@ typedef struct MpegEncContext {
/* the following codec id fields are deprecated in favor of codec_id */
int h263_plus; ///< h263 plus headers
int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead)
int h263_flv; ///< use flv h263 header
enum CodecID codec_id; /* see CODEC_ID_xxx */
@ -843,4 +842,3 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
int motion_x, int motion_y);
#endif /* AVCODEC_MPEGVIDEO_H */


@ -640,7 +640,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_MSMPEG4V2:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 2;
@ -649,7 +648,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_MSMPEG4V3:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 3;
@ -659,7 +657,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_WMV1:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 4;
@ -669,7 +666,6 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_WMV2:
s->out_format = FMT_H263;
s->h263_msmpeg4 = 1;
s->h263_pred = 1;
s->unrestricted_mv = 1;
s->msmpeg4_version= 5;
@ -2773,7 +2769,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
/* we need to initialize some time vars before we can encode b-frames */
// RAL: Condition added for MPEG1VIDEO
if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
set_frame_distances(s);
if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
ff_set_mpeg4_time(s);
@ -2942,7 +2938,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
case FMT_H263:
if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
ff_wmv2_encode_picture_header(s, picture_number);
else if (CONFIG_MSMPEG4_ENCODER && s->h263_msmpeg4)
else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
msmpeg4_encode_picture_header(s, picture_number);
else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
mpeg4_encode_picture_header(s, picture_number);


@ -47,6 +47,7 @@
#include <alsa/asoundlib.h>
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "alsa-audio.h"
@ -56,21 +57,16 @@ static av_cold int audio_read_header(AVFormatContext *s1,
AlsaData *s = s1->priv_data;
AVStream *st;
int ret;
unsigned int sample_rate;
enum CodecID codec_id;
snd_pcm_sw_params_t *sw_params;
if (ap->sample_rate <= 0) {
av_log(s1, AV_LOG_ERROR, "Bad sample rate %d\n", ap->sample_rate);
#if FF_API_FORMAT_PARAMETERS
if (ap->sample_rate > 0)
s->sample_rate = ap->sample_rate;
return AVERROR(EIO);
}
if (ap->channels <= 0) {
av_log(s1, AV_LOG_ERROR, "Bad channels number %d\n", ap->channels);
return AVERROR(EIO);
}
if (ap->channels > 0)
s->channels = ap->channels;
#endif
st = av_new_stream(s1, 0);
if (!st) {
@ -78,10 +74,9 @@ static av_cold int audio_read_header(AVFormatContext *s1,
return AVERROR(ENOMEM);
}
sample_rate = ap->sample_rate;
codec_id = s1->audio_codec_id;
ret = ff_alsa_open(s1, SND_PCM_STREAM_CAPTURE, &sample_rate, ap->channels,
ret = ff_alsa_open(s1, SND_PCM_STREAM_CAPTURE, &s->sample_rate, s->channels,
&codec_id);
if (ret < 0) {
return AVERROR(EIO);
@ -113,8 +108,8 @@ static av_cold int audio_read_header(AVFormatContext *s1,
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = codec_id;
st->codec->sample_rate = sample_rate;
st->codec->channels = ap->channels;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
return 0;
@ -163,6 +158,19 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
return 0;
}
static const AVOption options[] = {
{ "sample_rate", "", offsetof(AlsaData, sample_rate), FF_OPT_TYPE_INT, {.dbl = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(AlsaData, channels), FF_OPT_TYPE_INT, {.dbl = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass alsa_demuxer_class = {
.class_name = "ALSA demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_alsa_demuxer = {
"alsa",
NULL_IF_CONFIG_SMALL("ALSA audio input"),
@ -172,4 +180,5 @@ AVInputFormat ff_alsa_demuxer = {
audio_read_packet,
ff_alsa_close,
.flags = AVFMT_NOFILE,
.priv_class = &alsa_demuxer_class,
};


@ -33,6 +33,7 @@
#include <alsa/asoundlib.h>
#include "config.h"
#include "libavformat/avformat.h"
#include "libavutil/log.h"
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
@ -42,12 +43,15 @@
typedef void (*ff_reorder_func)(const void *, void *, int);
typedef struct {
AVClass *class;
snd_pcm_t *h;
int frame_size; ///< preferred size for reads and writes
int period_size; ///< bytes per sample * channels
ff_reorder_func reorder_func;
void *reorder_buf;
int reorder_buf_size; ///< in frames
int sample_rate; ///< sample rate set by user
int channels; ///< number of channels set by user
} AlsaData;
/**


@ -25,6 +25,8 @@
*/
#include "libavformat/avformat.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
@ -47,12 +49,14 @@
#include <strings.h>
typedef struct {
AVClass *class;
int video_fd;
int tuner_fd;
int width, height;
int frame_rate;
int frame_rate_base;
uint64_t per_frame;
int standard;
} VideoData;
@ -245,7 +249,6 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
int width, height;
int frame_rate;
int frame_rate_base;
int format = -1;
if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
return -1;
@ -274,16 +277,18 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st->codec->time_base.den = frame_rate;
st->codec->time_base.num = frame_rate_base;
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
if (!strcasecmp(ap->standard, "pal"))
format = PAL;
s->standard = PAL;
else if (!strcasecmp(ap->standard, "secam"))
format = SECAM;
s->standard = SECAM;
else if (!strcasecmp(ap->standard, "ntsc"))
format = NTSC;
s->standard = NTSC;
}
#endif
if (bktr_init(s1->filename, width, height, format,
if (bktr_init(s1->filename, width, height, s->standard,
&(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
return AVERROR(EIO);
@ -311,6 +316,24 @@ static int grab_read_close(AVFormatContext *s1)
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(VideoData, standard), FF_OPT_TYPE_INT, {.dbl = VIDEO_FORMAT}, PAL, NTSCJ, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, FF_OPT_TYPE_CONST, {.dbl = PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "SECAM", "", 0, FF_OPT_TYPE_CONST, {.dbl = SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALN", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALN}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALM", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ NULL },
};
static const AVClass bktr_class = {
.class_name = "BKTR grab interface",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_bktr_demuxer = {
"bktr",
NULL_IF_CONFIG_SMALL("video grab"),
@ -320,4 +343,5 @@ AVInputFormat ff_bktr_demuxer = {
grab_read_packet,
grab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &bktr_class,
};
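
The bktr options table above is the first of several in this merge that pair an integer option with named constants. The glue is the trailing unit string: constants whose unit matches the option's unit can be given by name (e.g. "SECAM") and are translated to the integer stored at the option's offset. Condensed and annotated from the bktr hunk (the dv1394 and v4l tables follow the same layout):

    /* Annotated sketch of the int-option-plus-named-constants pattern. */
    static const AVOption options[] = {
        /* the real option: an int stored in VideoData.standard,
         * default VIDEO_FORMAT, legal range PAL..NTSCJ, unit "standard" */
        { "standard", "", offsetof(VideoData, standard), FF_OPT_TYPE_INT,
          {.dbl = VIDEO_FORMAT}, PAL, NTSCJ, AV_OPT_FLAG_DECODING_PARAM, "standard" },
        /* named constants: offset 0, the value travels in default_val, and the
         * shared unit string "standard" ties them to the option above */
        { "PAL",   "", 0, FF_OPT_TYPE_CONST, {.dbl = PAL},   0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
        { "SECAM", "", 0, FF_OPT_TYPE_CONST, {.dbl = SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
        { "NTSC",  "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSC},  0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
        { NULL },
    };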


@ -30,6 +30,8 @@
#include <time.h>
#include <strings.h>
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#undef DV1394_DEBUG
@ -38,6 +40,7 @@
#include "dv1394.h"
struct dv1394_data {
AVClass *class;
int fd;
int channel;
int format;
@ -90,15 +93,17 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
if (!dv->dv_demux)
goto failed;
if (ap->standard && !strcasecmp(ap->standard, "pal"))
dv->format = DV1394_PAL;
else
dv->format = DV1394_NTSC;
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
if (!strcasecmp(ap->standard, "pal"))
dv->format = DV1394_PAL;
else
dv->format = DV1394_NTSC;
}
if (ap->channel)
dv->channel = ap->channel;
else
dv->channel = DV1394_DEFAULT_CHANNEL;
#endif
/* Open and initialize DV1394 device */
dv->fd = open(context->filename, O_RDONLY);
@ -227,6 +232,21 @@ static int dv1394_close(AVFormatContext * context)
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(struct dv1394_data, format), FF_OPT_TYPE_INT, {.dbl = DV1394_NTSC}, DV1394_PAL, DV1394_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, FF_OPT_TYPE_CONST, {.dbl = DV1394_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, FF_OPT_TYPE_CONST, {.dbl = DV1394_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "channel", "", offsetof(struct dv1394_data, channel), FF_OPT_TYPE_INT, {.dbl = DV1394_DEFAULT_CHANNEL}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass dv1394_class = {
.class_name = "DV1394 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_dv1394_demuxer = {
.name = "dv1394",
.long_name = NULL_IF_CONFIG_SMALL("DV1394 A/V grab"),
@ -234,5 +254,6 @@ AVInputFormat ff_dv1394_demuxer = {
.read_header = dv1394_read_header,
.read_packet = dv1394_read_packet,
.read_close = dv1394_close,
.flags = AVFMT_NOFILE
.flags = AVFMT_NOFILE,
.priv_class = &dv1394_class,
};


@ -22,6 +22,8 @@
#include "config.h"
#include "libavformat/avformat.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
@ -45,9 +47,11 @@
#undef free
typedef struct dc1394_data {
AVClass *class;
#if HAVE_LIBDC1394_1
raw1394handle_t handle;
dc1394_cameracapture camera;
int channel;
#elif HAVE_LIBDC1394_2
dc1394_t *d;
dc1394camera_t *camera;
@ -155,6 +159,11 @@ static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
return -1;
#if FF_API_FORMAT_PARAMETERS
if (ap->channel)
dc1394->channel = ap->channel;
#endif
/* Now let us prep the hardware. */
dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
if (!dc1394->handle) {
@ -162,11 +171,11 @@ static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
goto out;
}
camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
goto out_handle;
}
res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
0,
FORMAT_VGA_NONCOMPRESSED,
fmt->frame_size_id,
@ -236,6 +245,20 @@ static int dc1394_v1_close(AVFormatContext * context)
return 0;
}
static const AVOption options[] = {
#if HAVE_LIBDC1394_1
{ "channel", "", offsetof(dc1394_data, channel), FF_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
#endif
{ NULL },
};
static const AVClass libdc1394_class = {
.class_name = "libdc1394 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
#elif HAVE_LIBDC1394_2
static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
{
@ -356,6 +379,7 @@ AVInputFormat ff_libdc1394_demuxer = {
.read_packet = dc1394_v2_read_packet,
.read_close = dc1394_v2_close,
.flags = AVFMT_NOFILE
.priv_class = &libdc1394_class,
};
#endif
@ -367,6 +391,7 @@ AVInputFormat ff_libdc1394_demuxer = {
.read_header = dc1394_v1_read_header,
.read_packet = dc1394_v1_read_packet,
.read_close = dc1394_v1_close,
.flags = AVFMT_NOFILE
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif


@ -37,12 +37,14 @@
#include <sys/select.h>
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#define AUDIO_BLOCK_SIZE 4096
typedef struct {
AVClass *class;
int fd;
int sample_rate;
int channels;
@ -214,15 +216,17 @@ static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
AVStream *st;
int ret;
if (ap->sample_rate <= 0 || ap->channels <= 0)
return -1;
#if FF_API_FORMAT_PARAMETERS
if (ap->sample_rate > 0)
s->sample_rate = ap->sample_rate;
if (ap->channels > 0)
s->channels = ap->channels;
#endif
st = av_new_stream(s1, 0);
if (!st) {
return AVERROR(ENOMEM);
}
s->sample_rate = ap->sample_rate;
s->channels = ap->channels;
ret = audio_open(s1, 0, s1->filename);
if (ret < 0) {
@ -291,6 +295,19 @@ static int audio_read_close(AVFormatContext *s1)
}
#if CONFIG_OSS_INDEV
static const AVOption options[] = {
{ "sample_rate", "", offsetof(AudioData, sample_rate), FF_OPT_TYPE_INT, {.dbl = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(AudioData, channels), FF_OPT_TYPE_INT, {.dbl = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass oss_demuxer_class = {
.class_name = "OSS demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_oss_demuxer = {
"oss",
NULL_IF_CONFIG_SMALL("Open Sound System capture"),
@ -300,6 +317,7 @@ AVInputFormat ff_oss_demuxer = {
audio_read_packet,
audio_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &oss_demuxer_class,
};
#endif


@ -26,8 +26,10 @@
#include <sndio.h>
#include "libavformat/avformat.h"
#include "libavutil/log.h"
typedef struct {
AVClass *class;
struct sio_hdl *hdl;
enum CodecID codec_id;
int64_t hwpos;


@ -23,6 +23,7 @@
#include <sndio.h>
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "sndio_common.h"
@ -33,16 +34,17 @@ static av_cold int audio_read_header(AVFormatContext *s1,
AVStream *st;
int ret;
if (ap->sample_rate <= 0 || ap->channels <= 0)
return AVERROR(EINVAL);
#if FF_API_FORMAT_PARAMETERS
if (ap->sample_rate > 0)
s->sample_rate = ap->sample_rate;
if (ap->channels > 0)
s->channels = ap->channels;
#endif
st = av_new_stream(s1, 0);
if (!st)
return AVERROR(ENOMEM);
s->sample_rate = ap->sample_rate;
s->channels = ap->channels;
ret = ff_sndio_open(s1, 0, s1->filename);
if (ret < 0)
return ret;
@ -97,6 +99,19 @@ static av_cold int audio_read_close(AVFormatContext *s1)
return 0;
}
static const AVOption options[] = {
{ "sample_rate", "", offsetof(SndioData, sample_rate), FF_OPT_TYPE_INT, {.dbl = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(SndioData, channels), FF_OPT_TYPE_INT, {.dbl = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass sndio_demuxer_class = {
.class_name = "sndio indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_sndio_demuxer = {
.name = "sndio",
.long_name = NULL_IF_CONFIG_SMALL("sndio audio capture"),
@ -105,4 +120,5 @@ AVInputFormat ff_sndio_demuxer = {
.read_packet = audio_read_packet,
.read_close = audio_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &sndio_demuxer_class,
};


@ -23,6 +23,8 @@
#include "config.h"
#include "libavutil/rational.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libavcodec/dsputil.h"
#include <unistd.h>
@ -36,6 +38,7 @@
#include <strings.h>
typedef struct {
AVClass *class;
int fd;
int frame_format; /* see VIDEO_PALETTE_xxx */
int use_mmap;
@ -49,6 +52,7 @@ typedef struct {
struct video_mbuf gb_buffers;
struct video_mmap gb_buf;
int gb_frame;
int standard;
} VideoData;
static const struct {
@ -131,13 +135,18 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
}
/* set tv standard */
if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
if (!strcasecmp(ap->standard, "pal"))
tuner.mode = VIDEO_MODE_PAL;
else if (!strcasecmp(ap->standard, "secam"))
tuner.mode = VIDEO_MODE_SECAM;
else
tuner.mode = VIDEO_MODE_NTSC;
if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
if (!strcasecmp(ap->standard, "pal"))
s->standard = VIDEO_MODE_PAL;
else if (!strcasecmp(ap->standard, "secam"))
s->standard = VIDEO_MODE_SECAM;
else
s->standard = VIDEO_MODE_NTSC;
}
#endif
tuner.mode = s->standard;
ioctl(video_fd, VIDIOCSTUNER, &tuner);
}
@ -339,6 +348,21 @@ static int grab_read_close(AVFormatContext *s1)
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(VideoData, standard), FF_OPT_TYPE_INT, {.dbl = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, FF_OPT_TYPE_CONST, {.dbl = VIDEO_MODE_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "SECAM", "", 0, FF_OPT_TYPE_CONST, {.dbl = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, FF_OPT_TYPE_CONST, {.dbl = VIDEO_MODE_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ NULL },
};
static const AVClass v4l_class = {
.class_name = "V4L indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_v4l_demuxer = {
"video4linux",
NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
@ -348,4 +372,5 @@ AVInputFormat ff_v4l_demuxer = {
grab_read_packet,
grab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &v4l_class,
};


@ -44,6 +44,8 @@
#include <time.h>
#include <strings.h>
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
static const int desired_video_buffers = 256;
@ -54,6 +56,7 @@ enum io_method {
};
struct video_data {
AVClass *class;
int fd;
int frame_format; /* V4L2_PIX_FMT_* */
enum io_method io_method;
@ -64,6 +67,8 @@ struct video_data {
int buffers;
void **buf_start;
unsigned int *buf_len;
char *standard;
int channel;
};
struct buff_data {
@ -448,50 +453,61 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ap->channel>=0) {
/* set tv video input */
memset (&input, 0, sizeof (input));
input.index = ap->channel;
if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
return AVERROR(EIO);
}
#if FF_API_FORMAT_PARAMETERS
if (ap->channel > 0)
s->channel = ap->channel;
#endif
av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
ap->channel, input.name);
if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set input(%d) failed\n",
ap->channel);
return AVERROR(EIO);
}
/* set tv video input */
memset (&input, 0, sizeof (input));
input.index = s->channel;
if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
return AVERROR(EIO);
}
av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
s->channel, input.name);
if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set input(%d) failed\n",
s->channel);
return AVERROR(EIO);
}
#if FF_API_FORMAT_PARAMETERS
if (ap->standard) {
av_freep(&s->standard);
s->standard = av_strdup(ap->standard);
}
#endif
if (s->standard) {
av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
ap->standard);
s->standard);
/* set tv standard */
memset (&standard, 0, sizeof (standard));
for(i=0;;i++) {
standard.index = i;
if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
ap->standard);
s->standard);
return AVERROR(EIO);
}
if (!strcasecmp(standard.name, ap->standard)) {
if (!strcasecmp(standard.name, s->standard)) {
break;
}
}
av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
ap->standard, (uint64_t)standard.id);
s->standard, (uint64_t)standard.id);
if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
ap->standard);
s->standard);
return AVERROR(EIO);
}
}
av_freep(&s->standard);
if (ap->time_base.num && ap->time_base.den) {
av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
@ -680,6 +696,19 @@ static int v4l2_read_close(AVFormatContext *s1)
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(struct video_data, standard), FF_OPT_TYPE_STRING, {.str = "NTSC" }, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
{ "channel", "", offsetof(struct video_data, channel), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass v4l2_class = {
.class_name = "V4L2 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_v4l2_demuxer = {
"video4linux2",
NULL_IF_CONFIG_SMALL("Video4Linux2 device grab"),
@ -689,4 +718,5 @@ AVInputFormat ff_v4l2_demuxer = {
v4l2_read_packet,
v4l2_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &v4l2_class,
};


@ -228,17 +228,20 @@ typedef struct AVProbeData {
typedef struct AVFormatParameters {
AVRational time_base;
int sample_rate;
int channels;
#if FF_API_FORMAT_PARAMETERS
attribute_deprecated int sample_rate;
attribute_deprecated int channels;
#endif
int width;
int height;
enum PixelFormat pix_fmt;
int channel; /**< Used to select DV channel. */
const char *standard; /**< TV standard, NTSC, PAL, SECAM */
unsigned int mpeg2ts_raw:1; /**< Force raw MPEG-2 transport stream output, if possible. */
unsigned int mpeg2ts_compute_pcr:1; /**< Compute exact PCR for each transport
stream packet (only meaningful if
mpeg2ts_raw is TRUE). */
#if FF_API_FORMAT_PARAMETERS
attribute_deprecated int channel; /**< Used to select DV channel. */
attribute_deprecated const char *standard; /**< deprecated, use demuxer-specific options instead. */
attribute_deprecated unsigned int mpeg2ts_raw:1; /**< deprecated, use mpegtsraw demuxer */
/**< deprecated, use mpegtsraw demuxer-specific options instead */
attribute_deprecated unsigned int mpeg2ts_compute_pcr:1;
#endif
unsigned int initial_pause:1; /**< Do not begin to play the stream
immediately (RTSP only). */
unsigned int prealloced_context:1;
@ -824,6 +827,11 @@ typedef struct AVFormatContext {
* - decoding: Unused.
*/
int64_t start_time_realtime;
/**
* decoding: number of frames used to probe fps
*/
int fps_probe_size;
} AVFormatContext;
typedef struct AVPacketList {


@ -25,6 +25,8 @@
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "mpegts.h"
@ -86,6 +88,7 @@ struct Program {
};
struct MpegTSContext {
const AVClass *class;
/* user data */
AVFormatContext *stream;
/** raw packet size, including FEC if present */
@ -122,6 +125,19 @@ struct MpegTSContext {
MpegTSFilter *pids[NB_PID_MAX];
};
static const AVOption options[] = {
{"compute_pcr", "Compute exact PCR for each transport stream packet.", offsetof(MpegTSContext, mpeg2ts_compute_pcr), FF_OPT_TYPE_INT,
{.dbl = 0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass mpegtsraw_class = {
.class_name = "mpegtsraw demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
/* TS stream handling */
enum MpegTSState {
@ -1455,13 +1471,16 @@ static int mpegts_read_header(AVFormatContext *s,
int len;
int64_t pos;
#if FF_API_FORMAT_PARAMETERS
if (ap) {
ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr;
if (ap->mpeg2ts_compute_pcr)
ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr;
if(ap->mpeg2ts_raw){
av_log(s, AV_LOG_ERROR, "use mpegtsraw_demuxer!\n");
return -1;
}
}
#endif
/* read the first 1024 bytes to get packet size */
pos = avio_tell(pb);
@ -1883,4 +1902,5 @@ AVInputFormat ff_mpegtsraw_demuxer = {
#ifdef USE_SYNCPOINT_SEARCH
.read_seek2 = read_seek2,
#endif
.priv_class = &mpegtsraw_class,
};


@ -60,6 +60,7 @@ static const AVOption options[]={
{"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, {.dbl = DEFAULT }, 0, INT_MAX, E|D, "fdebug"},
{"ts", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = FF_FDEBUG_TS }, INT_MIN, INT_MAX, E|D, "fdebug"},
{"max_delay", "maximum muxing or demuxing delay in microseconds", OFFSET(max_delay), FF_OPT_TYPE_INT, {.dbl = DEFAULT }, 0, INT_MAX, E|D},
{"fpsprobesize", "number of frames used to probe fps", OFFSET(fps_probe_size), FF_OPT_TYPE_INT, -1, -1, INT_MAX-1, D},
{NULL},
};


@ -50,7 +50,7 @@ static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
AVInputFormat ff_pcm_ ## name ## _demuxer = {\
#name,\
NULL_IF_CONFIG_SMALL(long_name),\
0,\
sizeof(RawAudioDemuxerContext),\
NULL,\
ff_raw_read_header,\
raw_read_packet,\
@ -59,6 +59,7 @@ AVInputFormat ff_pcm_ ## name ## _demuxer = {\
.flags= AVFMT_GENERIC_INDEX,\
.extensions = ext,\
.value = codec,\
.priv_class = &ff_rawaudio_demuxer_class,\
};
PCMDEF(f64be, "PCM 64 bit floating-point big-endian format",


@ -23,6 +23,7 @@
#include "avformat.h"
#include "avio_internal.h"
#include "rawdec.h"
#include "libavutil/opt.h"
/* raw input */
int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
@ -43,15 +44,28 @@ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->codec_id = id;
switch(st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
st->codec->sample_rate = ap->sample_rate;
if(ap->channels) st->codec->channels = ap->channels;
else st->codec->channels = 1;
case AVMEDIA_TYPE_AUDIO: {
RawAudioDemuxerContext *s1 = s->priv_data;
#if FF_API_FORMAT_PARAMETERS
if (ap->sample_rate)
st->codec->sample_rate = ap->sample_rate;
if (ap->channels)
st->codec->channels = ap->channels;
else st->codec->channels = 1;
#endif
if (s1->sample_rate)
st->codec->sample_rate = s1->sample_rate;
if (s1->channels)
st->codec->channels = s1->channels;
st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
assert(st->codec->bits_per_coded_sample > 0);
st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8;
av_set_pts_info(st, 64, 1, st->codec->sample_rate);
break;
}
case AVMEDIA_TYPE_VIDEO:
if(ap->time_base.num)
av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
@ -139,17 +153,31 @@ int ff_raw_video_read_header(AVFormatContext *s,
/* Note: Do not forget to add new entries to the Makefile as well. */
static const AVOption audio_options[] = {
{ "sample_rate", "", offsetof(RawAudioDemuxerContext, sample_rate), FF_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(RawAudioDemuxerContext, channels), FF_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
const AVClass ff_rawaudio_demuxer_class = {
.class_name = "rawaudio demuxer",
.item_name = av_default_item_name,
.option = audio_options,
.version = LIBAVUTIL_VERSION_INT,
};
#if CONFIG_G722_DEMUXER
AVInputFormat ff_g722_demuxer = {
"g722",
NULL_IF_CONFIG_SMALL("raw G.722"),
0,
sizeof(RawAudioDemuxerContext),
NULL,
ff_raw_read_header,
ff_raw_read_partial_packet,
.flags= AVFMT_GENERIC_INDEX,
.extensions = "g722,722",
.value = CODEC_ID_ADPCM_G722,
.priv_class = &ff_rawaudio_demuxer_class,
};
#endif
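
One small point worth spelling out in the audio branch above: block_align is derived from the coded sample size and the channel count, both of which can now come from the new private options. With 16-bit PCM and two channels, for instance, av_get_bits_per_sample() reports 16, so block_align = 16 * 2 / 8 = 4 bytes per sample frame. As a sketch (values illustrative):

    /* Sketch: how the raw audio demuxer sizes one sample frame. */
    int bits        = av_get_bits_per_sample(CODEC_ID_PCM_S16LE); /* 16 */
    int channels    = 2;                                          /* from the "channels" option */
    int block_align = bits * channels / 8;                        /* 4 bytes per frame */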


@ -23,6 +23,15 @@
#define AVFORMAT_RAWDEC_H
#include "avformat.h"
#include "libavutil/log.h"
typedef struct RawAudioDemuxerContext {
AVClass *class;
int sample_rate;
int channels;
} RawAudioDemuxerContext;
extern const AVClass ff_rawaudio_demuxer_class;
int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap);


@ -233,9 +233,11 @@ int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type,
int timestamp, int size)
{
pkt->data = av_malloc(size);
if (!pkt->data)
return AVERROR(ENOMEM);
if (size) {
pkt->data = av_malloc(size);
if (!pkt->data)
return AVERROR(ENOMEM);
}
pkt->data_size = size;
pkt->channel_id = channel_id;
pkt->type = type;


@ -683,7 +683,7 @@ static int get_packet(URLContext *s, int for_header)
return AVERROR_EOF;
for (;;) {
RTMPPacket rpkt;
RTMPPacket rpkt = { 0 };
if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
rt->chunk_size, rt->prev_pkt[0])) <= 0) {
if (ret == 0) {
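
The two RTMP hunks above are related hardening fixes: ff_rtmp_packet_create() no longer calls av_malloc(0) for a zero-sized packet body (malloc(0) may legitimately return NULL, which the old code would have misreported as ENOMEM), and get_packet() now zero-initializes the packet so that data stays NULL when no body is allocated. A caller-side sketch of the combined effect, using only the signatures visible in the hunks plus av_freep():

    /* Sketch (internal rtmp API): creating and releasing a zero-sized packet. */
    static int empty_packet_sketch(int channel_id, RTMPPacketType type, int timestamp)
    {
        RTMPPacket pkt = { 0 };                 /* data == NULL, data_size == 0 */
        int ret = ff_rtmp_packet_create(&pkt, channel_id, type, timestamp, 0);
        if (ret < 0)                            /* no longer fails merely because size == 0 */
            return ret;
        /* data is still NULL for the empty packet, so releasing it is a safe no-op */
        av_freep(&pkt.data);
        return 0;
    }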


@ -26,12 +26,13 @@
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avformat.h"
#include "sauce.h"
#define LINE_RATE 6000 /* characters per second */
typedef struct {
AVClass *class;
int chars_per_frame;
uint64_t fsize; /**< file size less metadata buffer */
} TtyDemuxContext;
@ -86,7 +87,11 @@ static int read_header(AVFormatContext *avctx,
}
/* simulate tty display speed */
s->chars_per_frame = FFMAX(av_q2d(st->time_base) * (ap->sample_rate ? ap->sample_rate : LINE_RATE), 1);
#if FF_API_FORMAT_PARAMETERS
if (ap->sample_rate)
s->chars_per_frame = ap->sample_rate;
#endif
s->chars_per_frame = FFMAX(av_q2d(st->time_base)*s->chars_per_frame, 1);
if (avctx->pb->seekable) {
s->fsize = avio_size(avctx->pb);
@ -124,6 +129,18 @@ static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
return 0;
}
static const AVOption options[] = {
{ "chars_per_frame", "", offsetof(TtyDemuxContext, chars_per_frame), FF_OPT_TYPE_INT, {.dbl = 6000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM},
{ NULL },
};
static const AVClass tty_demuxer_class = {
.class_name = "TTY demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_tty_demuxer = {
.name = "tty",
.long_name = NULL_IF_CONFIG_SMALL("Tele-typewriter"),
@ -131,4 +148,5 @@ AVInputFormat ff_tty_demuxer = {
.read_header = read_header,
.read_packet = read_packet,
.extensions = "ans,art,asc,diz,ice,nfo,txt,vt",
.priv_class = &tty_demuxer_class,
};


@ -2272,6 +2272,8 @@ int av_find_stream_info(AVFormatContext *ic)
the correct fps */
if (av_q2d(st->time_base) > 0.0005)
fps_analyze_framecount *= 2;
if (ic->fps_probe_size >= 0)
fps_analyze_framecount = ic->fps_probe_size;
/* variable fps and no guess at the real fps */
if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
&& st->info->duration_count < fps_analyze_framecount


@ -71,5 +71,8 @@
#ifndef FF_API_ALLOC_OUTPUT_CONTEXT
#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_FORMAT_PARAMETERS
#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#endif /* AVFORMAT_VERSION_H */
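
FF_API_FORMAT_PARAMETERS follows the usual deprecation convention in these libraries: the macro evaluates to true for the current major version and flips to false (compiling the compatibility code out) once LIBAVFORMAT_VERSION_MAJOR reaches 54. The guard pattern the demuxers in this merge use looks like this (condensed from the alsa/oss hunks above):

    /* Inside a demuxer's read_header(): honour the deprecated AVFormatParameters
     * fields only while the compatibility macro is still defined to 1. */
    #if FF_API_FORMAT_PARAMETERS
        if (ap->sample_rate > 0)
            s->sample_rate = ap->sample_rate;   /* legacy path, dropped at the major bump */
        if (ap->channels > 0)
            s->channels = ap->channels;
    #endif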


@ -290,7 +290,8 @@ double av_get_double(void *obj, const char *name, const AVOption **o_out)
double num=1;
int den=1;
av_get_number(obj, name, o_out, &num, &den, &intnum);
if (av_get_number(obj, name, o_out, &num, &den, &intnum) < 0)
return NAN;
return num*intnum/den;
}
@ -300,7 +301,8 @@ AVRational av_get_q(void *obj, const char *name, const AVOption **o_out)
double num=1;
int den=1;
av_get_number(obj, name, o_out, &num, &den, &intnum);
if (av_get_number(obj, name, o_out, &num, &den, &intnum) < 0)
return (AVRational){0, 0};
if (num == 1.0 && (int)intnum == intnum)
return (AVRational){intnum, den};
else
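
With this change a caller can tell "option not found" apart from a real value: av_get_double() now returns NAN explicitly and av_get_q() returns the invalid rational {0,0}. A small sketch of the check on the caller side, assuming only isnan() from <math.h> beyond what the hunk shows:

    #include <math.h>
    #include "libavutil/opt.h"
    #include "libavutil/rational.h"

    /* Sketch: probing whether a named option exists on an AVOptions-enabled struct. */
    static int option_lookup_ok(void *obj, const char *name)
    {
        double     d = av_get_double(obj, name, NULL);
        AVRational q = av_get_q(obj, name, NULL);

        if (isnan(d))
            return 0;                  /* not found: explicit NAN instead of an arbitrary value */
        if (q.num == 0 && q.den == 0)
            return 0;                  /* not found: {0,0} is not a valid rational */
        return 1;
    }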


@ -68,13 +68,6 @@ void shuffle_bytes_1230(const uint8_t *src, uint8_t *dst, long src_size);
void shuffle_bytes_3012(const uint8_t *src, uint8_t *dst, long src_size);
void shuffle_bytes_3210(const uint8_t *src, uint8_t *dst, long src_size);
void rgb24toyv12_c(const uint8_t *src, uint8_t *ydst,
uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride,
long srcStride);
#if LIBSWSCALE_VERSION_MAJOR < 1
/* deprecated, use the public versions in swscale.h */
attribute_deprecated void palette8topacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
@ -85,13 +78,10 @@ attribute_deprecated void palette8torgb16(const uint8_t *src, uint8_t *dst, long
attribute_deprecated void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
#endif
/**
* Height should be a multiple of 2 and width should be a multiple of 16.
* (If this is a problem for anyone then tell me, and I will fix it.)
* Chrominance data is only taken from every second line, others are ignored.
* FIXME: Write high quality version.
*/
//void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
void rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
uint8_t *vdst, long width, long height, long lumStride,
long chromStride, long srcStride);
/**
* Height should be a multiple of 2 and width should be a multiple of 16.


@ -633,11 +633,9 @@ static inline void uyvytoyv12_c(const uint8_t *src, uint8_t *ydst,
* others are ignored in the C version.
* FIXME: Write HQ version.
*/
void rgb24toyv12_c(const uint8_t *src, uint8_t *ydst,
uint8_t *udst, uint8_t *vdst,
long width, long height,
long lumStride, long chromStride,
long srcStride)
void rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
uint8_t *vdst, long width, long height, long lumStride,
long chromStride, long srcStride)
{
long y;
const int chromWidth = width >> 1;