Merge remote-tracking branch 'qatar/master'

* qatar/master:
  make av_interleaved_write_frame() flush packets when pkt is NULL
  mpegts: Fix dead error checks
  vc1: Do not read from array if index is invalid.
  targa: convert to bytestream2.
  rv34: set mb_num_left to 0 after finishing a frame

Conflicts:
	libavcodec/targa.c
	libavcodec/vc1data.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-03-27 11:52:08 +02:00
commit 6999f8bcf5
8 changed files with 159 additions and 123 deletions

View File

@@ -1580,6 +1580,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
ff_er_frame_end(s);
ff_MPV_frame_end(s);
s->mb_num_left = 0;
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
@@ -1778,6 +1779,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
* only complete frames */
ff_er_frame_end(s);
ff_MPV_frame_end(s);
s->mb_num_left = 0;
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
return AVERROR_INVALIDDATA;
}

View File

@@ -27,83 +27,76 @@
typedef struct TargaContext {
AVFrame picture;
GetByteContext gb;
int width, height;
int bpp;
int color_type;
int compression_type;
} TargaContext;
#define CHECK_BUFFER_SIZE(buf, buf_end, needed, where) \
if(needed > buf_end - buf){ \
av_log(avctx, AV_LOG_ERROR, "Problem: unexpected end of data while reading " where "\n"); \
return -1; \
} \
static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s, const uint8_t *src, int src_size, uint8_t *dst, int w, int h, int stride, int bpp)
static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s,
uint8_t *dst, int w, int h, int stride, int bpp)
{
int i, x, y;
int x, y;
int depth = (bpp + 1) >> 3;
int type, count;
int diff;
const uint8_t *src_end = src + src_size;
diff = stride - w * depth;
x = y = 0;
while(y < h){
CHECK_BUFFER_SIZE(src, src_end, 1, "image type");
type = *src++;
while (y < h) {
if (bytestream2_get_bytes_left(&s->gb) <= 0) {
av_log(avctx, AV_LOG_ERROR,
"Ran ouf of data before end-of-image\n");
return AVERROR_INVALIDDATA;
}
type = bytestream2_get_byteu(&s->gb);
count = (type & 0x7F) + 1;
type &= 0x80;
if(x + count > (h - y) * w){
av_log(avctx, AV_LOG_ERROR, "Packet went out of bounds: position (%i,%i) size %i\n", x, y, count);
return -1;
av_log(avctx, AV_LOG_ERROR,
"Packet went out of bounds: position (%i,%i) size %i\n",
x, y, count);
return AVERROR_INVALIDDATA;
}
if(type){
CHECK_BUFFER_SIZE(src, src_end, depth, "image data");
}else{
CHECK_BUFFER_SIZE(src, src_end, count * depth, "image data");
if (!type) {
do {
int n = FFMIN(count, w - x);
bytestream2_get_buffer(&s->gb, dst, n * depth);
count -= n;
dst += n * depth;
x += n;
if (x == w) {
x = 0;
y++;
dst += diff;
}
} while (count > 0);
} else {
uint8_t tmp[4];
bytestream2_get_buffer(&s->gb, tmp, depth);
do {
int n = FFMIN(count, w - x);
count -= n;
x += n;
do {
memcpy(dst, tmp, depth);
dst += depth;
} while (--n);
if (x == w) {
x = 0;
y++;
dst += diff;
}
} while (count > 0);
}
for(i = 0; i < count; i++){
switch(depth){
case 1:
*dst = *src;
break;
case 2:
AV_WN16A(dst, AV_RN16A(src));
break;
case 3:
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
break;
case 4:
AV_WN32A(dst, AV_RN32A(src));
break;
}
dst += depth;
if(!type)
src += depth;
x++;
if(x == w){
x = 0;
y++;
dst += diff;
}
}
if(type)
src += depth;
}
return src_size;
return 0;
}
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
TargaContext * const s = avctx->priv_data;
AVFrame *picture = data;
AVFrame * const p = &s->picture;
@@ -112,32 +105,38 @@ static int decode_frame(AVCodecContext *avctx,
int idlen, pal, compr, y, w, h, bpp, flags;
int first_clr, colors, csize;
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
/* parse image header */
CHECK_BUFFER_SIZE(buf, buf_end, 18, "header");
idlen = *buf++;
pal = *buf++;
compr = *buf++;
first_clr = bytestream_get_le16(&buf);
colors = bytestream_get_le16(&buf);
csize = *buf++;
idlen = bytestream2_get_byte(&s->gb);
pal = bytestream2_get_byte(&s->gb);
compr = bytestream2_get_byte(&s->gb);
first_clr = bytestream2_get_le16(&s->gb);
colors = bytestream2_get_le16(&s->gb);
csize = bytestream2_get_byte(&s->gb);
bytestream2_skip(&s->gb, 4); /* 2: x, 2: y */
w = bytestream2_get_le16(&s->gb);
h = bytestream2_get_le16(&s->gb);
bpp = bytestream2_get_byte(&s->gb);
if (bytestream2_get_bytes_left(&s->gb) <= idlen) {
av_log(avctx, AV_LOG_ERROR,
"Not enough data to read header\n");
return AVERROR_INVALIDDATA;
}
flags = bytestream2_get_byte(&s->gb);
if (!pal && (first_clr || colors || csize)) {
av_log(avctx, AV_LOG_WARNING, "File without colormap has colormap information set.\n");
// specification says we should ignore those values in this case
first_clr = colors = csize = 0;
}
buf += 2; /* x */
y = bytestream_get_le16(&buf);
w = bytestream_get_le16(&buf);
h = bytestream_get_le16(&buf);
bpp = *buf++;
flags = *buf++;
//skip identifier if any
CHECK_BUFFER_SIZE(buf, buf_end, idlen, "identifiers");
buf += idlen;
s->bpp = bpp;
s->width = w;
s->height = h;
switch(s->bpp){
// skip identifier if any
bytestream2_skip(&s->gb, idlen);
switch(bpp){
case 8:
avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? PIX_FMT_GRAY8 : PIX_FMT_PAL8;
break;
@@ -152,7 +151,7 @@ static int decode_frame(AVCodecContext *avctx,
avctx->pix_fmt = PIX_FMT_BGRA;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", s->bpp);
av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", bpp);
return -1;
}
@@ -190,23 +189,27 @@ static int decode_frame(AVCodecContext *avctx,
return -1;
}
pal_size = colors * pal_sample_size;
CHECK_BUFFER_SIZE(buf, buf_end, pal_size, "color table");
if(avctx->pix_fmt != PIX_FMT_PAL8)//should not occur but skip palette anyway
buf += pal_size;
bytestream2_skip(&s->gb, pal_size);
else{
int t;
uint32_t *pal = ((uint32_t *)p->data[1]) + first_clr;
if (bytestream2_get_bytes_left(&s->gb) < pal_size) {
av_log(avctx, AV_LOG_ERROR,
"Not enough data to read palette\n");
return AVERROR_INVALIDDATA;
}
switch (pal_sample_size) {
case 3:
/* RGB24 */
for (t = 0; t < colors; t++)
*pal++ = (0xffU<<24) | bytestream_get_le24(&buf);
*pal++ = (0xffU<<24) | bytestream2_get_le24u(&s->gb);
break;
case 2:
/* RGB555 */
for (t = 0; t < colors; t++) {
uint32_t v = bytestream_get_le16(&buf);
uint32_t v = bytestream2_get_le16u(&s->gb);
v = ((v & 0x7C00) << 9) |
((v & 0x03E0) << 6) |
((v & 0x001F) << 3);
@@ -219,44 +222,45 @@ static int decode_frame(AVCodecContext *avctx,
p->palette_has_changed = 1;
}
}
if((compr & (~TGA_RLE)) == TGA_NODATA)
memset(p->data[0], 0, p->linesize[0] * s->height);
else{
if ((compr & (~TGA_RLE)) == TGA_NODATA) {
memset(p->data[0], 0, p->linesize[0] * h);
} else {
if(compr & TGA_RLE){
int res = targa_decode_rle(avctx, s, buf, buf_end - buf, dst, avctx->width, avctx->height, stride, bpp);
int res = targa_decode_rle(avctx, s, dst, w, h, stride, bpp);
if (res < 0)
return -1;
buf += res;
}else{
size_t img_size = s->width * ((s->bpp + 1) >> 3);
CHECK_BUFFER_SIZE(buf, buf_end, img_size * s->height , "image data");
for(y = 0; y < s->height; y++){
memcpy(dst, buf, img_size);
return res;
} else {
size_t img_size = w * ((bpp + 1) >> 3);
if (bytestream2_get_bytes_left(&s->gb) < img_size * h) {
av_log(avctx, AV_LOG_ERROR,
"Not enough data available for image\n");
return AVERROR_INVALIDDATA;
}
for (y = 0; y < h; y++) {
bytestream2_get_bufferu(&s->gb, dst, img_size);
dst += stride;
buf += img_size;
}
}
}
if(flags & 0x10){ // right-to-left, needs horizontal flip
int x;
for(y = 0; y < s->height; y++){
for(y = 0; y < h; y++){
void *line = &p->data[0][y * p->linesize[0]];
for(x = 0; x < s->width >> 1; x++){
switch(s->bpp){
for(x = 0; x < w >> 1; x++){
switch(bpp){
case 32:
FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[s->width - x - 1]);
FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[w - x - 1]);
break;
case 24:
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * s->width - 3 * x - 3]);
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * s->width - 3 * x - 2]);
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * s->width - 3 * x - 1]);
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * w - 3 * x - 3]);
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * w - 3 * x - 2]);
FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * w - 3 * x - 1]);
break;
case 16:
FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[s->width - x - 1]);
FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[w - x - 1]);
break;
case 8:
FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[s->width - x - 1]);
FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[w - x - 1]);
}
}
}
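
For readers unfamiliar with the bytestream2 conversion applied above: GetByteContext wraps the packet in a cursor that always knows how many bytes remain, which is what lets the CHECK_BUFFER_SIZE macro and the manual src/buf_end arithmetic disappear. Below is a toy, self-contained analogue of that cursor idea; ByteCursor and its functions are illustrative stand-ins, not the FFmpeg API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for GetByteContext: a buffer plus a read position. */
typedef struct ByteCursor {
    const uint8_t *buf;
    size_t size, pos;
} ByteCursor;

static size_t cursor_bytes_left(const ByteCursor *c)
{
    return c->size - c->pos;
}

/* Read one byte, returning 0 once the buffer is exhausted, in the same
 * spirit as bytestream2_get_byte(). */
static int cursor_get_byte(ByteCursor *c)
{
    return c->pos < c->size ? c->buf[c->pos++] : 0;
}

/* Copy at most the bytes actually left, like bytestream2_get_buffer(). */
static size_t cursor_get_buffer(ByteCursor *c, uint8_t *dst, size_t n)
{
    size_t avail = cursor_bytes_left(c);
    if (n > avail)
        n = avail;
    memcpy(dst, c->buf + c->pos, n);
    c->pos += n;
    return n;
}

int main(void)
{
    static const uint8_t pkt[] = { 0x81, 0xAA, 0x00, 0x10 };
    ByteCursor c = { pkt, sizeof(pkt), 0 };
    uint8_t tmp[8];

    int    type = cursor_get_byte(&c);            /* 0x81, one byte consumed */
    size_t got  = cursor_get_buffer(&c, tmp, 8);  /* request 8, clamped to 3 */
    printf("type=0x%02x copied=%zu left=%zu\n",
           type, got, cursor_bytes_left(&c));
    return 0;
}

The real bytestream2 readers behave along the same lines: reads past the end yield zero and buffer copies are clamped to what is left, so the decoder only has to consult bytestream2_get_bytes_left() at the points where truncation must be treated as an error.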

View File

@@ -493,7 +493,7 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
int nr, dr;
nr = get_bits(gb, 8);
dr = get_bits(gb, 4);
if (nr && nr < 8 && dr && dr < 3) {
if (nr > 0 && nr < 8 && dr > 0 && dr < 3) {
v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1];
v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000;
}

View File

@@ -84,7 +84,7 @@ const uint8_t ff_vc1_mbmode_intfrp[2][15][4] = {
}
};
const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72},
const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72 },
ff_vc1_fps_dr[2] = { 1000, 1001 };
const uint8_t ff_vc1_pquant_table[3][32] = {
/* Implicit quantizer */

View File

@@ -2374,6 +2374,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
int16_t *dc_val;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int q1, q2 = 0;
int dqscale_index;
wrap = s->block_wrap[n];
dc_val = s->dc_val[0] + s->block_index[n];
@@ -2386,15 +2387,18 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
a = dc_val[ - wrap];
/* scale predictors if needed */
q1 = s->current_picture.f.qscale_table[mb_pos];
dqscale_index = s->y_dc_scale_table[q1] - 1;
if (dqscale_index < 0)
return 0;
if (c_avail && (n != 1 && n != 3)) {
q2 = s->current_picture.f.qscale_table[mb_pos - 1];
if (q2 && q2 != q1)
c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
if (q2 && q2 != q1)
a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && c_avail && (n != 3)) {
int off = mb_pos;
@@ -2404,7 +2408,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
off -= s->mb_stride;
q2 = s->current_picture.f.qscale_table[off];
if (q2 && q2 != q1)
b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && c_avail) {
@@ -2821,6 +2825,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -2863,6 +2869,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -2873,6 +2881,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -3031,6 +3041,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -3073,6 +3085,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -3083,6 +3097,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if (q1 < 1)
return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}

View File

@@ -1607,6 +1607,8 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt);
* @param s media file handle
* @param pkt The packet containing the data to be written. Libavformat takes
* ownership of the data and will free it when it sees fit using the packet's
* This can be NULL (at any time, not just at the end), to flush the
* interleaving queues.
* @ref AVPacket.destruct "destruct" field. The caller must not access the data
* after this function returns, as it may already be freed.
* Packet's @ref AVPacket.stream_index "stream_index" field must be set to the
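
The NULL-flush behaviour documented above is driven from application code; a minimal sketch against the public libavformat API of this period follows, where flush_muxer() is a hypothetical helper rather than part of the library.

#include <libavformat/avformat.h>
#include <libavutil/log.h>

/* Hypothetical helper: drain a muxer's interleaving queues by handing
 * av_interleaved_write_frame() a NULL packet, as the documentation above
 * now allows. Returns 0 on success or a negative AVERROR code. */
static int flush_muxer(AVFormatContext *oc)
{
    int ret = av_interleaved_write_frame(oc, NULL);
    if (ret < 0)
        av_log(oc, AV_LOG_ERROR, "could not flush interleaving queues\n");
    return ret;
}

This is useful, for example, to push buffered packets out before the stream actually ends; av_write_trailer() still performs its own final flush.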

View File

@@ -1428,17 +1428,19 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
return;
clear_program(ts, h->id);
pcr_pid = get16(&p, p_end) & 0x1fff;
pcr_pid = get16(&p, p_end);
if (pcr_pid < 0)
return;
pcr_pid &= 0x1fff;
add_pid_to_pmt(ts, h->id, pcr_pid);
set_pcr_pid(ts->stream, h->id, pcr_pid);
av_dlog(ts->stream, "pcr_pid=0x%x\n", pcr_pid);
program_info_length = get16(&p, p_end) & 0xfff;
program_info_length = get16(&p, p_end);
if (program_info_length < 0)
return;
program_info_length &= 0xfff;
while(program_info_length >= 2) {
uint8_t tag, len;
tag = get8(&p, p_end);
@@ -1476,9 +1478,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
stream_type = get8(&p, p_end);
if (stream_type < 0)
break;
pid = get16(&p, p_end) & 0x1fff;
pid = get16(&p, p_end);
if (pid < 0)
break;
pid &= 0x1fff;
/* now create stream */
if (ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES) {
@@ -1516,9 +1519,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
ff_program_add_stream_index(ts->stream, h->id, st->index);
desc_list_len = get16(&p, p_end) & 0xfff;
desc_list_len = get16(&p, p_end);
if (desc_list_len < 0)
break;
desc_list_len &= 0xfff;
desc_list_end = p + desc_list_len;
if (desc_list_end > p_end)
break;
@@ -1565,9 +1569,10 @@ static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
sid = get16(&p, p_end);
if (sid < 0)
break;
pmt_pid = get16(&p, p_end) & 0x1fff;
pmt_pid = get16(&p, p_end);
if (pmt_pid < 0)
break;
pmt_pid &= 0x1fff;
av_dlog(ts->stream, "sid=0x%x pid=0x%x\n", sid, pmt_pid);
@@ -1617,9 +1622,10 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
val = get8(&p, p_end);
if (val < 0)
break;
desc_list_len = get16(&p, p_end) & 0xfff;
desc_list_len = get16(&p, p_end);
if (desc_list_len < 0)
break;
desc_list_len &= 0xfff;
desc_list_end = p + desc_list_len;
if (desc_list_end > p_end)
break;
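
The checks repaired above all share one arithmetic mistake: get16() reports failure with a negative value, but masking the result before the sign test discards the sign, so the test can never fire (-1 & 0x1fff is 0x1fff). A standalone sketch of that arithmetic, where get16_demo() is a stand-in rather than the mpegts helper:

#include <stdio.h>

/* Stand-in for mpegts' get16(): returns -1 when the section data runs out. */
static int get16_demo(int have_data)
{
    return have_data ? 0x1abc : -1;
}

int main(void)
{
    /* Old, dead check: the mask is applied first, so -1 becomes 0x1fff and
     * the "< 0" branch is unreachable. */
    int pid = get16_demo(0) & 0x1fff;
    printf("mask first:  pid = 0x%x, error caught: %s\n",
           (unsigned)pid, pid < 0 ? "yes" : "no");    /* 0x1fff, "no" */

    /* Fixed order, as in the hunks above: test the raw value, then mask. */
    pid = get16_demo(0);
    if (pid < 0)
        printf("check first: error caught\n");        /* this branch runs */
    else
        pid &= 0x1fff;

    return 0;
}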

View File

@@ -3481,24 +3481,30 @@ static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, in
}
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
AVStream *st= s->streams[ pkt->stream_index];
int ret;
int ret, flush = 0;
//FIXME/XXX/HACK drop zero sized packets
if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
return 0;
if (pkt) {
AVStream *st= s->streams[ pkt->stream_index];
av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
pkt->size, pkt->dts, pkt->pts);
if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return ret;
//FIXME/XXX/HACK drop zero sized packets
if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
return 0;
if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return AVERROR(EINVAL);
av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
pkt->size, pkt->dts, pkt->pts);
if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return ret;
if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return AVERROR(EINVAL);
} else {
av_dlog(s, "av_interleaved_write_frame FLUSH\n");
flush = 1;
}
for(;;){
AVPacket opkt;
int ret= interleave_packet(s, &opkt, pkt, 0);
int ret= interleave_packet(s, &opkt, pkt, flush);
if(ret<=0) //FIXME cleanup needed for ret<0 ?
return ret;