[SCM] libav/upstream: Imported Upstream version 0.8.2
siretart at users.alioth.debian.org
Sat May 5 08:55:58 UTC 2012
The following commit has been merged in the upstream branch:
commit 6df7c4bea3661ee04bbe4e0958b924092069d727
Author: Reinhard Tartler <siretart at tauware.de>
Date: Sat May 5 10:55:34 2012 +0200
Imported Upstream version 0.8.2
diff --git a/Changelog b/Changelog
index cb04ee4..846aa5a 100644
--- a/Changelog
+++ b/Changelog
@@ -1,6 +1,20 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
+version 0.8.2:
+
+- Several bugs and crashes have been fixed in the following codecs: AAC,
+ APE, H.263, H.264, Indeo 4, Mimic, MJPEG, Motion Pixels Video, RAW,
+ TTA, VC1, VQA, WMA Voice, vqavideo (CVE-2012-0947).
+
+- Several bugs and crashes have been fixed in the following formats:
+ ASF, ID3v2, MOV, xWMA
+
+- This release additionally updates the following codecs to the
+  bytestream2 API, so they benefit from additional overflow
+  checks: truemotion2, utvideo, vqavideo
+
+
version 0.8.1:
- Several bugs and crashes have been fixed in the following codecs: AAC,
diff --git a/RELEASE b/RELEASE
index 6f4eebd..100435b 100644
--- a/RELEASE
+++ b/RELEASE
@@ -1 +1 @@
-0.8.1
+0.8.2
diff --git a/VERSION b/VERSION
index 6f4eebd..100435b 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.8.1
+0.8.2
diff --git a/cmdutils.c b/cmdutils.c
index a489a0f..e96fa81 100644
--- a/cmdutils.c
+++ b/cmdutils.c
@@ -883,12 +883,12 @@ FILE *get_preset_file(char *filename, size_t filename_size,
for (i = 0; i < 3 && !f; i++) {
if (!base[i])
continue;
- snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i],
+ snprintf(filename, filename_size, "%s%s/%s.avpreset", base[i],
i != 1 ? "" : "/.avconv", preset_name);
f = fopen(filename, "r");
if (!f && codec_name) {
snprintf(filename, filename_size,
- "%s%s/%s-%s.ffpreset",
+ "%s%s/%s-%s.avpreset",
base[i], i != 1 ? "" : "/.avconv", codec_name,
preset_name);
f = fopen(filename, "r");
diff --git a/cmdutils.h b/cmdutils.h
index eb96645..c69bb02 100644
--- a/cmdutils.h
+++ b/cmdutils.h
@@ -345,11 +345,11 @@ int64_t guess_correct_pts(PtsCorrectionContext *ctx, int64_t pts, int64_t dts);
* Get a file corresponding to a preset file.
*
* If is_path is non-zero, look for the file in the path preset_name.
- * Otherwise search for a file named arg.ffpreset in the directories
+ * Otherwise search for a file named arg.avpreset in the directories
* $AVCONV_DATADIR (if set), $HOME/.avconv, and in the datadir defined
* at configuration time, in that order. If no such file is found and
* codec_name is defined, then search for a file named
- * codec_name-preset_name.ffpreset in the above-mentioned directories.
+ * codec_name-preset_name.avpreset in the above-mentioned directories.
*
* @param filename buffer where the name of the found filename is written
* @param filename_size size in bytes of the filename buffer
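[Editor's sketch, not part of the patch: a minimal example of how a caller uses get_preset_file() after the rename to .avpreset. The parameter list follows the declaration in cmdutils.h of this release, and the preset name "fast" and codec name "libx264" are illustrative only.]

    #include <stdio.h>
    #include "cmdutils.h"

    /* Looks up "fast.avpreset" (or "libx264-fast.avpreset" as a fallback) in
     * $AVCONV_DATADIR, $HOME/.avconv and the configured datadir, in that order. */
    static void load_fast_preset(void)
    {
        char path[1024];
        FILE *f = get_preset_file(path, sizeof(path), "fast", 0, "libx264");

        if (!f) {
            fprintf(stderr, "Preset file for 'fast' not found\n");
            return;
        }
        printf("Reading preset options from %s\n", path);
        /* ... parse key=value option lines from f here ... */
        fclose(f);
    }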
diff --git a/libavcodec/aacps.c b/libavcodec/aacps.c
index 3da912c..6c9dcf2 100644
--- a/libavcodec/aacps.c
+++ b/libavcodec/aacps.c
@@ -275,6 +275,10 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
err:
ps->start = 0;
skip_bits_long(gb_host, bits_left);
+ memset(ps->iid_par, 0, sizeof(ps->iid_par));
+ memset(ps->icc_par, 0, sizeof(ps->icc_par));
+ memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
+ memset(ps->opd_par, 0, sizeof(ps->opd_par));
return bits_left;
}
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index fa50d61..0abf05b 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -404,9 +404,12 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice)
if (tmpk <= 16)
x = range_decode_bits(ctx, tmpk);
- else {
+ else if (tmpk <= 32) {
x = range_decode_bits(ctx, 16);
x |= (range_decode_bits(ctx, tmpk - 16) << 16);
+ } else {
+ av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
+ return AVERROR_INVALIDDATA;
}
x += overflow << tmpk;
} else {
diff --git a/libavcodec/bytestream.h b/libavcodec/bytestream.h
index 503598a..6814620 100644
--- a/libavcodec/bytestream.h
+++ b/libavcodec/bytestream.h
@@ -1,6 +1,7 @@
/*
* Bytestream functions
* copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier at free.fr>
+ * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh at gmail.com>
*
* This file is part of Libav.
*
@@ -23,6 +24,7 @@
#define AVCODEC_BYTESTREAM_H
#include <string.h>
+
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
@@ -30,35 +32,57 @@ typedef struct {
const uint8_t *buffer, *buffer_end, *buffer_start;
} GetByteContext;
-#define DEF_T(type, name, bytes, read, write) \
-static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
- (*b) += bytes;\
- return read(*b - bytes);\
-}\
-static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
- write(*b, value);\
- (*b) += bytes;\
-}\
-static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\
-{\
- return bytestream_get_ ## name(&g->buffer);\
-}\
-static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
-{\
- if (g->buffer_end - g->buffer < bytes)\
- return 0;\
- return bytestream2_get_ ## name ## u(g);\
-}\
-static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
-{\
- if (g->buffer_end - g->buffer < bytes)\
- return 0;\
- return read(g->buffer);\
-}
-
-#define DEF(name, bytes, read, write) \
+typedef struct {
+ uint8_t *buffer, *buffer_end, *buffer_start;
+ int eof;
+} PutByteContext;
+
+#define DEF_T(type, name, bytes, read, write) \
+static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \
+{ \
+ (*b) += bytes; \
+ return read(*b - bytes); \
+} \
+static av_always_inline void bytestream_put_ ## name(uint8_t **b, \
+ const type value) \
+{ \
+ write(*b, value); \
+ (*b) += bytes; \
+} \
+static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \
+ const type value) \
+{ \
+ bytestream_put_ ## name(&p->buffer, value); \
+} \
+static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \
+ const type value) \
+{ \
+ if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
+ write(p->buffer, value); \
+ p->buffer += bytes; \
+ } else \
+ p->eof = 1; \
+} \
+static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
+{ \
+ return bytestream_get_ ## name(&g->buffer); \
+} \
+static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
+{ \
+ if (g->buffer_end - g->buffer < bytes) \
+ return 0; \
+ return bytestream2_get_ ## name ## u(g); \
+} \
+static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
+{ \
+ if (g->buffer_end - g->buffer < bytes) \
+ return 0; \
+ return read(g->buffer); \
+}
+
+#define DEF(name, bytes, read, write) \
DEF_T(unsigned int, name, bytes, read, write)
-#define DEF64(name, bytes, read, write) \
+#define DEF64(name, bytes, read, write) \
DEF_T(uint64_t, name, bytes, read, write)
DEF64(le64, 8, AV_RL64, AV_WL64)
@@ -112,11 +136,22 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#endif
static av_always_inline void bytestream2_init(GetByteContext *g,
- const uint8_t *buf, int buf_size)
+ const uint8_t *buf,
+ int buf_size)
{
- g->buffer = buf;
+ g->buffer = buf;
g->buffer_start = buf;
- g->buffer_end = buf + buf_size;
+ g->buffer_end = buf + buf_size;
+}
+
+static av_always_inline void bytestream2_init_writer(PutByteContext *p,
+ uint8_t *buf,
+ int buf_size)
+{
+ p->buffer = buf;
+ p->buffer_start = buf;
+ p->buffer_end = buf + buf_size;
+ p->eof = 0;
}
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
@@ -124,32 +159,61 @@ static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *
return g->buffer_end - g->buffer;
}
+static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p)
+{
+ return p->buffer_end - p->buffer;
+}
+
static av_always_inline void bytestream2_skip(GetByteContext *g,
unsigned int size)
{
g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}
+static av_always_inline void bytestream2_skipu(GetByteContext *g,
+ unsigned int size)
+{
+ g->buffer += size;
+}
+
+static av_always_inline void bytestream2_skip_p(PutByteContext *p,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ p->buffer += size2;
+}
+
static av_always_inline int bytestream2_tell(GetByteContext *g)
{
return (int)(g->buffer - g->buffer_start);
}
-static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
+static av_always_inline int bytestream2_tell_p(PutByteContext *p)
+{
+ return (int)(p->buffer - p->buffer_start);
+}
+
+static av_always_inline int bytestream2_seek(GetByteContext *g,
+ int offset,
int whence)
{
switch (whence) {
case SEEK_CUR:
- offset = av_clip(offset, -(g->buffer - g->buffer_start),
- g->buffer_end - g->buffer);
+ offset = av_clip(offset, -(g->buffer - g->buffer_start),
+ g->buffer_end - g->buffer);
g->buffer += offset;
break;
case SEEK_END:
- offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
+ offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
g->buffer = g->buffer_end + offset;
break;
case SEEK_SET:
- offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
+ offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
g->buffer = g->buffer_start + offset;
break;
default:
@@ -158,6 +222,37 @@ static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
return bytestream2_tell(g);
}
+static av_always_inline int bytestream2_seek_p(PutByteContext *p,
+ int offset,
+ int whence)
+{
+ p->eof = 0;
+ switch (whence) {
+ case SEEK_CUR:
+ if (p->buffer_end - p->buffer < offset)
+ p->eof = 1;
+ offset = av_clip(offset, -(p->buffer - p->buffer_start),
+ p->buffer_end - p->buffer);
+ p->buffer += offset;
+ break;
+ case SEEK_END:
+ if (offset > 0)
+ p->eof = 1;
+ offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
+ p->buffer = p->buffer_end + offset;
+ break;
+ case SEEK_SET:
+ if (p->buffer_end - p->buffer_start < offset)
+ p->eof = 1;
+ offset = av_clip(offset, 0, p->buffer_end - p->buffer_start);
+ p->buffer = p->buffer_start + offset;
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+ return bytestream2_tell_p(p);
+}
+
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
uint8_t *dst,
unsigned int size)
@@ -168,14 +263,78 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
return size2;
}
-static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
+static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
+ uint8_t *dst,
+ unsigned int size)
+{
+ memcpy(dst, g->buffer, size);
+ g->buffer += size;
+ return size;
+}
+
+static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
+ const uint8_t *src,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return 0;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ memcpy(p->buffer, src, size2);
+ p->buffer += size2;
+ return size2;
+}
+
+static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
+ const uint8_t *src,
+ unsigned int size)
+{
+ memcpy(p->buffer, src, size);
+ p->buffer += size;
+ return size;
+}
+
+static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
+ const uint8_t c,
+ unsigned int size)
+{
+ int size2;
+ if (p->eof)
+ return;
+ size2 = FFMIN(p->buffer_end - p->buffer, size);
+ if (size2 != size)
+ p->eof = 1;
+ memset(p->buffer, c, size2);
+ p->buffer += size2;
+}
+
+static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
+ const uint8_t c,
+ unsigned int size)
+{
+ memset(p->buffer, c, size);
+ p->buffer += size;
+}
+
+static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
+{
+ return p->eof;
+}
+
+static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
+ uint8_t *dst,
+ unsigned int size)
{
memcpy(dst, *b, size);
(*b) += size;
return size;
}
-static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
+static av_always_inline void bytestream_put_buffer(uint8_t **b,
+ const uint8_t *src,
+ unsigned int size)
{
memcpy(*b, src, size);
(*b) += size;
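[Editor's sketch, not part of the patch: the PutByteContext writer added above mirrors the GetByteContext reader. The checked bytestream2_put_*/bytestream2_put_buffer/bytestream2_set_buffer calls set the eof flag instead of writing past buffer_end, which is where the extra overflow checks mentioned in the Changelog come from. The error code returned below is an illustrative choice.]

    #include "bytestream.h"

    /* Write a 4-byte tag, a little-endian length and a payload into out without
     * ever running past out + out_size; report truncation via the eof flag. */
    static int write_block(uint8_t *out, int out_size,
                           const uint8_t *payload, unsigned int payload_size)
    {
        PutByteContext pb;

        bytestream2_init_writer(&pb, out, out_size);
        bytestream2_put_be32(&pb, MKBETAG('B', 'L', 'C', 'K'));
        bytestream2_put_le32(&pb, payload_size);
        bytestream2_put_buffer(&pb, payload, payload_size);

        if (bytestream2_get_eof(&pb))   /* some write was truncated */
            return AVERROR(EINVAL);
        return bytestream2_tell_p(&pb); /* bytes actually written */
    }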
diff --git a/libavcodec/celp_filters.c b/libavcodec/celp_filters.c
index 25a6744..849cda4 100644
--- a/libavcodec/celp_filters.c
+++ b/libavcodec/celp_filters.c
@@ -133,9 +133,8 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
out2 -= val * old_out2;
out3 -= val * old_out3;
- old_out3 = out[-5];
-
for (i = 5; i <= filter_length; i += 2) {
+ old_out3 = out[-i];
val = filter_coeffs[i-1];
out0 -= val * old_out3;
@@ -154,7 +153,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
FFSWAP(float, old_out0, old_out2);
old_out1 = old_out3;
- old_out3 = out[-i-2];
}
tmp0 = out0;
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 5556214..7f0934a 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -430,6 +430,13 @@ retry:
if (ret < 0){
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
return -1;
+ } else if ((s->width != avctx->coded_width ||
+ s->height != avctx->coded_height ||
+ (s->width + 15) >> 4 != s->mb_width ||
+ (s->height + 15) >> 4 != s->mb_height) &&
+ (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
+ av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
+ return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
}
avctx->has_b_frames= !s->low_delay;
@@ -571,11 +578,6 @@ retry:
/* H.263 could change picture size any time */
ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat
- if (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME)) {
- av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
- return -1; // width / height changed during parallelized decoding
- }
-
s->parse_context.buffer=0;
MPV_common_end(s);
s->parse_context= pc;
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index e0eb8e1..b229510 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -2723,9 +2723,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
if (s->context_initialized
&& ( s->width != s->avctx->width || s->height != s->avctx->height
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
- if(h != h0) {
+ if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log_missing_feature(s->avctx, "Width/height changing with threads is", 0);
- return -1; // width / height changed during parallelized decoding
+ return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
}
free_tables(h, 0);
flush_dpb(s->avctx);
diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c
index 276eb77..c6623a9 100644
--- a/libavcodec/h264_ps.c
+++ b/libavcodec/h264_ps.c
@@ -471,6 +471,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
if(pps_id >= MAX_PPS_COUNT) {
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
return -1;
+ } else if (h->sps.bit_depth_luma > 10) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
+ return AVERROR_PATCHWELCOME;
}
pps= av_mallocz(sizeof(PPS));
diff --git a/libavcodec/indeo4.c b/libavcodec/indeo4.c
index 573718e..3e8a398 100644
--- a/libavcodec/indeo4.c
+++ b/libavcodec/indeo4.c
@@ -372,7 +372,8 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
if (!get_bits1(&ctx->gb) || ctx->frame_type == FRAMETYPE_INTRA) {
transform_id = get_bits(&ctx->gb, 5);
- if (!transforms[transform_id].inv_trans) {
+ if (transform_id >= FF_ARRAY_ELEMS(transforms) ||
+ !transforms[transform_id].inv_trans) {
av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id);
return AVERROR_PATCHWELCOME;
}
diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c
index 757873e..6828ba8 100644
--- a/libavcodec/lagarith.c
+++ b/libavcodec/lagarith.c
@@ -247,24 +247,26 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
{
int L, TL;
- /* Left pixel is actually prev_row[width] */
- L = buf[width - stride - 1];
if (!line) {
/* Left prediction only for first line */
L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1,
width - 1, buf[0]);
- return;
- } else if (line == 1) {
- /* Second line, left predict first pixel, the rest of the line is median predicted
- * NOTE: In the case of RGB this pixel is top predicted */
- TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
} else {
- /* Top left is 2 rows back, last pixel */
- TL = buf[width - (2 * stride) - 1];
- }
+ /* Left pixel is actually prev_row[width] */
+ L = buf[width - stride - 1];
+
+ if (line == 1) {
+ /* Second line, left predict first pixel, the rest of the line is median predicted
+ * NOTE: In the case of RGB this pixel is top predicted */
+ TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
+ } else {
+ /* Top left is 2 rows back, last pixel */
+ TL = buf[width - (2 * stride) - 1];
+ }
- add_lag_median_prediction(buf, buf - stride, buf,
- width, &L, &TL);
+ add_lag_median_prediction(buf, buf - stride, buf,
+ width, &L, &TL);
+ }
}
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
@@ -310,13 +312,13 @@ handle_zeros:
}
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
- const uint8_t *src, int width,
- int esc_count)
+ const uint8_t *src, const uint8_t *src_end,
+ int width, int esc_count)
{
int i = 0;
int count;
uint8_t zero_run = 0;
- const uint8_t *start = src;
+ const uint8_t *src_start = src;
uint8_t mask1 = -(esc_count < 2);
uint8_t mask2 = -(esc_count < 3);
uint8_t *end = dst + (width - 2);
@@ -333,6 +335,8 @@ output_zeros:
i = 0;
while (!zero_run && dst + i < end) {
i++;
+ if (src + i >= src_end)
+ return AVERROR_INVALIDDATA;
zero_run =
!(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
}
@@ -348,9 +352,10 @@ output_zeros:
} else {
memcpy(dst, src, i);
src += i;
+ dst += i;
}
}
- return start - src;
+ return src_start - src;
}
@@ -366,6 +371,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
int esc_count = src[0];
GetBitContext gb;
lag_rac rac;
+ const uint8_t *src_end = src + src_size;
rac.avctx = l->avctx;
l->zeros = 0;
@@ -396,10 +402,16 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
esc_count -= 4;
if (esc_count > 0) {
/* Zero run coding only, no range coding. */
- for (i = 0; i < height; i++)
- src += lag_decode_zero_run_line(l, dst + (i * stride), src,
- width, esc_count);
+ for (i = 0; i < height; i++) {
+ int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
+ src_end, width, esc_count);
+ if (res < 0)
+ return res;
+ src += res;
+ }
} else {
+ if (src_size < width * height)
+ return AVERROR_INVALIDDATA; // buffer not big enough
/* Plane is stored uncompressed */
for (i = 0; i < height; i++) {
memcpy(dst + (i * stride), src, width);
@@ -506,11 +518,19 @@ static int lag_decode_frame(AVCodecContext *avctx,
}
for (i = 0; i < planes; i++)
srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
+ if (offset_ry >= buf_size ||
+ offset_gu >= buf_size ||
+ offset_bv >= buf_size ||
+ (planes == 4 && offs[3] >= buf_size)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid frame offsets\n");
+ return AVERROR_INVALIDDATA;
+ }
for (i = 0; i < planes; i++)
lag_decode_arith_plane(l, srcs[i],
avctx->width, avctx->height,
-l->rgb_stride, buf + offs[i],
- buf_size);
+ buf_size - offs[i]);
dst = p->data[0];
for (i = 0; i < planes; i++)
srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
@@ -544,15 +564,23 @@ static int lag_decode_frame(AVCodecContext *avctx,
return -1;
}
+ if (offset_ry >= buf_size ||
+ offset_gu >= buf_size ||
+ offset_bv >= buf_size) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid frame offsets\n");
+ return AVERROR_INVALIDDATA;
+ }
+
lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
p->linesize[0], buf + offset_ry,
- buf_size);
+ buf_size - offset_ry);
lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
avctx->height / 2, p->linesize[2],
- buf + offset_gu, buf_size);
+ buf + offset_gu, buf_size - offset_gu);
lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
avctx->height / 2, p->linesize[1],
- buf + offset_bv, buf_size);
+ buf + offset_bv, buf_size - offset_bv);
break;
default:
av_log(avctx, AV_LOG_ERROR,
diff --git a/libavcodec/lagarithrac.c b/libavcodec/lagarithrac.c
index ab7a600..f85e012 100644
--- a/libavcodec/lagarithrac.c
+++ b/libavcodec/lagarithrac.c
@@ -32,15 +32,16 @@
void lag_rac_init(lag_rac *l, GetBitContext *gb, int length)
{
- int i, j;
+ int i, j, left;
/* According to reference decoder "1st byte is garbage",
* however, it gets skipped by the call to align_get_bits()
*/
align_get_bits(gb);
+ left = get_bits_left(gb) >> 3;
l->bytestream_start =
l->bytestream = gb->buffer + get_bits_count(gb) / 8;
- l->bytestream_end = l->bytestream_start + length;
+ l->bytestream_end = l->bytestream_start + FFMIN(length, left);
l->range = 0x80;
l->low = *l->bytestream >> 1;
diff --git a/libavcodec/lzw.c b/libavcodec/lzw.c
index 873b314..b674d4e 100644
--- a/libavcodec/lzw.c
+++ b/libavcodec/lzw.c
@@ -101,9 +101,14 @@ void ff_lzw_decode_tail(LZWState *p)
struct LZWState *s = (struct LZWState *)p;
if(s->mode == FF_LZW_GIF) {
- while(s->pbuf < s->ebuf && s->bs>0){
- s->pbuf += s->bs;
- s->bs = *s->pbuf++;
+ while (s->bs > 0) {
+ if (s->pbuf + s->bs >= s->ebuf) {
+ s->pbuf = s->ebuf;
+ break;
+ } else {
+ s->pbuf += s->bs;
+ s->bs = *s->pbuf++;
+ }
}
}else
s->pbuf= s->ebuf;
diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c
index b93f51f..fd03b97 100644
--- a/libavcodec/mimic.c
+++ b/libavcodec/mimic.c
@@ -259,8 +259,8 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
int index = (ctx->cur_index+backref)&15;
uint8_t *p = ctx->flipped_ptrs[index].data[0];
- ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
- if(p) {
+ if (index != ctx->cur_index && p) {
+ ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
p += src -
ctx->flipped_ptrs[ctx->prev_index].data[plane];
ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8);
@@ -310,6 +310,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
int width, height;
int quality, num_coeffs;
int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;
+ int res;
if(buf_size < MIMIC_HEADER_SIZE) {
av_log(avctx, AV_LOG_ERROR, "insufficient data\n");
@@ -377,10 +378,10 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
swap_buf_size>>2);
init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);
- if(!decode(ctx, quality, num_coeffs, !is_pframe)) {
- if (avctx->active_thread_type&FF_THREAD_FRAME)
- ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
- else {
+ res = decode(ctx, quality, num_coeffs, !is_pframe);
+ ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
+ if (!res) {
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
return -1;
}
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index a795028..7f12fc1 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -306,9 +306,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
s->first_picture = 0;
}
- if (s->interlaced && (s->bottom_field == !s->interlace_polarity))
- return 0;
-
+ if (!(s->interlaced && (s->bottom_field == !s->interlace_polarity))) {
/* XXX: not complete test ! */
pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
(s->h_count[1] << 20) | (s->v_count[1] << 16) |
@@ -375,6 +373,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
if (len != (8 + (3 * nb_components)))
av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
+ }
/* totally blank picture as progressive JPEG will only add details to it */
if (s->progressive) {
diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c
index d054e00..8259447 100644
--- a/libavcodec/motionpixels.c
+++ b/libavcodec/motionpixels.c
@@ -190,10 +190,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
p = mp_get_yuv_from_rgb(mp, x - 1, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
+ p.y = av_clip(p.y, 0, 31);
if ((x & 3) == 0) {
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
+ p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
+ p.u = av_clip(p.u, -32, 31);
mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
} else {
p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
@@ -217,9 +220,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
p = mp_get_yuv_from_rgb(mp, 0, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
+ p.y = av_clip(p.y, 0, 31);
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
+ p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
+ p.u = av_clip(p.u, -32, 31);
}
mp->vpt[y] = p;
mp_set_rgb_from_yuv(mp, 0, y, &p);
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 50e6ad6..7aaf398 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -1415,8 +1415,7 @@ void MPV_frame_end(MpegEncContext *s)
s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
- ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
- s->mb_height - 1, 0);
+ ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
}
}
diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c
index 83b2a21..c0508d8 100644
--- a/libavcodec/rawdec.c
+++ b/libavcodec/rawdec.c
@@ -119,6 +119,7 @@ static int raw_decode(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
RawVideoContext *context = avctx->priv_data;
+ int res;
AVFrame * frame = (AVFrame *) data;
AVPicture * picture = (AVPicture *) data;
@@ -156,7 +157,9 @@ static int raw_decode(AVCodecContext *avctx,
avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
buf += buf_size - context->length;
- avpicture_fill(picture, buf, avctx->pix_fmt, avctx->width, avctx->height);
+ if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
+ avctx->width, avctx->height)) < 0)
+ return res;
if((avctx->pix_fmt==PIX_FMT_PAL8 && buf_size < context->length) ||
(avctx->pix_fmt!=PIX_FMT_PAL8 &&
(av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c
index 29d2e4d..81dc84a 100644
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -25,6 +25,7 @@
*/
#include "avcodec.h"
+#include "bytestream.h"
#include "get_bits.h"
#include "dsputil.h"
@@ -56,7 +57,9 @@ typedef struct TM2Context{
int *clast;
/* data for current and previous frame */
+ int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base;
int *Y1, *U1, *V1, *Y2, *U2, *V2;
+ int y_stride, uv_stride;
int cur;
} TM2Context;
@@ -127,7 +130,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
/* check for correct codes parameters */
if((huff.val_bits < 1) || (huff.val_bits > 32) ||
- (huff.max_bits < 0) || (huff.max_bits > 32)) {
+ (huff.max_bits < 0) || (huff.max_bits > 25)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n",
huff.val_bits, huff.max_bits);
return -1;
@@ -248,13 +251,14 @@ static int tm2_read_deltas(TM2Context *ctx, int stream_id) {
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
{
int i;
- int cur = 0;
int skip = 0;
- int len, toks;
+ int len, toks, pos;
TM2Codes codes;
+ GetByteContext gb;
/* get stream length in dwords */
- len = AV_RB32(buf); buf += 4; cur += 4;
+ bytestream2_init(&gb, buf, buf_size);
+ len = bytestream2_get_be32(&gb);
skip = len * 4 + 4;
if(len == 0)
@@ -265,36 +269,37 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i
return -1;
}
- toks = AV_RB32(buf); buf += 4; cur += 4;
+ toks = bytestream2_get_be32(&gb);
if(toks & 1) {
- len = AV_RB32(buf); buf += 4; cur += 4;
+ len = bytestream2_get_be32(&gb);
if(len == TM2_ESCAPE) {
- len = AV_RB32(buf); buf += 4; cur += 4;
+ len = bytestream2_get_be32(&gb);
}
if(len > 0) {
- if (skip <= cur)
+ pos = bytestream2_tell(&gb);
+ if (skip <= pos)
return -1;
- init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
+ init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
if(tm2_read_deltas(ctx, stream_id) == -1)
return -1;
- buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
- cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
+ bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
}
}
/* skip unused fields */
- if(AV_RB32(buf) == TM2_ESCAPE) {
- buf += 4; cur += 4; /* some unknown length - could be escaped too */
+ len = bytestream2_get_be32(&gb);
+ if(len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
+ bytestream2_skip(&gb, 8); /* unused by decoder */
+ } else {
+ bytestream2_skip(&gb, 4); /* unused by decoder */
}
- buf += 4; cur += 4;
- buf += 4; cur += 4; /* unused by decoder */
- if (skip <= cur)
+ pos = bytestream2_tell(&gb);
+ if (skip <= pos)
return -1;
- init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
+ init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
if(tm2_build_huff_table(ctx, &codes) == -1)
return -1;
- buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
- cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
+ bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
toks >>= 1;
/* check if we have sane number of tokens */
@@ -305,21 +310,33 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i
}
ctx->tokens[stream_id] = av_realloc(ctx->tokens[stream_id], toks * sizeof(int));
ctx->tok_lens[stream_id] = toks;
- len = AV_RB32(buf); buf += 4; cur += 4;
+ len = bytestream2_get_be32(&gb);
if(len > 0) {
- if (skip <= cur)
+ pos = bytestream2_tell(&gb);
+ if (skip <= pos)
return -1;
- init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
+ init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
for(i = 0; i < toks; i++) {
if (get_bits_left(&ctx->gb) <= 0) {
av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
return -1;
}
ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
+ if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+ av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+ ctx->tokens[stream_id][i], stream_id, i);
+ return AVERROR_INVALIDDATA;
+ }
}
} else {
- for(i = 0; i < toks; i++)
+ for(i = 0; i < toks; i++) {
ctx->tokens[stream_id][i] = codes.recode[0];
+ if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+ av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+ ctx->tokens[stream_id][i], stream_id, i);
+ return AVERROR_INVALIDDATA;
+ }
+ }
}
tm2_free_codes(&codes);
@@ -344,9 +361,9 @@ static inline int GET_TOK(TM2Context *ctx,int type) {
int *Y, *U, *V;\
int Ystride, Ustride, Vstride;\
\
- Ystride = ctx->avctx->width;\
- Vstride = (ctx->avctx->width + 1) >> 1;\
- Ustride = (ctx->avctx->width + 1) >> 1;\
+ Ystride = ctx->y_stride;\
+ Vstride = ctx->uv_stride;\
+ Ustride = ctx->uv_stride;\
Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
@@ -634,6 +651,8 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
mx = GET_TOK(ctx, TM2_MOT);
my = GET_TOK(ctx, TM2_MOT);
+ mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
+ my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
Yo += my * oYstride + mx;
Uo += (my >> 1) * oUstride + (mx >> 1);
@@ -674,15 +693,12 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
int i, j;
- int bw, bh;
+ int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
int type;
int keyframe = 1;
int *Y, *U, *V;
uint8_t *dst;
- bw = ctx->avctx->width >> 2;
- bh = ctx->avctx->height >> 2;
-
for(i = 0; i < TM2_NUM_STREAMS; i++)
ctx->tok_ptrs[i] = 0;
@@ -735,17 +751,54 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
U = (ctx->cur?ctx->U2:ctx->U1);
V = (ctx->cur?ctx->V2:ctx->V1);
dst = p->data[0];
- for(j = 0; j < ctx->avctx->height; j++){
- for(i = 0; i < ctx->avctx->width; i++){
+ for(j = 0; j < h; j++){
+ for(i = 0; i < w; i++){
int y = Y[i], u = U[i >> 1], v = V[i >> 1];
dst[3*i+0] = av_clip_uint8(y + v);
dst[3*i+1] = av_clip_uint8(y);
dst[3*i+2] = av_clip_uint8(y + u);
}
- Y += ctx->avctx->width;
+
+ /* horizontal edge extension */
+ Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
+ Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
+
+ /* vertical edge extension */
+ if (j == 0) {
+ memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+ } else if (j == h - 1) {
+ memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+ }
+
+ Y += ctx->y_stride;
if (j & 1) {
- U += ctx->avctx->width >> 1;
- V += ctx->avctx->width >> 1;
+ /* horizontal edge extension */
+ U[-2] = U[-1] = U[0];
+ V[-2] = V[-1] = V[0];
+ U[cw + 1] = U[cw] = U[cw - 1];
+ V[cw + 1] = V[cw] = V[cw - 1];
+
+ /* vertical edge extension */
+ if (j == 1) {
+ memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ } else if (j == h - 1) {
+ memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ }
+
+ U += ctx->uv_stride;
+ V += ctx->uv_stride;
}
dst += p->linesize[0];
}
@@ -762,7 +815,7 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
+ int buf_size = avpkt->size & ~3;
TM2Context * const l = avctx->priv_data;
AVFrame * const p= (AVFrame*)&l->pic;
int i, skip, t;
@@ -790,10 +843,14 @@ static int decode_frame(AVCodecContext *avctx,
}
for(i = 0; i < TM2_NUM_STREAMS; i++){
- t = tm2_read_stream(l, swbuf + skip, tm2_stream_order[i], buf_size);
- if(t == -1){
+ if (skip >= buf_size) {
av_free(swbuf);
- return -1;
+ return AVERROR_INVALIDDATA;
+ }
+ t = tm2_read_stream(l, swbuf + skip, tm2_stream_order[i], buf_size - skip);
+ if(t < 0){
+ av_free(swbuf);
+ return t;
}
skip += t;
}
@@ -813,7 +870,7 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int decode_init(AVCodecContext *avctx){
TM2Context * const l = avctx->priv_data;
- int i;
+ int i, w = avctx->width, h = avctx->height;
if((avctx->width & 3) || (avctx->height & 3)){
av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
@@ -826,21 +883,46 @@ static av_cold int decode_init(AVCodecContext *avctx){
dsputil_init(&l->dsp, avctx);
- l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
- l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
+ l->last = av_malloc(4 * sizeof(*l->last) * (w >> 2));
+ l->clast = av_malloc(4 * sizeof(*l->clast) * (w >> 2));
for(i = 0; i < TM2_NUM_STREAMS; i++) {
l->tokens[i] = NULL;
l->tok_lens[i] = 0;
}
- l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height);
- l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height);
- l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
+ w += 8;
+ h += 8;
+ l->Y1_base = av_malloc(sizeof(*l->Y1_base) * w * h);
+ l->Y2_base = av_malloc(sizeof(*l->Y2_base) * w * h);
+ l->y_stride = w;
+ w = (w + 1) >> 1;
+ h = (h + 1) >> 1;
+ l->U1_base = av_malloc(sizeof(*l->U1_base) * w * h);
+ l->V1_base = av_malloc(sizeof(*l->V1_base) * w * h);
+ l->U2_base = av_malloc(sizeof(*l->U2_base) * w * h);
+ l->V2_base = av_malloc(sizeof(*l->V1_base) * w * h);
+ l->uv_stride = w;
l->cur = 0;
+ if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
+ !l->V1_base || !l->U2_base || !l->V2_base ||
+ !l->last || !l->clast) {
+ av_freep(l->Y1_base);
+ av_freep(l->Y2_base);
+ av_freep(l->U1_base);
+ av_freep(l->U2_base);
+ av_freep(l->V1_base);
+ av_freep(l->V2_base);
+ av_freep(l->last);
+ av_freep(l->clast);
+ return AVERROR(ENOMEM);
+ }
+ l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
+ l->Y2 = l->Y2_base + l->y_stride * 4 + 4;
+ l->U1 = l->U1_base + l->uv_stride * 2 + 2;
+ l->U2 = l->U2_base + l->uv_stride * 2 + 2;
+ l->V1 = l->V1_base + l->uv_stride * 2 + 2;
+ l->V2 = l->V2_base + l->uv_stride * 2 + 2;
return 0;
}
@@ -855,12 +937,12 @@ static av_cold int decode_end(AVCodecContext *avctx){
for(i = 0; i < TM2_NUM_STREAMS; i++)
av_free(l->tokens[i]);
if(l->Y1){
- av_free(l->Y1);
- av_free(l->U1);
- av_free(l->V1);
- av_free(l->Y2);
- av_free(l->U2);
- av_free(l->V2);
+ av_free(l->Y1_base);
+ av_free(l->U1_base);
+ av_free(l->V1_base);
+ av_free(l->Y2_base);
+ av_free(l->U2_base);
+ av_free(l->V2_base);
}
if (pic->data[0])
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index c8daff2..1743f7d 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -32,6 +32,7 @@
#include <limits.h>
#include "avcodec.h"
#include "get_bits.h"
+#include "libavutil/crc.h"
#define FORMAT_SIMPLE 1
#define FORMAT_ENCRYPTED 2
@@ -58,8 +59,10 @@ typedef struct TTAContext {
AVCodecContext *avctx;
AVFrame frame;
GetBitContext gb;
+ const AVCRC *crc_table;
- int format, channels, bps, data_length;
+ int format, channels, bps;
+ unsigned data_length;
int frame_length, last_frame_length, total_frames;
int32_t *decode_buffer;
@@ -188,10 +191,23 @@ static int tta_get_unary(GetBitContext *gb)
return ret;
}
+static int tta_check_crc(TTAContext *s, const uint8_t *buf, int buf_size)
+{
+ uint32_t crc, CRC;
+
+ CRC = AV_RL32(buf + buf_size);
+ crc = av_crc(s->crc_table, 0xFFFFFFFFU, buf, buf_size);
+ if (CRC != (crc ^ 0xFFFFFFFFU)) {
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
static av_cold int tta_decode_init(AVCodecContext * avctx)
{
TTAContext *s = avctx->priv_data;
- int i;
s->avctx = avctx;
@@ -202,8 +218,14 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8);
if (show_bits_long(&s->gb, 32) == AV_RL32("TTA1"))
{
+ if (avctx->err_recognition & AV_EF_CRCCHECK) {
+ s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
+ if (tta_check_crc(s, avctx->extradata, 18))
+ return AVERROR_INVALIDDATA;
+ }
+
/* signature */
- skip_bits(&s->gb, 32);
+ skip_bits_long(&s->gb, 32);
s->format = get_bits(&s->gb, 16);
if (s->format > 2) {
@@ -219,7 +241,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
s->bps = (avctx->bits_per_coded_sample + 7) / 8;
avctx->sample_rate = get_bits_long(&s->gb, 32);
s->data_length = get_bits_long(&s->gb, 32);
- skip_bits(&s->gb, 32); // CRC32 of header
+ skip_bits_long(&s->gb, 32); // CRC32 of header
if (s->channels == 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of channels\n");
@@ -244,7 +266,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
}
// prevent overflow
- if (avctx->sample_rate > 0x7FFFFF) {
+ if (avctx->sample_rate > 0x7FFFFFu) {
av_log(avctx, AV_LOG_ERROR, "sample_rate too large\n");
return AVERROR(EINVAL);
}
@@ -261,9 +283,15 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
s->data_length, s->frame_length, s->last_frame_length, s->total_frames);
// FIXME: seek table
- for (i = 0; i < s->total_frames; i++)
- skip_bits(&s->gb, 32);
- skip_bits(&s->gb, 32); // CRC32 of seektable
+ if (avctx->extradata_size <= 26 || s->total_frames > INT_MAX / 4 ||
+ avctx->extradata_size - 26 < s->total_frames * 4)
+ av_log(avctx, AV_LOG_WARNING, "Seek table missing or too small\n");
+ else if (avctx->err_recognition & AV_EF_CRCCHECK) {
+ if (tta_check_crc(s, avctx->extradata + 22, s->total_frames * 4))
+ return AVERROR_INVALIDDATA;
+ }
+ skip_bits_long(&s->gb, 32 * s->total_frames);
+ skip_bits_long(&s->gb, 32); // CRC32 of seektable
if(s->frame_length >= UINT_MAX / (s->channels * sizeof(int32_t))){
av_log(avctx, AV_LOG_ERROR, "frame_length too large\n");
@@ -301,6 +329,11 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
int cur_chan = 0, framelen = s->frame_length;
int32_t *p;
+ if (avctx->err_recognition & AV_EF_CRCCHECK) {
+ if (buf_size < 4 || tta_check_crc(s, buf, buf_size - 4))
+ return AVERROR_INVALIDDATA;
+ }
+
init_get_bits(&s->gb, buf, buf_size*8);
// FIXME: seeking
@@ -404,7 +437,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
if (get_bits_left(&s->gb) < 32)
return -1;
- skip_bits(&s->gb, 32); // frame crc
+ skip_bits_long(&s->gb, 32); // frame crc
// convert to output buffer
if (s->bps == 2) {
diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c
index 89854c2..7fe024d 100644
--- a/libavcodec/utvideo.c
+++ b/libavcodec/utvideo.c
@@ -358,13 +358,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- const uint8_t *buf_end = buf + buf_size;
UtvideoContext *c = avctx->priv_data;
- const uint8_t *ptr;
int i, j;
const uint8_t *plane_start[5];
int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
int ret;
+ GetByteContext gb;
if (c->pic.data[0])
ff_thread_release_buffer(avctx, &c->pic);
@@ -379,20 +378,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
ff_thread_finish_setup(avctx);
/* parse plane structure to retrieve frame flags and validate slice offsets */
- ptr = buf;
+ bytestream2_init(&gb, buf, buf_size);
for (i = 0; i < c->planes; i++) {
- plane_start[i] = ptr;
- if (buf_end - ptr < 256 + 4 * c->slices) {
+ plane_start[i] = gb.buffer;
+ if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
return AVERROR_INVALIDDATA;
}
- ptr += 256;
+ bytestream2_skipu(&gb, 256);
slice_start = 0;
slice_end = 0;
for (j = 0; j < c->slices; j++) {
- slice_end = bytestream_get_le32(&ptr);
+ slice_end = bytestream2_get_le32u(&gb);
slice_size = slice_end - slice_start;
- if (slice_size < 0) {
+ if (slice_end <= 0 || slice_size <= 0 ||
+ bytestream2_get_bytes_left(&gb) < slice_end) {
av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
return AVERROR_INVALIDDATA;
}
@@ -400,18 +400,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
max_slice_size = FFMAX(max_slice_size, slice_size);
}
plane_size = slice_end;
- if (buf_end - ptr < plane_size) {
- av_log(avctx, AV_LOG_ERROR, "Plane size is bigger than available data\n");
- return AVERROR_INVALIDDATA;
- }
- ptr += plane_size;
+ bytestream2_skipu(&gb, plane_size);
}
- plane_start[c->planes] = ptr;
- if (buf_end - ptr < c->frame_info_size) {
+ plane_start[c->planes] = gb.buffer;
+ if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
return AVERROR_INVALIDDATA;
}
- c->frame_info = AV_RL32(ptr);
+ c->frame_info = bytestream2_get_le32u(&gb);
av_log(avctx, AV_LOG_DEBUG, "frame information flags %X\n", c->frame_info);
c->frame_pred = (c->frame_info >> 8) & 3;
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index d728f9b..a1c3f07 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -492,7 +492,7 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
int nr, dr;
nr = get_bits(gb, 8);
dr = get_bits(gb, 4);
- if (nr && nr < 8 && dr && dr < 3) {
+ if (nr > 0 && nr < 8 && dr > 0 && dr < 3) {
v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1];
v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000;
}
diff --git a/libavcodec/vc1data.c b/libavcodec/vc1data.c
index 69d71ad..e1e2cbf 100644
--- a/libavcodec/vc1data.c
+++ b/libavcodec/vc1data.c
@@ -84,7 +84,7 @@ const uint8_t ff_vc1_mbmode_intfrp[2][15][4] = {
}
};
-const int ff_vc1_fps_nr[5] = { 24, 25, 30, 50, 60 },
+const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72 },
ff_vc1_fps_dr[2] = { 1000, 1001 };
const uint8_t ff_vc1_pquant_table[3][32] = {
/* Implicit quantizer */
diff --git a/libavcodec/vc1data.h b/libavcodec/vc1data.h
index da8f0a1..9e4074c 100644
--- a/libavcodec/vc1data.h
+++ b/libavcodec/vc1data.h
@@ -41,7 +41,7 @@ extern const int ff_vc1_ttfrm_to_tt[4];
extern const uint8_t ff_vc1_mv_pmode_table[2][5];
extern const uint8_t ff_vc1_mv_pmode_table2[2][4];
-extern const int ff_vc1_fps_nr[5], ff_vc1_fps_dr[2];
+extern const int ff_vc1_fps_nr[7], ff_vc1_fps_dr[2];
extern const uint8_t ff_vc1_pquant_table[3][32];
/* MBMODE table for interlaced frame P-picture */
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 3e84464..3d1abc7 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -2512,6 +2512,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
int16_t *dc_val;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int q1, q2 = 0;
+ int dqscale_index;
wrap = s->block_wrap[n];
dc_val = s->dc_val[0] + s->block_index[n];
@@ -2524,15 +2525,18 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
a = dc_val[ - wrap];
/* scale predictors if needed */
q1 = s->current_picture.f.qscale_table[mb_pos];
+ dqscale_index = s->y_dc_scale_table[q1] - 1;
+ if (dqscale_index < 0)
+ return 0;
if (c_avail && (n != 1 && n != 3)) {
q2 = s->current_picture.f.qscale_table[mb_pos - 1];
if (q2 && q2 != q1)
- c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
if (q2 && q2 != q1)
- a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && c_avail && (n != 3)) {
int off = mb_pos;
@@ -2542,7 +2546,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
off -= s->mb_stride;
q2 = s->current_picture.f.qscale_table[off];
if (q2 && q2 != q1)
- b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
+ b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && c_avail) {
@@ -2959,6 +2963,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -3001,6 +3007,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -3011,6 +3019,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -3169,6 +3179,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
@@ -3211,6 +3223,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -3221,6 +3235,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n,
if (q2 && q1 != q2) {
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ if (q1 < 1)
+ return AVERROR_INVALIDDATA;
for (k = 1; k < 8; k++)
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c
index 7a6308a..4826650 100644
--- a/libavcodec/vqavideo.c
+++ b/libavcodec/vqavideo.c
@@ -70,10 +70,10 @@
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
+#include "bytestream.h"
#define PALETTE_COUNT 256
#define VQA_HEADER_SIZE 0x2A
-#define CHUNK_PREAMBLE_SIZE 8
/* allocate the maximum vector space, regardless of the file version:
* (0xFF00 codebook vectors + 0x100 solid pixel vectors) * (4x4 pixels/block) */
@@ -94,9 +94,7 @@ typedef struct VqaContext {
AVCodecContext *avctx;
AVFrame frame;
-
- const unsigned char *buf;
- int size;
+ GetByteContext gb;
uint32_t palette[PALETTE_COUNT];
@@ -123,7 +121,6 @@ typedef struct VqaContext {
static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
- unsigned char *vqa_header;
int i, j, codebook_index;
s->avctx = avctx;
@@ -136,17 +133,16 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
}
/* load up the VQA parameters from the header */
- vqa_header = (unsigned char *)s->avctx->extradata;
- s->vqa_version = vqa_header[0];
- s->width = AV_RL16(&vqa_header[6]);
- s->height = AV_RL16(&vqa_header[8]);
+ s->vqa_version = s->avctx->extradata[0];
+ s->width = AV_RL16(&s->avctx->extradata[6]);
+ s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
- s->vector_width = vqa_header[10];
- s->vector_height = vqa_header[11];
- s->partial_count = s->partial_countdown = vqa_header[13];
+ s->vector_width = s->avctx->extradata[10];
+ s->vector_height = s->avctx->extradata[11];
+ s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
@@ -155,6 +151,12 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
return -1;
}
+ if (s->width & (s->vector_width - 1) ||
+ s->height & (s->vector_height - 1)) {
+ av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
@@ -189,84 +191,88 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: next op would overflow dest_index\n"); \
av_log(NULL, AV_LOG_ERROR, " VQA video: current dest_index = %d, count = %d, dest_size = %d\n", \
dest_index, count, dest_size); \
- return; \
+ return AVERROR_INVALIDDATA; \
+ }
+
+#define CHECK_COPY(idx) \
+ if (idx < 0 || idx + count > dest_size) { \
+ av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: next op would overflow dest_index\n"); \
+ av_log(NULL, AV_LOG_ERROR, " VQA video: current src_pos = %d, count = %d, dest_size = %d\n", \
+ src_pos, count, dest_size); \
+ return AVERROR_INVALIDDATA; \
}
-static void decode_format80(const unsigned char *src, int src_size,
+
+static int decode_format80(GetByteContext *gb, int src_size,
unsigned char *dest, int dest_size, int check_size) {
- int src_index = 0;
int dest_index = 0;
- int count;
+ int count, opcode, start;
int src_pos;
unsigned char color;
int i;
- while (src_index < src_size) {
-
- av_dlog(NULL, " opcode %02X: ", src[src_index]);
+ start = bytestream2_tell(gb);
+ while (bytestream2_tell(gb) - start < src_size) {
+ opcode = bytestream2_get_byte(gb);
+ av_dlog(NULL, " opcode %02X: ", opcode);
/* 0x80 means that frame is finished */
- if (src[src_index] == 0x80)
- return;
+ if (opcode == 0x80)
+ return 0;
if (dest_index >= dest_size) {
av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n",
dest_index, dest_size);
- return;
+ return AVERROR_INVALIDDATA;
}
- if (src[src_index] == 0xFF) {
+ if (opcode == 0xFF) {
- src_index++;
- count = AV_RL16(&src[src_index]);
- src_index += 2;
- src_pos = AV_RL16(&src[src_index]);
- src_index += 2;
+ count = bytestream2_get_le16(gb);
+ src_pos = bytestream2_get_le16(gb);
av_dlog(NULL, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
+ CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
- } else if (src[src_index] == 0xFE) {
+ } else if (opcode == 0xFE) {
- src_index++;
- count = AV_RL16(&src[src_index]);
- src_index += 2;
- color = src[src_index++];
+ count = bytestream2_get_le16(gb);
+ color = bytestream2_get_byte(gb);
av_dlog(NULL, "(2) set %X bytes to %02X\n", count, color);
CHECK_COUNT();
memset(&dest[dest_index], color, count);
dest_index += count;
- } else if ((src[src_index] & 0xC0) == 0xC0) {
+ } else if ((opcode & 0xC0) == 0xC0) {
- count = (src[src_index++] & 0x3F) + 3;
- src_pos = AV_RL16(&src[src_index]);
- src_index += 2;
+ count = (opcode & 0x3F) + 3;
+ src_pos = bytestream2_get_le16(gb);
av_dlog(NULL, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
CHECK_COUNT();
+ CHECK_COPY(src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[src_pos + i];
dest_index += count;
- } else if (src[src_index] > 0x80) {
+ } else if (opcode > 0x80) {
- count = src[src_index++] & 0x3F;
+ count = opcode & 0x3F;
av_dlog(NULL, "(4) copy %X bytes from source to dest\n", count);
CHECK_COUNT();
- memcpy(&dest[dest_index], &src[src_index], count);
- src_index += count;
+ bytestream2_get_buffer(gb, &dest[dest_index], count);
dest_index += count;
} else {
- count = ((src[src_index] & 0x70) >> 4) + 3;
- src_pos = AV_RB16(&src[src_index]) & 0x0FFF;
- src_index += 2;
+ count = ((opcode & 0x70) >> 4) + 3;
+ src_pos = bytestream2_get_byte(gb) | ((opcode & 0x0F) << 8);
av_dlog(NULL, "(5) copy %X bytes from relpos %X\n", count, src_pos);
CHECK_COUNT();
+ CHECK_COPY(dest_index - src_pos);
for (i = 0; i < count; i++)
dest[dest_index + i] = dest[dest_index - src_pos + i];
dest_index += count;
@@ -281,9 +287,11 @@ static void decode_format80(const unsigned char *src, int src_size,
if (dest_index < dest_size)
av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n",
dest_index, dest_size);
+
+ return 0; // let's display what we decoded anyway
}
-static void vqa_decode_chunk(VqaContext *s)
+static int vqa_decode_chunk(VqaContext *s)
{
unsigned int chunk_type;
unsigned int chunk_size;
@@ -292,6 +300,7 @@ static void vqa_decode_chunk(VqaContext *s)
int i;
unsigned char r, g, b;
int index_shift;
+ int res;
int cbf0_chunk = -1;
int cbfz_chunk = -1;
@@ -311,10 +320,11 @@ static void vqa_decode_chunk(VqaContext *s)
int hibytes = s->decode_buffer_size / 2;
/* first, traverse through the frame and find the subchunks */
- while (index < s->size) {
+ while (bytestream2_get_bytes_left(&s->gb) >= 8) {
- chunk_type = AV_RB32(&s->buf[index]);
- chunk_size = AV_RB32(&s->buf[index + 4]);
+ chunk_type = bytestream2_get_be32u(&s->gb);
+ index = bytestream2_tell(&s->gb);
+ chunk_size = bytestream2_get_be32u(&s->gb);
switch (chunk_type) {
@@ -357,7 +367,7 @@ static void vqa_decode_chunk(VqaContext *s)
}
byte_skip = chunk_size & 0x01;
- index += (CHUNK_PREAMBLE_SIZE + chunk_size + byte_skip);
+ bytestream2_skip(&s->gb, chunk_size + byte_skip);
}
/* next, deal with the palette */
@@ -365,7 +375,7 @@ static void vqa_decode_chunk(VqaContext *s)
/* a chunk should not have both chunk types */
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CPL0 and CPLZ chunks\n");
- return;
+ return AVERROR_INVALIDDATA;
}
/* decompress the palette chunk */
@@ -378,19 +388,19 @@ static void vqa_decode_chunk(VqaContext *s)
/* convert the RGB palette into the machine's endian format */
if (cpl0_chunk != -1) {
- chunk_size = AV_RB32(&s->buf[cpl0_chunk + 4]);
+ bytestream2_seek(&s->gb, cpl0_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
/* sanity check the palette size */
- if (chunk_size / 3 > 256) {
+ if (chunk_size / 3 > 256 || chunk_size > bytestream2_get_bytes_left(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found a palette chunk with %d colors\n",
chunk_size / 3);
- return;
+ return AVERROR_INVALIDDATA;
}
- cpl0_chunk += CHUNK_PREAMBLE_SIZE;
for (i = 0; i < chunk_size / 3; i++) {
/* scale by 4 to transform 6-bit palette -> 8-bit */
- r = s->buf[cpl0_chunk++] * 4;
- g = s->buf[cpl0_chunk++] * 4;
- b = s->buf[cpl0_chunk++] * 4;
+ r = bytestream2_get_byteu(&s->gb) * 4;
+ g = bytestream2_get_byteu(&s->gb) * 4;
+ b = bytestream2_get_byteu(&s->gb) * 4;
s->palette[i] = (r << 16) | (g << 8) | (b);
}
}
@@ -400,31 +410,32 @@ static void vqa_decode_chunk(VqaContext *s)
/* a chunk should not have both chunk types */
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CBF0 and CBFZ chunks\n");
- return;
+ return AVERROR_INVALIDDATA;
}
/* decompress the full codebook chunk */
if (cbfz_chunk != -1) {
- chunk_size = AV_RB32(&s->buf[cbfz_chunk + 4]);
- cbfz_chunk += CHUNK_PREAMBLE_SIZE;
- decode_format80(&s->buf[cbfz_chunk], chunk_size,
- s->codebook, s->codebook_size, 0);
+ bytestream2_seek(&s->gb, cbfz_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
+ if ((res = decode_format80(&s->gb, chunk_size, s->codebook,
+ s->codebook_size, 0)) < 0)
+ return res;
}
/* copy a full codebook */
if (cbf0_chunk != -1) {
- chunk_size = AV_RB32(&s->buf[cbf0_chunk + 4]);
+ bytestream2_seek(&s->gb, cbf0_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
/* sanity check the full codebook size */
if (chunk_size > MAX_CODEBOOK_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: CBF0 chunk too large (0x%X bytes)\n",
chunk_size);
- return;
+ return AVERROR_INVALIDDATA;
}
- cbf0_chunk += CHUNK_PREAMBLE_SIZE;
- memcpy(s->codebook, &s->buf[cbf0_chunk], chunk_size);
+ bytestream2_get_buffer(&s->gb, s->codebook, chunk_size);
}
/* decode the frame */
@@ -432,13 +443,14 @@ static void vqa_decode_chunk(VqaContext *s)
/* something is wrong if there is no VPTZ chunk */
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: no VPTZ chunk found\n");
- return;
+ return AVERROR_INVALIDDATA;
}
- chunk_size = AV_RB32(&s->buf[vptz_chunk + 4]);
- vptz_chunk += CHUNK_PREAMBLE_SIZE;
- decode_format80(&s->buf[vptz_chunk], chunk_size,
- s->decode_buffer, s->decode_buffer_size, 1);
+ bytestream2_seek(&s->gb, vptz_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
+ if ((res = decode_format80(&s->gb, chunk_size,
+ s->decode_buffer, s->decode_buffer_size, 1)) < 0)
+ return res;
/* render the final PAL8 frame */
if (s->vector_height == 4)
@@ -502,17 +514,17 @@ static void vqa_decode_chunk(VqaContext *s)
if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) {
/* a chunk should not have both chunk types */
av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CBP0 and CBPZ chunks\n");
- return;
+ return AVERROR_INVALIDDATA;
}
if (cbp0_chunk != -1) {
- chunk_size = AV_RB32(&s->buf[cbp0_chunk + 4]);
- cbp0_chunk += CHUNK_PREAMBLE_SIZE;
+ bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
/* accumulate partial codebook */
- memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
- &s->buf[cbp0_chunk], chunk_size);
+ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
+ chunk_size);
s->next_codebook_buffer_index += chunk_size;
s->partial_countdown--;
@@ -530,39 +542,39 @@ static void vqa_decode_chunk(VqaContext *s)
if (cbpz_chunk != -1) {
- chunk_size = AV_RB32(&s->buf[cbpz_chunk + 4]);
- cbpz_chunk += CHUNK_PREAMBLE_SIZE;
+ bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET);
+ chunk_size = bytestream2_get_be32(&s->gb);
/* accumulate partial codebook */
- memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
- &s->buf[cbpz_chunk], chunk_size);
+ bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
+ chunk_size);
s->next_codebook_buffer_index += chunk_size;
s->partial_countdown--;
if (s->partial_countdown == 0) {
+ GetByteContext gb;
+ bytestream2_init(&gb, s->next_codebook_buffer, s->next_codebook_buffer_index);
/* decompress codebook */
- decode_format80(s->next_codebook_buffer,
- s->next_codebook_buffer_index,
- s->codebook, s->codebook_size, 0);
+ if ((res = decode_format80(&gb, s->next_codebook_buffer_index,
+ s->codebook, s->codebook_size, 0)) < 0)
+ return res;
/* reset accounting */
s->next_codebook_buffer_index = 0;
s->partial_countdown = s->partial_count;
}
}
+
+ return 0;
}
static int vqa_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
VqaContext *s = avctx->priv_data;
-
- s->buf = buf;
- s->size = buf_size;
+ int res;
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
@@ -572,7 +584,9 @@ static int vqa_decode_frame(AVCodecContext *avctx,
return -1;
}
- vqa_decode_chunk(s);
+ bytestream2_init(&s->gb, avpkt->data, avpkt->size);
+ if ((res = vqa_decode_chunk(s)) < 0)
+ return res;
/* make the palette available on the way out */
memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
@@ -582,7 +596,7 @@ static int vqa_decode_frame(AVCodecContext *avctx,
*(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
- return buf_size;
+ return avpkt->size;
}
static av_cold int vqa_decode_end(AVCodecContext *avctx)
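
The vqavideo hunks above replace raw indexing into s->buf with the bytestream2 reader (GetByteContext), whose checked accessors clamp every read to the packet and return 0 past the end of the input. As a rough sketch of that pattern only, not part of the patch, a bounds-checked read using the same API could look like this (the helper name and buffer sizes are invented; assumes libavcodec's bytestream.h is included):

    /* Illustration only: read a 16-bit count, then copy that many bytes,
     * refusing to overrun either the destination or the packet. */
    static int read_block(GetByteContext *gb, uint8_t *dst, int dst_size)
    {
        int count = bytestream2_get_le16(gb);      /* checked reader: 0 once input is exhausted */
        if (count > dst_size || count > bytestream2_get_bytes_left(gb))
            return AVERROR_INVALIDDATA;            /* reject rather than overflow */
        bytestream2_get_buffer(gb, dst, count);    /* copy is bounded by the context */
        return count;
    }

The unchecked _get_byteu/_get_be32u variants used in the chunk and palette loops skip that per-read test, which is why the patch guards them by checking bytestream2_get_bytes_left() first.
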
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index 8854e35..86e6996 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -1440,8 +1440,7 @@ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
int pitch[MAX_BLOCKS], last_block_pitch;
/* Parse frame type ("frame header"), see frame_descs */
- int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)],
- block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
+ int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)], block_nsamples;
if (bd_idx < 0) {
av_log(ctx, AV_LOG_ERROR,
@@ -1449,6 +1448,8 @@ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
return -1;
}
+ block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
+
/* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */
if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) {
/* Pitch is provided per frame, which is interpreted as the pitch of
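
The wmavoice change is purely an ordering fix: block_nsamples is now derived from frame_descs[bd_idx] only after the bd_idx < 0 check, so a corrupt frame type can no longer index the table with a negative value. A minimal sketch of the same validate-before-index pattern, with invented names:

    /* Hypothetical lookup: validate the index first, then use the entry. */
    static int samples_per_block(const int *n_blocks, int n_frame_types,
                                 int frame_size, int idx)
    {
        if (idx < 0 || idx >= n_frame_types)
            return -1;                       /* bad frame type: bail out before indexing */
        return frame_size / n_blocks[idx];   /* assumes the table holds nonzero block counts */
    }
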
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 969ab28..3b48788 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -1092,6 +1092,8 @@ static int ff_asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pk
//printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
asf_st->pkt.size = 0;
asf_st->pkt.data = 0;
+ asf_st->pkt.side_data_elems = 0;
+ asf_st->pkt.side_data = NULL;
break; // packet completed
}
}
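
The asfdec hunk clears the side-data fields of the cached packet along with its data pointer and size, so the next packet assembled into the same struct does not inherit stale side-data pointers. A made-up helper showing the same reset:

    /* Sketch only; field names match this tree's AVPacket. */
    static void reset_cached_pkt(AVPacket *pkt)
    {
        pkt->data            = NULL;
        pkt->size            = 0;
        pkt->side_data       = NULL;   /* drop the stale pointer ...         */
        pkt->side_data_elems = 0;      /* ... and its element count together */
    }
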
diff --git a/libavformat/id3v2.c b/libavformat/id3v2.c
index deb652d..6499872 100644
--- a/libavformat/id3v2.c
+++ b/libavformat/id3v2.c
@@ -448,8 +448,17 @@ static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t
unsync = flags & 0x80;
- if (isv34 && flags & 0x40) /* Extended header present, just skip over it */
- avio_skip(s->pb, get_size(s->pb, 4));
+ if (isv34 && flags & 0x40) { /* Extended header present, just skip over it */
+ int extlen = get_size(s->pb, 4);
+ if (version == 4)
+ extlen -= 4; // in v2.4 the length includes the length field we just read
+
+ if (extlen < 0) {
+ reason = "invalid extended header length";
+ goto error;
+ }
+ avio_skip(s->pb, extlen);
+ }
while (len >= taghdrlen) {
unsigned int tflags = 0;
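
The id3v2.c change accounts for the differing extended-header semantics: in ID3v2.4 the stored length includes the 4-byte size field that was just read, while in ID3v2.3 it does not, and a resulting negative skip is treated as a corrupt header. A hypothetical helper mirroring that arithmetic:

    /* Returns how many extended-header bytes remain to be skipped,
     * or -1 for an invalid length.  Helper name is invented. */
    static int ext_header_remaining(int stored_len, int version)
    {
        int skip = stored_len;
        if (version == 4)
            skip -= 4;                 /* v2.4 length counts the size field itself */
        return skip < 0 ? -1 : skip;
    }
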
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 089cdea..f6be6a8 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1699,6 +1699,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
unsigned int stps_index = 0;
unsigned int i, j;
uint64_t stream_size = 0;
+ AVIndexEntry *mem;
/* adjust first dts according to edit list */
if (sc->time_offset && mov->time_scale > 0) {
@@ -1727,12 +1728,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
if (!sc->sample_count)
return;
- if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
+ if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
return;
- st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));
- if (!st->index_entries)
+ mem = av_realloc(st->index_entries, (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries));
+ if (!mem)
return;
- st->index_entries_allocated_size = sc->sample_count*sizeof(*st->index_entries);
+ st->index_entries = mem;
+ st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries);
for (i = 0; i < sc->chunk_count; i++) {
current_offset = sc->chunk_offsets[i];
@@ -1815,12 +1817,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
}
av_dlog(mov->fc, "chunk count %d\n", total);
- if (total >= UINT_MAX / sizeof(*st->index_entries))
+ if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
return;
- st->index_entries = av_malloc(total*sizeof(*st->index_entries));
- if (!st->index_entries)
+ mem = av_realloc(st->index_entries, (st->nb_index_entries + total) * sizeof(*st->index_entries));
+ if (!mem)
return;
- st->index_entries_allocated_size = total*sizeof(*st->index_entries);
+ st->index_entries = mem;
+ st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries);
// populate index
for (i = 0; i < sc->chunk_count; i++) {
@@ -2668,7 +2671,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->stream_index = sc->ffindex;
pkt->dts = sample->timestamp;
- if (sc->ctts_data) {
+ if (sc->ctts_data && sc->ctts_index < sc->ctts_count) {
pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
/* update ctts context */
sc->ctts_sample++;
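
The mov_build_index hunks switch from av_malloc to av_realloc so index entries from successive calls accumulate instead of replacing (and leaking) the previous array, and the UINT_MAX check now also accounts for entries already present. A rough sketch of that grow pattern (helper name invented; assumes avformat.h, libavutil/mem.h and limits.h):

    /* Grow an AVIndexEntry array by `wanted` entries without overflowing
     * the size computation; the old array stays valid if realloc fails. */
    static int grow_index(AVIndexEntry **entries, unsigned int *allocated_size,
                          unsigned int existing, unsigned int wanted)
    {
        AVIndexEntry *mem;

        if (wanted >= UINT_MAX / sizeof(**entries) - existing)
            return -1;                              /* (existing + wanted) * size would overflow */
        mem = av_realloc(*entries, (existing + wanted) * sizeof(**entries));
        if (!mem)
            return -1;                              /* keep the old pointer untouched */
        *entries        = mem;
        *allocated_size = (existing + wanted) * sizeof(**entries);
        return 0;
    }
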
diff --git a/libavformat/xwma.c b/libavformat/xwma.c
index 5839bdc..94abfc7 100644
--- a/libavformat/xwma.c
+++ b/libavformat/xwma.c
@@ -115,6 +115,17 @@ static int xwma_read_header(AVFormatContext *s, AVFormatParameters *ap)
}
}
+ if (!st->codec->channels) {
+ av_log(s, AV_LOG_WARNING, "Invalid channel count: %d\n",
+ st->codec->channels);
+ return AVERROR_INVALIDDATA;
+ }
+ if (!st->codec->bits_per_coded_sample) {
+ av_log(s, AV_LOG_WARNING, "Invalid bits_per_coded_sample: %d\n",
+ st->codec->bits_per_coded_sample);
+ return AVERROR_INVALIDDATA;
+ }
+
/* set the sample rate */
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
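
The xwma header checks reject streams that declare zero channels or zero bits_per_coded_sample before those values reach the rest of the demuxer, where a zero would otherwise feed later per-sample arithmetic. A minimal sketch of the guard (invented helper; assumes libavutil/error.h for AVERROR_INVALIDDATA):

    static int check_audio_params(int channels, int bits_per_coded_sample)
    {
        if (channels <= 0 || bits_per_coded_sample <= 0)
            return AVERROR_INVALIDDATA;   /* a zero here would poison later divisions */
        return 0;
    }
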
--
Libav/FFmpeg packaging