path: root/libavcodec/adpcmenc.c
Diffstat (limited to 'libavcodec/adpcmenc.c')
-rw-r--r--  libavcodec/adpcmenc.c  75
1 file changed, 38 insertions, 37 deletions
diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c
index 0757624..668939c 100644
--- a/libavcodec/adpcmenc.c
+++ b/libavcodec/adpcmenc.c
@@ -5,20 +5,20 @@
* fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
* by Mike Melanson (melanson@pcisys.net)
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -58,6 +58,8 @@ typedef struct ADPCMEncodeContext {
#define FREEZE_INTERVAL 128
+static av_cold int adpcm_encode_close(AVCodecContext *avctx);
+
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
ADPCMEncodeContext *s = avctx->priv_data;
@@ -99,6 +101,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
/* seems frame_size isn't taken into account...
have to buffer the samples :-( */
avctx->block_align = BLKSIZE;
+ avctx->bits_per_coded_sample = 4;
break;
case AV_CODEC_ID_ADPCM_IMA_QT:
avctx->frame_size = 64;
@@ -107,8 +110,8 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
case AV_CODEC_ID_ADPCM_MS:
/* each 16 bits sample gives one nibble
and we have 7 bytes per channel overhead */
- avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 /
- avctx->channels + 2;
+ avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
+ avctx->bits_per_coded_sample = 4;
avctx->block_align = BLKSIZE;
if (!(avctx->extradata = av_malloc(32 + AV_INPUT_BUFFER_PADDING_SIZE)))
goto error;
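
A worked check of the frame_size expression above, as a standalone sketch that assumes BLKSIZE is 1024: 7 bytes per channel go to the MS ADPCM block header, each remaining byte holds two 4-bit nibbles shared across channels, and the header itself carries 2 samples per channel.

    #include <stdio.h>

    int main(void)
    {
        const int BLKSIZE = 1024;            /* assumed block size */
        for (int channels = 1; channels <= 2; channels++) {
            int frame_size = (BLKSIZE - 7 * channels) * 2 / channels + 2;
            printf("%d ch: %d samples per block\n", channels, frame_size);
        }
        return 0;                            /* prints 2036 for mono, 1012 for stereo */
    }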
@@ -143,10 +146,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
return 0;
error:
- av_freep(&s->paths);
- av_freep(&s->node_buf);
- av_freep(&s->nodep_buf);
- av_freep(&s->trellis_hash);
+ adpcm_encode_close(avctx);
return ret;
}
@@ -179,24 +179,27 @@ static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
int16_t sample)
{
int delta = sample - c->prev_sample;
- int mask, step = ff_adpcm_step_table[c->step_index];
- int diff = step >> 3;
- int nibble = 0;
+ int diff, step = ff_adpcm_step_table[c->step_index];
+ int nibble = 8*(delta < 0);
- if (delta < 0) {
- nibble = 8;
- delta = -delta;
- }
+ delta= abs(delta);
+ diff = delta + (step >> 3);
- for (mask = 4; mask;) {
- if (delta >= step) {
- nibble |= mask;
- delta -= step;
- diff += step;
- }
- step >>= 1;
- mask >>= 1;
+ if (delta >= step) {
+ nibble |= 4;
+ delta -= step;
+ }
+ step >>= 1;
+ if (delta >= step) {
+ nibble |= 2;
+ delta -= step;
}
+ step >>= 1;
+ if (delta >= step) {
+ nibble |= 1;
+ delta -= step;
+ }
+ diff -= delta;
if (nibble & 8)
c->prev_sample -= diff;
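
The rewrite above unrolls the old mask loop: three successive comparisons of |delta| against step, step/2 and step/4 build the same 3-bit magnitude, and diff = delta + (step >> 3) followed by diff -= delta reproduces the old running accumulation. A standalone sketch of the equivalence (the helper names are hypothetical and the step values are arbitrary test inputs):

    #include <assert.h>

    /* quant_loop(): the removed mask-loop form; quant_unrolled(): the new form.
     * Both map |delta| to the same 3-bit magnitude for a given step. */
    static int quant_loop(int delta, int step)
    {
        int nibble = 0;
        for (int mask = 4; mask; mask >>= 1, step >>= 1) {
            if (delta >= step) {
                nibble |= mask;
                delta  -= step;
            }
        }
        return nibble;
    }

    static int quant_unrolled(int delta, int step)
    {
        int nibble = 0;
        if (delta >= step) { nibble |= 4; delta -= step; }
        step >>= 1;
        if (delta >= step) { nibble |= 2; delta -= step; }
        step >>= 1;
        if (delta >= step) { nibble |= 1; delta -= step; }
        return nibble;
    }

    int main(void)
    {
        for (int step = 7; step < 32767; step *= 3)       /* arbitrary test steps */
            for (int delta = 0; delta <= 4 * step; delta++)
                assert(quant_loop(delta, step) == quant_unrolled(delta, step));
        return 0;
    }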
@@ -224,7 +227,7 @@ static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
bias = -c->idelta / 2;
nibble = (nibble + bias) / c->idelta;
- nibble = av_clip(nibble, -8, 7) & 0x0F;
+ nibble = av_clip_intp2(nibble, 3) & 0x0F;
predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
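
av_clip_intp2(nibble, 3) clamps to the range a signed 4-bit value can hold, so it matches the old av_clip(nibble, -8, 7); the & 0x0F then folds the result into an unsigned nibble. A minimal standalone check (clip_intp2 below is a stand-in for the libavutil helper, not the real implementation):

    #include <assert.h>

    /* Stand-in for av_clip_intp2(x, p): clamp x to [-(2^p), 2^p - 1]. */
    static int clip_intp2(int x, int p)
    {
        if (x < -(1 << p))    return -(1 << p);
        if (x > (1 << p) - 1) return (1 << p) - 1;
        return x;
    }

    int main(void)
    {
        for (int x = -100; x <= 100; x++) {
            int clipped = x < -8 ? -8 : x > 7 ? 7 : x;   /* the old av_clip(x, -8, 7) */
            assert(clip_intp2(x, 3) == clipped);
        }
        return 0;
    }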
@@ -255,7 +258,7 @@ static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
c->predictor = av_clip_int16(c->predictor);
c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
- c->step = av_clip(c->step, 127, 24567);
+ c->step = av_clip(c->step, 127, 24576);
return nibble;
}
@@ -329,7 +332,7 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
uint8_t *h;\
dec_sample = av_clip_int16(dec_sample);\
d = sample - dec_sample;\
- ssd = nodes[j]->ssd + d*d;\
+ ssd = nodes[j]->ssd + d*(unsigned)d;\
/* Check for wraparound, skip such samples completely. \
* Note, changing ssd to a 64 bit variable would be \
* simpler, avoiding this check, but it's slower on \
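
Casting one factor to unsigned in d*(unsigned)d keeps the squaring well defined even when it exceeds INT_MAX, since unsigned arithmetic wraps modulo 2^32; a plain signed d*d would be undefined behaviour, while the wrapped value can be caught by the wraparound check the comment describes. A small illustration, assuming 32-bit int:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int d = 65536;                          /* a large prediction error */
        /* int ssd_term = d * d;                   signed overflow: undefined behaviour */
        uint32_t ssd_term = d * (unsigned)d;    /* well defined: wraps modulo 2^32 (here to 0) */
        printf("%u\n", ssd_term);
        return 0;
    }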
@@ -364,7 +367,7 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
*h = generation;\
u = nodes_next[pos];\
if (!u) {\
- assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
+ av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
u = t++;\
nodes_next[pos] = u;\
u->path = pathn++;\
@@ -412,7 +415,7 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
} else { //AV_CODEC_ID_ADPCM_YAMAHA
LOOP_NODES(yamaha, step,
av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
- 127, 24567));
+ 127, 24576));
#undef LOOP_NODES
#undef STORE_NODE
}
@@ -483,10 +486,8 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
else
pkt_size = avctx->block_align;
- if ((ret = ff_alloc_packet(avpkt, pkt_size))) {
- av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+ if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size, 0)) < 0)
return ret;
- }
dst = avpkt->data;
switch(avctx->codec->id) {
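
The pkt_size expression above appears to describe the SWF bitstream written further down: 2 header bits, then per channel a 16-bit first sample plus a 6-bit step index (22 bits) and 4 bits for every remaining sample, rounded up to whole bytes. A standalone arithmetic check (channel and sample counts are arbitrary examples):

    #include <stdio.h>

    int main(void)
    {
        int channels   = 2;                  /* arbitrary example values */
        int nb_samples = 4096;
        int bits  = 2 + channels * (22 + 4 * (nb_samples - 1));
        int bytes = (bits + 7) / 8;          /* round up to whole bytes */
        printf("%d bits -> %d bytes\n", bits, bytes);   /* 32806 bits -> 4101 bytes */
        return 0;
    }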
@@ -508,7 +509,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
/* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
if (avctx->trellis > 0) {
- FF_ALLOC_OR_GOTO(avctx, buf, avctx->channels * blocks * 8, error);
+ FF_ALLOC_ARRAY_OR_GOTO(avctx, buf, avctx->channels, blocks * 8, error);
for (ch = 0; ch < avctx->channels; ch++) {
adpcm_compress_trellis(avctx, &samples_p[ch][1],
buf + ch * blocks * 8, &c->status[ch],
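
FF_ALLOC_ARRAY_OR_GOTO receives the element count and element size separately, so the size multiplication can be overflow-checked before allocating, whereas the old FF_ALLOC_OR_GOTO was handed the already-multiplied byte count. Roughly the same effect is available directly through av_malloc_array; a sketch, where the helper and its arguments are placeholders for the encoder's locals:

    #include <stdint.h>
    #include "libavutil/mem.h"

    /* Hypothetical helper, not from the file: allocate the channels * blocks * 8
     * byte trellis buffer with an overflow-checked count * size multiply. */
    static uint8_t *alloc_trellis_buf(int channels, int blocks)
    {
        return av_malloc_array(channels, (size_t)blocks * 8);
    }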
@@ -540,7 +541,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_IMA_QT:
{
PutBitContext pb;
- init_put_bits(&pb, dst, pkt_size * 8);
+ init_put_bits(&pb, dst, pkt_size);
for (ch = 0; ch < avctx->channels; ch++) {
ADPCMChannelStatus *status = &c->status[ch];
@@ -570,7 +571,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
case AV_CODEC_ID_ADPCM_SWF:
{
PutBitContext pb;
- init_put_bits(&pb, dst, pkt_size * 8);
+ init_put_bits(&pb, dst, pkt_size);
n = frame->nb_samples - 1;
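
Both init_put_bits() changes above drop the * 8: the size argument of init_put_bits() is in bytes, not bits, so pkt_size * 8 overstated the writable buffer by a factor of eight. A minimal usage sketch (the wrapper function and its parameters are placeholders for the encoder's locals):

    #include <stdint.h>
    #include "libavcodec/put_bits.h"

    /* Hypothetical wrapper; dst, pkt_size_bytes and step_index stand in for
     * the encoder's locals. */
    static void write_status(uint8_t *dst, int pkt_size_bytes, int step_index)
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size_bytes);   /* size in bytes, not bits */
        put_bits(&pb, 6, step_index);              /* individual writes are sized in bits */
        flush_put_bits(&pb);
    }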
@@ -580,7 +581,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
// init the encoder state
for (i = 0; i < avctx->channels; i++) {
// clip step so it fits 6 bits
- c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
+ c->status[i].step_index = av_clip_uintp2(c->status[i].step_index, 6);
put_sbits(&pb, 16, samples[i]);
put_bits(&pb, 6, c->status[i].step_index);
c->status[i].prev_sample = samples[i];
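
The av_clip_uintp2(..., 6) above saturates the step index into [0, 63] before it is written into the 6-bit field on the following line; a 6-bit field simply cannot represent values above 63, so they must be saturated rather than left to truncate. A tiny standalone illustration (the value 64 is a made-up out-of-range input):

    #include <assert.h>

    int main(void)
    {
        int step_index = 64;                 /* made-up out-of-range value */
        assert((step_index & 0x3F) == 0);    /* a 6-bit field would keep only 0 */
        int clamped = step_index < 0 ? 0 : step_index > 63 ? 63 : step_index;
        assert(clamped == 63);               /* saturation keeps the nearest legal value */
        return 0;
    }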