Diffstat (limited to 'libavcodec/h264.c')
-rw-r--r--  libavcodec/h264.c  321
1 file changed, 219 insertions(+), 102 deletions(-)
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index b1204c0..071a6ad 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -2,20 +2,20 @@
* H.26L/H.264/AVC/JVT/14496-10/... decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -97,12 +97,9 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
}
return 0;
-} //FIXME cleanup like ff_h264_check_intra_pred_mode
+} //FIXME cleanup like check_intra_pred_mode
-/**
- * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
- */
-int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
+static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
MpegEncContext * const s = &h->s;
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
@@ -122,7 +119,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
if((h->left_samples_available&0x8080) != 0x8080){
mode= left[ mode ];
- if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
+ if(is_chroma && (h->left_samples_available&0x8080)){ //mad cow disease mode, aka MBAFF + constrained_intra_pred
mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8);
}
if(mode<0){
@@ -134,6 +131,23 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){
return mode;
}
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
+{
+ return check_intra_pred_mode(h, mode, 0);
+}
+
+/**
+ * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
+ */
+int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
+{
+ return check_intra_pred_mode(h, mode, 1);
+}
+
+
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
int i, si, di;
uint8_t *dst;
@@ -174,20 +188,28 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_l
i-= RS;
}
- if(i>=length-1){ //no escaped 0
- *dst_length= length;
- *consumed= length+1; //+1 for the header
- return src;
- }
-
bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
- av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE);
+ si=h->rbsp_buffer_size[bufidx];
+ av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);
dst= h->rbsp_buffer[bufidx];
+ if(si != h->rbsp_buffer_size[bufidx])
+ memset(dst + length, 0, FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);
if (dst == NULL){
return NULL;
}
+ if(i>=length-1){ //no escaped 0
+ *dst_length= length;
+ *consumed= length+1; //+1 for the header
+ if(h->s.avctx->flags2 & CODEC_FLAG2_FAST){
+ return src;
+ }else{
+ memcpy(dst, src, length);
+ return dst;
+ }
+ }
+
//printf("decoding esc\n");
memcpy(dst, src, i);
si=di=i;
@@ -671,7 +693,7 @@ static inline void prefetch_motion(H264Context *h, int list, int pixel_shift, in
s->dsp.prefetch(src[1]+off, s->linesize, 4);
s->dsp.prefetch(src[2]+off, s->linesize, 4);
}else{
- off= ((mx>>1) << pixel_shift) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + (64 << pixel_shift);
+ off= (((mx>>1)+64)<<pixel_shift) + ((my>>1) + (s->mb_x&7))*s->uvlinesize;
s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
}
}
@@ -942,7 +964,7 @@ static void clone_tables(H264Context *dst, H264Context *src, int i){
dst->list_counts = src->list_counts;
dst->s.obmc_scratchpad = NULL;
- ff_h264_pred_init(&dst->hpc, src->s.codec_id, src->sps.bit_depth_luma);
+ ff_h264_pred_init(&dst->hpc, src->s.codec_id, src->sps.bit_depth_luma, src->sps.chroma_format_idc);
}
/**
@@ -970,30 +992,39 @@ static av_cold void common_init(H264Context *h){
s->height = s->avctx->height;
s->codec_id= s->avctx->codec->id;
- ff_h264dsp_init(&h->h264dsp, 8);
- ff_h264_pred_init(&h->hpc, s->codec_id, 8);
+ s->avctx->bits_per_raw_sample = 8;
+ h->cur_chroma_format_idc = 1;
+
+ ff_h264dsp_init(&h->h264dsp,
+ s->avctx->bits_per_raw_sample, h->cur_chroma_format_idc);
+ ff_h264_pred_init(&h->hpc, s->codec_id,
+ s->avctx->bits_per_raw_sample, h->cur_chroma_format_idc);
h->dequant_coeff_pps= -1;
s->unrestricted_mv=1;
s->decode=1; //FIXME
+ s->dsp.dct_bits = 16;
dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
}
-int ff_h264_decode_extradata(H264Context *h)
+int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
{
AVCodecContext *avctx = h->s.avctx;
- if(avctx->extradata[0] == 1){
+ if(!buf || size <= 0)
+ return -1;
+
+ if(buf[0] == 1){
int i, cnt, nalsize;
- unsigned char *p = avctx->extradata;
+ const unsigned char *p = buf;
h->is_avc = 1;
- if(avctx->extradata_size < 7) {
+ if(size < 7) {
av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
return -1;
}
@@ -1005,7 +1036,7 @@ int ff_h264_decode_extradata(H264Context *h)
p += 6;
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
- if (p - avctx->extradata + nalsize > avctx->extradata_size)
+ if(nalsize > size - (p-buf))
return -1;
if(decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i);
@@ -1017,7 +1048,7 @@ int ff_h264_decode_extradata(H264Context *h)
cnt = *(p++); // Number of pps
for (i = 0; i < cnt; i++) {
nalsize = AV_RB16(p) + 2;
- if (p - avctx->extradata + nalsize > avctx->extradata_size)
+ if(nalsize > size - (p-buf))
return -1;
if (decode_nal_units(h, p, nalsize) < 0) {
av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i);
@@ -1026,10 +1057,10 @@ int ff_h264_decode_extradata(H264Context *h)
p += nalsize;
}
// Now store right nal length size, that will be use to parse all other nals
- h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
+ h->nal_length_size = (buf[4] & 0x03) + 1;
} else {
h->is_avc = 0;
- if(decode_nal_units(h, avctx->extradata, avctx->extradata_size) < 0)
+ if(decode_nal_units(h, buf, size) < 0)
return -1;
}
return 0;
@@ -1073,7 +1104,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
}
if(avctx->extradata_size > 0 && avctx->extradata &&
- ff_h264_decode_extradata(h))
+ ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size))
return -1;
if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){
@@ -1197,6 +1228,7 @@ static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContex
copy_picture_range(h->delayed_pic, h1->delayed_pic, MAX_DELAYED_PIC_COUNT+2, s, s1);
h->last_slice_type = h1->last_slice_type;
+ h->sync = h1->sync;
if(!s->current_picture_ptr) return 0;
@@ -1383,7 +1415,7 @@ static void decode_postinit(H264Context *h, int setup_finished){
pics = 0;
while(h->delayed_pic[pics]) pics++;
- assert(pics <= MAX_DELAYED_PIC_COUNT);
+ av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
if (cur->f.reference == 0)
@@ -1428,15 +1460,21 @@ static void decode_postinit(H264Context *h, int setup_finished){
av_log(s->avctx, AV_LOG_DEBUG, "no picture\n");
}
+ if (h->next_output_pic && h->next_output_pic->sync) {
+ h->sync |= 2;
+ }
+
if (setup_finished)
ff_thread_finish_setup(s->avctx);
}
-static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int chroma444, int simple){
+static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple){
MpegEncContext * const s = &h->s;
uint8_t *top_border;
int top_idx = 1;
const int pixel_shift = h->pixel_shift;
+ int chroma444 = CHROMA444;
+ int chroma422 = CHROMA422;
src_y -= linesize;
src_cb -= uvlinesize;
@@ -1460,6 +1498,14 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, ui
AV_COPY128(top_border+16, src_cb + 15*uvlinesize);
AV_COPY128(top_border+32, src_cr + 15*uvlinesize);
}
+ } else if(chroma422){
+ if (pixel_shift) {
+ AV_COPY128(top_border+32, src_cb + 15*uvlinesize);
+ AV_COPY128(top_border+48, src_cr + 15*uvlinesize);
+ } else {
+ AV_COPY64(top_border+16, src_cb + 15*uvlinesize);
+ AV_COPY64(top_border+24, src_cr + 15*uvlinesize);
+ }
} else {
if (pixel_shift) {
AV_COPY128(top_border+32, src_cb+7*uvlinesize);
@@ -1495,6 +1541,14 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, ui
AV_COPY128(top_border+16, src_cb + 16*linesize);
AV_COPY128(top_border+32, src_cr + 16*linesize);
}
+ } else if(chroma422) {
+ if (pixel_shift) {
+ AV_COPY128(top_border+32, src_cb+16*uvlinesize);
+ AV_COPY128(top_border+48, src_cr+16*uvlinesize);
+ } else {
+ AV_COPY64(top_border+16, src_cb+16*uvlinesize);
+ AV_COPY64(top_border+24, src_cr+16*uvlinesize);
+ }
} else {
if (pixel_shift) {
AV_COPY128(top_border+32, src_cb+8*uvlinesize);
@@ -1773,10 +1827,11 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
/* is_h264 should always be true if SVQ3 is disabled. */
const int is_h264 = !CONFIG_SVQ3_DECODER || simple || s->codec_id == CODEC_ID_H264;
void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
+ const int block_h = 16>>s->chroma_y_shift;
dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
- dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * 8;
+ dest_cb = s->current_picture.f.data[1] + (mb_x << pixel_shift)*8 + mb_y * s->uvlinesize * block_h;
+ dest_cr = s->current_picture.f.data[2] + (mb_x << pixel_shift)*8 + mb_y * s->uvlinesize * block_h;
s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + (64 << pixel_shift), s->linesize, 4);
s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + (64 << pixel_shift), dest_cr - dest_cb, 2);
@@ -1789,8 +1844,8 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
block_offset = &h->block_offset[48];
if(mb_y&1){ //FIXME move out of this function?
dest_y -= s->linesize*15;
- dest_cb-= s->uvlinesize*7;
- dest_cr-= s->uvlinesize*7;
+ dest_cb-= s->uvlinesize*(block_h-1);
+ dest_cr-= s->uvlinesize*(block_h-1);
}
if(FRAME_MBAFF) {
int list;
@@ -1816,8 +1871,8 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
}
if (!simple && IS_INTRA_PCM(mb_type)) {
+ const int bit_depth = h->sps.bit_depth_luma;
if (pixel_shift) {
- const int bit_depth = h->sps.bit_depth_luma;
int j;
GetBitContext gb;
init_get_bits(&gb, (uint8_t*)h->mb, 384*bit_depth);
@@ -1831,23 +1886,18 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
if (!h->sps.chroma_format_idc) {
for (i = 0; i < 8; i++) {
uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
- for (j = 0; j < 8; j++) {
- tmp_cb[j] = 1 << (bit_depth - 1);
- }
- }
- for (i = 0; i < 8; i++) {
uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
for (j = 0; j < 8; j++) {
- tmp_cr[j] = 1 << (bit_depth - 1);
+ tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
}
}
} else {
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < block_h; i++) {
uint16_t *tmp_cb = (uint16_t*)(dest_cb + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cb[j] = get_bits(&gb, bit_depth);
}
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < block_h; i++) {
uint16_t *tmp_cr = (uint16_t*)(dest_cr + i*uvlinesize);
for (j = 0; j < 8; j++)
tmp_cr[j] = get_bits(&gb, bit_depth);
@@ -1860,14 +1910,14 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
}
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
if (!h->sps.chroma_format_idc) {
- for (i = 0; i < 8; i++) {
- memset(dest_cb + i*uvlinesize, 128, 8);
- memset(dest_cr + i*uvlinesize, 128, 8);
+ for (i=0; i<8; i++) {
+ memset(dest_cb+ i*uvlinesize, 1 << (bit_depth - 1), 8);
+ memset(dest_cr+ i*uvlinesize, 1 << (bit_depth - 1), 8);
}
} else {
- for (i = 0; i < 8; i++) {
- memcpy(dest_cb + i*uvlinesize, h->mb + 128 + i*4, 8);
- memcpy(dest_cr + i*uvlinesize, h->mb + 160 + i*4, 8);
+ for (i=0; i<block_h; i++) {
+ memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4, 8);
+ memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4, 8);
}
}
}
@@ -1913,14 +1963,24 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
}
}else{
if(is_h264){
+ int qp[2];
+ if (CHROMA422) {
+ qp[0] = h->chroma_qp[0]+3;
+ qp[1] = h->chroma_qp[1]+3;
+ } else {
+ qp[0] = h->chroma_qp[0];
+ qp[1] = h->chroma_qp[1];
+ }
if(h->non_zero_count_cache[ scan8[CHROMA_DC_BLOCK_INDEX+0] ])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16*1 << pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16*1 << pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][qp[0]][0]);
if(h->non_zero_count_cache[ scan8[CHROMA_DC_BLOCK_INDEX+1] ])
- h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16*2 << pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
+ h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16*16*2 << pixel_shift), h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][qp[1]][0]);
h->h264dsp.h264_idct_add8(dest, block_offset,
h->mb, uvlinesize,
h->non_zero_count_cache);
- }else{
+ }
+#if CONFIG_SVQ3_DECODER
+ else{
h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*1, h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]);
h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16*16*2, h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]);
for(j=1; j<3; j++){
@@ -1932,6 +1992,7 @@ static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple, i
}
}
}
+#endif
}
}
}
@@ -2224,7 +2285,7 @@ static void idr(H264Context *h){
static void flush_dpb(AVCodecContext *avctx){
H264Context *h= avctx->priv_data;
int i;
- for(i=0; i<MAX_DELAYED_PIC_COUNT; i++) {
+ for(i=0; i<=MAX_DELAYED_PIC_COUNT; i++) {
if(h->delayed_pic[i])
h->delayed_pic[i]->f.reference = 0;
h->delayed_pic[i]= NULL;
@@ -2237,6 +2298,8 @@ static void flush_dpb(AVCodecContext *avctx){
h->s.first_field= 0;
ff_h264_reset_sei(h);
ff_mpeg_flush(avctx);
+ h->recovery_frame= -1;
+ h->sync= 0;
}
static int init_poc(H264Context *h){
@@ -2555,22 +2618,27 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->b_stride= s->mb_width*4;
+ s->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
+
s->width = 16*s->mb_width - (2>>CHROMA444)*FFMIN(h->sps.crop_right, (8<<CHROMA444)-1);
if(h->sps.frame_mbs_only_flag)
- s->height= 16*s->mb_height - (2>>CHROMA444)*FFMIN(h->sps.crop_bottom, (8<<CHROMA444)-1);
+ s->height= 16*s->mb_height - (1<<s->chroma_y_shift)*FFMIN(h->sps.crop_bottom, (16>>s->chroma_y_shift)-1);
else
- s->height= 16*s->mb_height - (4>>CHROMA444)*FFMIN(h->sps.crop_bottom, (8<<CHROMA444)-1);
+ s->height= 16*s->mb_height - (2<<s->chroma_y_shift)*FFMIN(h->sps.crop_bottom, (16>>s->chroma_y_shift)-1);
if (s->context_initialized
&& ( s->width != s->avctx->width || s->height != s->avctx->height
+ || s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
+ || h->cur_chroma_format_idc != h->sps.chroma_format_idc
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
if(h != h0) {
- av_log_missing_feature(s->avctx, "Width/height changing with threads is", 0);
+ av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
return -1; // width / height changed during parallelized decoding
}
free_tables(h, 0);
flush_dpb(s->avctx);
MPV_common_end(s);
+ h->list_count = 0;
}
if (!s->context_initialized) {
if (h != h0) {
@@ -2582,8 +2650,27 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
s->avctx->sample_aspect_ratio= h->sps.sar;
av_assert0(s->avctx->sample_aspect_ratio.den);
+ if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
+ h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
+ if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10 &&
+ (h->sps.bit_depth_luma != 9 || !CHROMA422)) {
+ s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
+ h->cur_chroma_format_idc = h->sps.chroma_format_idc;
+ h->pixel_shift = h->sps.bit_depth_luma > 8;
+
+ ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
+ ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
+ s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
+ dsputil_init(&s->dsp, s->avctx);
+ } else {
+ av_log(s->avctx, AV_LOG_DEBUG, "Unsupported bit depth: %d chroma_idc: %d\n",
+ h->sps.bit_depth_luma, h->sps.chroma_format_idc);
+ return -1;
+ }
+ }
+
if(h->sps.video_signal_type_present_flag){
- s->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ s->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
if(h->sps.colour_description_present_flag){
s->avctx->color_primaries = h->sps.color_primaries;
s->avctx->color_trc = h->sps.color_trc;
@@ -2601,14 +2688,28 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
switch (h->sps.bit_depth_luma) {
case 9 :
- s->avctx->pix_fmt = CHROMA444 ? PIX_FMT_YUV444P9 : PIX_FMT_YUV420P9;
+ if (CHROMA444)
+ s->avctx->pix_fmt = PIX_FMT_YUV444P9;
+ else
+ s->avctx->pix_fmt = PIX_FMT_YUV420P9;
break;
case 10 :
- s->avctx->pix_fmt = CHROMA444 ? PIX_FMT_YUV444P10 : PIX_FMT_YUV420P10;
+ if (CHROMA444)
+ s->avctx->pix_fmt = PIX_FMT_YUV444P10;
+ else if (CHROMA422)
+ s->avctx->pix_fmt = PIX_FMT_YUV422P10;
+ else
+ s->avctx->pix_fmt = PIX_FMT_YUV420P10;
break;
default:
if (CHROMA444){
s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? PIX_FMT_YUVJ444P : PIX_FMT_YUV444P;
+ if (s->avctx->colorspace == AVCOL_SPC_RGB) {
+ s->avctx->pix_fmt = PIX_FMT_GBR24P;
+ av_log(h->s.avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
+ }
+ }else if (CHROMA422) {
+ s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? PIX_FMT_YUVJ422P : PIX_FMT_YUV422P;
}else{
s->avctx->pix_fmt = s->avctx->get_format(s->avctx,
s->avctx->codec->pix_fmts ?
@@ -2830,6 +2931,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->ref_count[1]= h->pps.ref_count[1];
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
+ unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
h->direct_spatial_mv_pred= get_bits1(&s->gb);
}
@@ -2840,18 +2942,18 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
- if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
- av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
- h->ref_count[0]= h->ref_count[1]= 1;
- return -1;
- }
+ }
+ if(h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
+ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
+ h->ref_count[0]= h->ref_count[1]= 1;
+ return -1;
}
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
h->list_count= 2;
else
h->list_count= 1;
}else
- h->list_count= 0;
+ h->ref_count[1]= h->ref_count[0]= h->list_count= 0;
if(!default_ref_list_done){
ff_h264_fill_default_ref_list(h);
@@ -2985,8 +3087,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h0->last_slice_type = slice_type;
h->slice_num = ++h0->current_slice;
- if(h->slice_num >= MAX_SLICES){
- av_log(s->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n");
+
+ if(h->slice_num)
+ h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= s->resync_mb_y;
+ if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= s->resync_mb_y
+ && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= s->resync_mb_y
+ && h->slice_num >= MAX_SLICES) {
+ //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
+ av_log(s->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES);
}
for(j=0; j<2; j++){
@@ -3272,6 +3380,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
const int end_mb_y= s->mb_y + FRAME_MBAFF;
const int old_slice_type= h->slice_type;
const int pixel_shift = h->pixel_shift;
+ const int block_h = 16>>s->chroma_y_shift;
if(h->deblocking_filter) {
for(mb_x= start_x; mb_x<end_x; mb_x++){
@@ -3288,8 +3397,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
s->mb_x= mb_x;
s->mb_y= mb_y;
dest_y = s->current_picture.f.data[0] + ((mb_x << pixel_shift) + mb_y * s->linesize ) * 16;
- dest_cb = s->current_picture.f.data[1] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
- dest_cr = s->current_picture.f.data[2] + ((mb_x << pixel_shift) + mb_y * s->uvlinesize) * (8 << CHROMA444);
+ dest_cb = s->current_picture.f.data[1] + (mb_x << pixel_shift)*(8<<CHROMA444) + mb_y * s->uvlinesize * block_h;
+ dest_cr = s->current_picture.f.data[2] + (mb_x << pixel_shift)*(8<<CHROMA444) + mb_y * s->uvlinesize * block_h;
//FIXME simplify above
if (MB_FIELD) {
@@ -3297,14 +3406,14 @@ static void loop_filter(H264Context *h, int start_x, int end_x){
uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
if(mb_y&1){ //FIXME move out of this function?
dest_y -= s->linesize*15;
- dest_cb-= s->uvlinesize*((8 << CHROMA444)-1);
- dest_cr-= s->uvlinesize*((8 << CHROMA444)-1);
+ dest_cb-= s->uvlinesize*(block_h-1);
+ dest_cr-= s->uvlinesize*(block_h-1);
}
} else {
linesize = h->mb_linesize = s->linesize;
uvlinesize = h->mb_uvlinesize = s->uvlinesize;
}
- backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, CHROMA444, 0);
+ backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
if(fill_filter_caches(h, mb_type))
continue;
h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mb_xy]);
@@ -3475,7 +3584,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
if(s->mb_y >= s->mb_height){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
- if(get_bits_count(&s->gb) == s->gb.size_in_bits ) {
+ if( get_bits_count(&s->gb) == s->gb.size_in_bits
+ || get_bits_count(&s->gb) < s->gb.size_in_bits && s->avctx->error_recognition < FF_ER_AGGRESSIVE) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
return 0;
@@ -3525,6 +3635,7 @@ static int execute_decode_slices(H264Context *h, int context_count){
hx = h->thread_context[i];
hx->s.error_recognition = avctx->error_recognition;
hx->s.error_count = 0;
+ hx->x264_build= h->x264_build;
}
avctx->execute(avctx, (void *)decode_slice,
@@ -3612,13 +3723,13 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
s->workaround_bugs |= FF_BUG_TRUNCATED;
if(!(s->workaround_bugs & FF_BUG_TRUNCATED)){
- while(ptr[dst_length - 1] == 0 && dst_length > 0)
+ while(dst_length > 0 && ptr[dst_length - 1] == 0)
dst_length--;
}
bit_length= !dst_length ? 0 : (8*dst_length - ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
if(s->avctx->debug&FF_DEBUG_STARTCODE){
- av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d/%d length %d\n", hx->nal_unit_type, buf_index, buf_size, dst_length);
+ av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length);
}
if (h->is_avc && (nalsize != consumed) && nalsize){
@@ -3668,9 +3779,22 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
if((err = decode_slice_header(hx, h)))
break;
+ if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
+ h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) %
+ (1 << h->sps.log2_max_frame_num);
+ }
+
s->current_picture_ptr->f.key_frame |=
- (hx->nal_unit_type == NAL_IDR_SLICE) ||
- (h->sei_recovery_frame_cnt >= 0);
+ (hx->nal_unit_type == NAL_IDR_SLICE);
+
+ if (h->recovery_frame == h->frame_num) {
+ h->sync |= 1;
+ h->recovery_frame = -1;
+ }
+
+ h->sync |= !!s->current_picture_ptr->f.key_frame;
+ h->sync |= 3*!!(s->flags2 & CODEC_FLAG2_SHOW_ALL);
+ s->current_picture_ptr->sync = h->sync;
if (h->current_slice == 1) {
if(!(s->flags2 & CODEC_FLAG2_CHUNKS)) {
@@ -3733,7 +3857,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
break;
case NAL_SPS:
init_get_bits(&s->gb, ptr, bit_length);
- ff_h264_decode_seq_parameter_set(h);
+ if(ff_h264_decode_seq_parameter_set(h) < 0 && h->is_avc && (nalsize != consumed) && nalsize){
+ av_log(h->s.avctx, AV_LOG_DEBUG, "SPS decoding failure, trying alternative mode\n");
+ init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*nalsize);
+ ff_h264_decode_seq_parameter_set(h);
+ }
if (s->flags& CODEC_FLAG_LOW_DELAY ||
(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
@@ -3741,21 +3869,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
if(avctx->has_b_frames < 2)
avctx->has_b_frames= !s->low_delay;
-
- if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma) {
- if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
- avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
- h->pixel_shift = h->sps.bit_depth_luma > 8;
-
- ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma);
- ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma);
- s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
- dsputil_init(&s->dsp, s->avctx);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
- return -1;
- }
- }
break;
case NAL_PPS:
init_get_bits(&s->gb, ptr, bit_length);
@@ -3849,6 +3962,8 @@ static int decode_frame(AVCodecContext *avctx,
return 0;
}
+ if(h->is_avc && buf_size >= 9 && AV_RB32(buf)==0x0164001F && buf[5] && buf[8]==0x67)
+ return ff_h264_decode_extradata(h, buf, buf_size);
buf_index=decode_nal_units(h, buf, buf_size);
if(buf_index < 0)
@@ -3872,13 +3987,12 @@ static int decode_frame(AVCodecContext *avctx,
field_end(h, 0);
- if (!h->next_output_pic) {
- /* Wait for second field. */
- *data_size = 0;
-
- } else {
- *data_size = sizeof(AVFrame);
- *pict = *(AVFrame*)h->next_output_pic;
+ *data_size = 0; /* Wait for second field. */
+ if (h->next_output_pic && h->next_output_pic->sync) {
+ if(h->sync>1 || h->next_output_pic->f.pict_type != AV_PICTURE_TYPE_B){
+ *data_size = sizeof(AVFrame);
+ *pict = *(AVFrame*)h->next_output_pic;
+ }
}
}
@@ -3913,6 +4027,7 @@ static inline void fill_mb_avail(H264Context *h){
#undef random
#define COUNT 8000
#define SIZE (COUNT*40)
+extern AVCodec ff_h264_decoder;
int main(void){
int i;
uint8_t temp[SIZE];
@@ -3922,6 +4037,8 @@ int main(void){
DSPContext dsp;
AVCodecContext avctx;
+ avcodec_get_context_defaults3(&avctx, &ff_h264_decoder);
+
dsputil_init(&dsp, &avctx);
init_put_bits(&pb, temp, SIZE);