author     Baptiste Coudurier <baptiste.coudurier@gmail.com>    2011-08-16 17:05:44 +0200
committer  Ronald S. Bultje <rsbultje@gmail.com>                2011-10-21 01:00:41 -0700
commit     76741b0e56bfbc74cfa32ff59e15cf420463569b (patch)
tree       e52409a6a6bf1b94c33e8c13fbb67f690f707c09 /libavcodec/h264pred_template.c
parent     dc49bf127010fdff2c3282755407cedd429475f5 (diff)
h264: 4:2:2 intra decoding support
Signed-off-by: Diego Biurrun <diego@biurrun.de>
Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
Diffstat (limited to 'libavcodec/h264pred_template.c')
-rw-r--r--   libavcodec/h264pred_template.c   138
1 file changed, 138 insertions(+), 0 deletions(-)
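
For context: 4:2:2 material subsamples chroma horizontally only, so the chroma blocks of a 16x16 macroblock are 8 samples wide but 16 rows tall, and the patch adds 8x16 counterparts of the existing 8x8 chroma intra predictors. Several of the new DC variants simply run the 8x8 predictor once per 8-row half. Below is a minimal standalone 8-bit sketch of that pattern (illustration only; the function names here are ad hoc, and the real code is the bit-depth template in the diff that follows):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Ad-hoc 8-bit stand-in for the template's pred8x8_left_dc:
 * rows 0-3 get the average of the left neighbours of rows 0-3,
 * rows 4-7 the average of the left neighbours of rows 4-7. */
static void pred8x8_left_dc_8bit(uint8_t *src, int stride)
{
    int y, dc0 = 0, dc1 = 0;
    for (y = 0; y < 4; y++) {
        dc0 += src[-1 +  y      * stride];
        dc1 += src[-1 + (y + 4) * stride];
    }
    dc0 = (dc0 + 2) >> 2;
    dc1 = (dc1 + 2) >> 2;
    for (y = 0; y < 4; y++) {
        memset(src +  y      * stride, dc0, 8);
        memset(src + (y + 4) * stride, dc1, 8);
    }
}

/* 8x16 variant for 4:2:2 chroma: the 8x8 predictor applied to the
 * top and bottom halves, mirroring pred8x16_left_dc in the patch. */
static void pred8x16_left_dc_8bit(uint8_t *src, int stride)
{
    pred8x8_left_dc_8bit(src,              stride);
    pred8x8_left_dc_8bit(src + 8 * stride, stride);
}

int main(void)
{
    uint8_t buf[16 * 16] = {0};
    uint8_t *src = buf + 1;            /* column 0 holds the left neighbours */
    int y, stride = 16;

    for (y = 0; y < 16; y++)
        buf[y * stride] = y < 8 ? 40 : 200;
    pred8x16_left_dc_8bit(src, stride);
    printf("top half DC %d, bottom half DC %d\n", src[0], src[15 * stride]);
    return 0;
}

Built as-is, the test fills the left edge with 40 in the top half and 200 in the bottom half, and prints those two values back as the per-half DC predictions.
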
diff --git a/libavcodec/h264pred_template.c b/libavcodec/h264pred_template.c
index 750e82c..d4f654e 100644
--- a/libavcodec/h264pred_template.c
+++ b/libavcodec/h264pred_template.c
@@ -454,6 +454,19 @@ static void FUNCC(pred8x8_vertical)(uint8_t *_src, int _stride){
}
}
+static void FUNCC(pred8x16_vertical)(uint8_t *_src, int _stride){
+ int i;
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel4 a= AV_RN4PA(((pixel4*)(src-stride))+0);
+ const pixel4 b= AV_RN4PA(((pixel4*)(src-stride))+1);
+
+ for(i=0; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, b);
+ }
+}
+
static void FUNCC(pred8x8_horizontal)(uint8_t *_src, int stride){
int i;
pixel *src = (pixel*)_src;
@@ -466,6 +479,17 @@ static void FUNCC(pred8x8_horizontal)(uint8_t *_src, int stride){
}
}
+static void FUNCC(pred8x16_horizontal)(uint8_t *_src, int stride){
+ int i;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+ for(i=0; i<16; i++){
+ const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, a);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, a);
+ }
+}
+
#define PRED8x8_X(n, v)\
static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, int stride){\
int i;\
@@ -482,6 +506,11 @@ PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1);
PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0);
PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1);
+static void FUNCC(pred8x16_128_dc)(uint8_t *_src, int stride){
+ FUNCC(pred8x8_128_dc)(_src, stride);
+ FUNCC(pred8x8_128_dc)(_src+8*stride, stride);
+}
+
static void FUNCC(pred8x8_left_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc2;
@@ -507,6 +536,11 @@ static void FUNCC(pred8x8_left_dc)(uint8_t *_src, int stride){
}
}
+static void FUNCC(pred8x16_left_dc)(uint8_t *_src, int stride){
+ FUNCC(pred8x8_left_dc)(_src, stride);
+ FUNCC(pred8x8_left_dc)(_src+8*stride, stride);
+}
+
static void FUNCC(pred8x8_top_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc1;
@@ -532,6 +566,27 @@ static void FUNCC(pred8x8_top_dc)(uint8_t *_src, int stride){
}
}
+static void FUNCC(pred8x16_top_dc)(uint8_t *_src, int stride){
+ int i;
+ int dc0, dc1;
+ pixel4 dc0splat, dc1splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=0;
+ for(i=0;i<4; i++){
+ dc0+= src[i-stride];
+ dc1+= src[4+i-stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+
+ for(i=0; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+}
+
static void FUNCC(pred8x8_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc1, dc2;
@@ -560,6 +615,48 @@ static void FUNCC(pred8x8_dc)(uint8_t *_src, int stride){
}
}
+static void FUNCC(pred8x16_dc)(uint8_t *_src, int stride){
+ int i;
+ int dc0, dc1, dc2, dc3, dc4;
+ pixel4 dc0splat, dc1splat, dc2splat, dc3splat, dc4splat, dc5splat, dc6splat, dc7splat;
+ pixel *src = (pixel*)_src;
+ stride >>= sizeof(pixel)-1;
+
+ dc0=dc1=dc2=dc3=dc4=0;
+ for(i=0;i<4; i++){
+ dc0+= src[-1+i*stride] + src[i-stride];
+ dc1+= src[4+i-stride];
+ dc2+= src[-1+(i+4)*stride];
+ dc3+= src[-1+(i+8)*stride];
+ dc4+= src[-1+(i+12)*stride];
+ }
+ dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
+ dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
+ dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
+ dc3splat = PIXEL_SPLAT_X4((dc1 + dc2 + 4)>>3);
+ dc4splat = PIXEL_SPLAT_X4((dc3 + 2)>>2);
+ dc5splat = PIXEL_SPLAT_X4((dc1 + dc3 + 4)>>3);
+ dc6splat = PIXEL_SPLAT_X4((dc4 + 2)>>2);
+ dc7splat = PIXEL_SPLAT_X4((dc1 + dc4 + 4)>>3);
+
+ for(i=0; i<4; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc0splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc1splat);
+ }
+ for(i=4; i<8; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc2splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc3splat);
+ }
+ for(i=8; i<12; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc4splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc5splat);
+ }
+ for(i=12; i<16; i++){
+ AV_WN4PA(((pixel4*)(src+i*stride))+0, dc6splat);
+ AV_WN4PA(((pixel4*)(src+i*stride))+1, dc7splat);
+ }
+}
+
//the following 4 function should not be optimized!
static void FUNC(pred8x8_mad_cow_dc_l0t)(uint8_t *src, int stride){
FUNCC(pred8x8_top_dc)(src, stride);
@@ -618,6 +715,47 @@ static void FUNCC(pred8x8_plane)(uint8_t *_src, int _stride){
}
}
+static void FUNCC(pred8x16_plane)(uint8_t *_src, int _stride){
+ int j, k;
+ int a;
+ INIT_CLIP
+ pixel *src = (pixel*)_src;
+ int stride = _stride>>(sizeof(pixel)-1);
+ const pixel * const src0 = src +3-stride;
+ const pixel * src1 = src +8*stride-1;
+ const pixel * src2 = src1-2*stride; // == src+6*stride-1;
+ int H = src0[1] - src0[-1];
+ int V = src1[0] - src2[ 0];
+
+ for (k = 2; k <= 4; ++k) {
+ src1 += stride; src2 -= stride;
+ H += k*(src0[k] - src0[-k]);
+ V += k*(src1[0] - src2[ 0]);
+ }
+ for (; k <= 8; ++k) {
+ src1 += stride; src2 -= stride;
+ V += k*(src1[0] - src2[0]);
+ }
+
+ H = (17*H+16) >> 5;
+ V = (5*V+32) >> 6;
+
+ a = 16*(src1[0] + src2[8] + 1) - 7*V - 3*H;
+ for(j=16; j>0; --j) {
+ int b = a;
+ a += V;
+ src[0] = CLIP((b ) >> 5);
+ src[1] = CLIP((b+ H) >> 5);
+ src[2] = CLIP((b+2*H) >> 5);
+ src[3] = CLIP((b+3*H) >> 5);
+ src[4] = CLIP((b+4*H) >> 5);
+ src[5] = CLIP((b+5*H) >> 5);
+ src[6] = CLIP((b+6*H) >> 5);
+ src[7] = CLIP((b+7*H) >> 5);
+ src += stride;
+ }
+}
+
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
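
The most involved addition is pred8x16_dc above: it covers the 8x16 block with eight 4x4 regions (four row bands by two column halves) and fills each region with the rounded average of the neighbour groups directly adjacent to it, which is why only the regions averaging two groups use the 8-sample rounding (sum + 4) >> 3. A plain 8-bit sketch of the same region layout (illustration only, with an ad-hoc name; not the bit-depth template code):

#include <stdint.h>
#include <string.h>

void pred8x16_dc_8bit(uint8_t *src, int stride)
{
    int i, dc0 = 0, dc1 = 0, dc2 = 0, dc3 = 0, dc4 = 0;

    /* neighbour sums: dc0 = left rows 0-3 + top cols 0-3, dc1 = top cols 4-7,
     * dc2/dc3/dc4 = left rows 4-7 / 8-11 / 12-15 */
    for (i = 0; i < 4; i++) {
        dc0 += src[-1 + i * stride] + src[i - stride];
        dc1 += src[4 + i - stride];
        dc2 += src[-1 + (i +  4) * stride];
        dc3 += src[-1 + (i +  8) * stride];
        dc4 += src[-1 + (i + 12) * stride];
    }

    for (i = 0; i < 16; i++) {
        /* columns 0-3 of each 4-row band */
        int left  = i <  4 ? (dc0 + 4) >> 3
                  : i <  8 ? (dc2 + 2) >> 2
                  : i < 12 ? (dc3 + 2) >> 2
                  :          (dc4 + 2) >> 2;
        /* columns 4-7 of each 4-row band */
        int right = i <  4 ? (dc1 + 2) >> 2
                  : i <  8 ? (dc1 + dc2 + 4) >> 3
                  : i < 12 ? (dc1 + dc3 + 4) >> 3
                  :          (dc1 + dc4 + 4) >> 3;
        memset(src + i * stride,     left,  4);
        memset(src + i * stride + 4, right, 4);
    }
}

The right-hand regions of the lower three bands blend the top-right sum dc1 with the left sum of their own band, which is exactly what the dc3splat, dc5splat and dc7splat values do in the patch.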