summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--postproc/rgb2rgb.c285
-rw-r--r--postproc/rgb2rgb.h31
-rw-r--r--postproc/rgb2rgb_template.c512
3 files changed, 827 insertions, 1 deletions
diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 3878e48..995f888 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -244,6 +244,20 @@ void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size)
rgb15to16_C(src, dst, src_size);
}
/* Convert a 16bpp (5-6-5) buffer to 15bpp (5-5-5). */
void rgb16to15(const uint8_t *src,uint8_t *dst,unsigned src_size)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		rgb16to15_MMX2(src, dst, src_size);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		rgb16to15_3DNow(src, dst, src_size);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		rgb16to15_MMX(src, dst, src_size);
		return;
	}
#endif
	rgb16to15_C(src, dst, src_size);
}
/**
 * Palette is assumed to contain bgr32
*/
@@ -387,6 +401,61 @@ void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
rgb32tobgr32_C(src, dst, src_size);
}
/*
 * Convert 32bpp to 24bpp while swapping the first and third byte of every
 * pixel; the fourth (filler/alpha) source byte is dropped.
 * src_size is the source buffer size in bytes.
 */
void rgb32tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint8_t *s = src;
	const uint8_t *end = src + (src_size & ~3U); /* whole 4-byte pixels only */
	uint8_t *d = dst;

	while(s < end)
	{
		d[0] = s[2];
		d[1] = s[1];
		d[2] = s[0];
		d += 3;
		s += 4;
	}
}
+
/* Convert 32bpp to BGR16 (5-6-5 with R/B swapped). */
void rgb32tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		rgb32tobgr16_MMX2(src, dst, src_size);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		rgb32tobgr16_3DNow(src, dst, src_size);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		rgb32tobgr16_MMX(src, dst, src_size);
		return;
	}
#endif
	rgb32tobgr16_C(src, dst, src_size);
}
+
/* Convert 32bpp to BGR15 (5-5-5 with R/B swapped). */
void rgb32tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		rgb32tobgr15_MMX2(src, dst, src_size);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		rgb32tobgr15_3DNow(src, dst, src_size);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		rgb32tobgr15_MMX(src, dst, src_size);
		return;
	}
#endif
	rgb32tobgr15_C(src, dst, src_size);
}
+
/*
 * Convert 24bpp to 32bpp while swapping the first and third byte of every
 * pixel; the fourth destination byte is zero-filled.
 * src_size is the source buffer size in bytes.
 */
void rgb24tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	/* BUGFIX: the source holds 3 bytes per pixel, so the pixel count is
	 * src_size/3 — the old src_size>>2 silently dropped a quarter of the
	 * image (compare rgb16tobgr32, which divides by its 2-byte pixel size). */
	unsigned num_pixels = src_size / 3;
	for(i=0; i<num_pixels; i++)
	{
		dst[4*i + 0] = src[3*i + 2];
		dst[4*i + 1] = src[3*i + 1];
		dst[4*i + 2] = src[3*i + 0];
		dst[4*i + 3] = 0;
	}
}
+
void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
#ifdef CAN_COMPILE_X86_ASM
@@ -402,6 +471,186 @@ void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
rgb24tobgr24_C(src, dst, src_size);
}
/* Convert 24bpp to BGR16 (5-6-5 with R/B swapped). */
void rgb24tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		rgb24tobgr16_MMX2(src, dst, src_size);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		rgb24tobgr16_3DNow(src, dst, src_size);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		rgb24tobgr16_MMX(src, dst, src_size);
		return;
	}
#endif
	rgb24tobgr16_C(src, dst, src_size);
}
+
/* Convert 24bpp to BGR15 (5-5-5 with R/B swapped). */
void rgb24tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		rgb24tobgr15_MMX2(src, dst, src_size);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		rgb24tobgr15_3DNow(src, dst, src_size);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		rgb24tobgr15_MMX(src, dst, src_size);
		return;
	}
#endif
	rgb24tobgr15_C(src, dst, src_size);
}
+
/*
 * Expand each 16bpp (5-6-5) pixel to 4 bytes with the top and bottom
 * channels swapped; the 4th byte is zero-filled.  Each 5/6-bit channel is
 * scaled into the top bits of its output byte.
 */
void rgb16tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint16_t *s = (const uint16_t *)src;
	const uint16_t *end = s + src_size/2;
	uint8_t *d = dst;

	while(s < end)
	{
		register uint16_t px = *s++;
		d[0] = (px&0xF800)>>8;	/* top 5-bit channel */
		d[1] = (px&0x07E0)>>3;	/* middle 6-bit channel */
		d[2] = (px&0x001F)<<3;	/* bottom 5-bit channel */
		d[3] = 0;
		d += 4;
	}
}
+
/*
 * Expand each 16bpp (5-6-5) pixel to 3 bytes with the top and bottom
 * channels swapped.  Each 5/6-bit channel is scaled into the top bits of
 * its output byte.
 */
void rgb16tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint16_t *s = (const uint16_t *)src;
	const uint16_t *end = s + src_size/2;
	uint8_t *d = dst;

	while(s < end)
	{
		register uint16_t px = *s++;
		d[0] = (px&0xF800)>>8;	/* top 5-bit channel */
		d[1] = (px&0x07E0)>>3;	/* middle 6-bit channel */
		d[2] = (px&0x001F)<<3;	/* bottom 5-bit channel */
		d += 3;
	}
}
+
/*
 * Swap the top and bottom 5-bit channels of each 16bpp (5-6-5) pixel,
 * leaving the 6-bit middle channel in place.
 * src_size is the source buffer size in bytes.
 */
void rgb16tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	unsigned num_pixels = src_size >> 1;

	for(i=0; i<num_pixels; i++)
	{
		unsigned b,g,r;
		register uint16_t rgb;
		/* BUGFIX: read/write whole 16bit pixels — the old code indexed the
		 * byte arrays (src[2*i]) so only the low byte was ever converted
		 * and the store truncated the 16bit result. */
		rgb = ((const uint16_t *)src)[i];
		r = rgb&0x1F;
		g = (rgb&0x7E0)>>5;
		b = (rgb&0xF800)>>11;
		((uint16_t *)dst)[i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
	}
}
+
/*
 * Convert 16bpp (5-6-5) to 15bpp (5-5-5) while swapping the top and bottom
 * channels.  Green is reduced from 6 to 5 bits by dropping its LSB.
 * src_size is the source buffer size in bytes.
 */
void rgb16tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	unsigned num_pixels = src_size >> 1;

	for(i=0; i<num_pixels; i++)
	{
		unsigned b,g,r;
		register uint16_t rgb;
		/* BUGFIX: read/write whole 16bit pixels — the old byte indexing
		 * (src[2*i]) converted only the low byte and truncated the store. */
		rgb = ((const uint16_t *)src)[i];
		r = rgb&0x1F;
		g = (rgb&0x7E0)>>5;
		b = (rgb&0xF800)>>11;
		/* BUGFIX: keep the HIGH 5 bits of the 6-bit green (g>>1); the old
		 * (g&0x1F) kept the low bits, distorting bright greens. */
		((uint16_t *)dst)[i] = (b&0x1F) | (((g>>1)&0x1F)<<5) | ((r&0x1F)<<10);
	}
}
+
/*
 * Expand each 15bpp (5-5-5) pixel to 4 bytes with the top and bottom
 * channels swapped; the 4th byte is zero-filled.  Each 5-bit channel is
 * scaled into the top bits of its output byte.
 */
void rgb15tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint16_t *s = (const uint16_t *)src;
	const uint16_t *end = s + src_size/2;
	uint8_t *d = dst;

	while(s < end)
	{
		register uint16_t px = *s++;
		d[0] = (px&0x7C00)>>7;	/* top 5-bit channel */
		d[1] = (px&0x03E0)>>2;	/* middle 5-bit channel */
		d[2] = (px&0x001F)<<3;	/* bottom 5-bit channel */
		d[3] = 0;
		d += 4;
	}
}
+
/*
 * Expand each 15bpp (5-5-5) pixel to 3 bytes with the top and bottom
 * channels swapped.  Each 5-bit channel is scaled into the top bits of its
 * output byte.
 */
void rgb15tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint16_t *s = (const uint16_t *)src;
	const uint16_t *end = s + src_size/2;
	uint8_t *d = dst;

	while(s < end)
	{
		register uint16_t px = *s++;
		d[0] = (px&0x7C00)>>7;	/* top 5-bit channel */
		d[1] = (px&0x03E0)>>2;	/* middle 5-bit channel */
		d[2] = (px&0x001F)<<3;	/* bottom 5-bit channel */
		d += 3;
	}
}
+
/*
 * Convert 15bpp (5-5-5) to 16bpp (5-6-5) while swapping the top and bottom
 * channels.  Green is expanded from 5 to 6 bits.
 * src_size is the source buffer size in bytes.
 */
void rgb15tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	unsigned num_pixels = src_size >> 1;

	for(i=0; i<num_pixels; i++)
	{
		unsigned b,g,r;
		register uint16_t rgb;
		/* BUGFIX: read/write whole 16bit pixels — the old byte indexing
		 * (src[2*i]) converted only the low byte and truncated the store. */
		rgb = ((const uint16_t *)src)[i];
		r = rgb&0x1F;
		g = (rgb&0x3E0)>>5;
		b = (rgb&0x7C00)>>10;
		/* BUGFIX: shift green left one bit (5 -> 6 bit field), mirroring
		 * what rgb15to16 does; the old code left it unscaled. */
		((uint16_t *)dst)[i] = (b&0x1F) | (((g<<1)&0x3F)<<5) | ((r&0x1F)<<11);
	}
}
+
/*
 * Swap the top and bottom 5-bit channels of each 15bpp (5-5-5) pixel,
 * leaving the middle channel in place.
 * src_size is the source buffer size in bytes.
 */
void rgb15tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	unsigned num_pixels = src_size >> 1;

	for(i=0; i<num_pixels; i++)
	{
		unsigned b,g,r;
		register uint16_t rgb;
		/* BUGFIX: read/write whole 16bit pixels — the old code indexed the
		 * byte arrays (src[2*i]) so only the low byte was ever converted
		 * and the store truncated the 16bit result. */
		rgb = ((const uint16_t *)src)[i];
		r = rgb&0x1F;
		g = (rgb&0x3E0)>>5;
		b = (rgb&0x7C00)>>10;
		((uint16_t *)dst)[i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
	}
}
+
/*
 * Swap the channels of each 8bpp (3-3-2 layout) pixel: the 2-bit top
 * channel moves to the low 3-bit field (doubled to scale 2 -> 3 bits) and
 * the low 3-bit channel moves to the 2-bit top field (high bit dropped).
 */
void rgb8tobgr8(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;

	for(i=0; i<src_size; i++)
	{
		register uint8_t px = src[i];
		/* Equivalent to extracting r=px&7, g=(px>>3)&7, b=px>>6 and
		 * repacking as ((b<<1)&7) | (g<<3) | ((r&3)<<6), folded into
		 * direct mask-and-shift form. */
		dst[i] = ((px>>5)&0x06) | (px&0x38) | ((px&0x03)<<6);
	}
}
+
/**
*
* height should be a multiple of 2 and width should be a multiple of 16 (if this is a
@@ -564,3 +813,39 @@ void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst,
#endif
interleaveBytes_C(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
}
+
/* Upsample two chroma planes from 1/4 to 1/2 resolution (VU9 -> VU12). */
void vu9_to_vu12(const uint8_t *src1, const uint8_t *src2,
	uint8_t *dst1, uint8_t *dst2,
	unsigned width, unsigned height,
	unsigned srcStride1, unsigned srcStride2,
	unsigned dstStride1, unsigned dstStride2)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		vu9_to_vu12_MMX2(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		vu9_to_vu12_3DNow(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		vu9_to_vu12_MMX(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
		return;
	}
#endif
	vu9_to_vu12_C(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
}
+
/* Interleave Y/V/U planes (YVU9) into packed YUY2. */
void yvu9_to_yuy2(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
	uint8_t *dst,
	unsigned width, unsigned height,
	unsigned srcStride1, unsigned srcStride2,
	unsigned srcStride3, unsigned dstStride)
{
#ifdef CAN_COMPILE_X86_ASM
	/* Dispatch to the fastest implementation this CPU supports. */
	if(gCpuCaps.hasMMX2)
	{
		yvu9_to_yuy2_MMX2(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
		return;
	}
	if(gCpuCaps.has3DNow)
	{
		yvu9_to_yuy2_3DNow(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
		return;
	}
	if(gCpuCaps.hasMMX)
	{
		yvu9_to_yuy2_MMX(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
		return;
	}
#endif
	yvu9_to_yuy2_C(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
}
diff --git a/postproc/rgb2rgb.h b/postproc/rgb2rgb.h
index a0ce006..1e1064d 100644
--- a/postproc/rgb2rgb.h
+++ b/postproc/rgb2rgb.h
@@ -9,6 +9,7 @@
#ifndef RGB2RGB_INCLUDED
#define RGB2RGB_INCLUDED
+/* A full collection of RGB to RGB(BGR) converters */
extern void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb24to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb24to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
@@ -18,10 +19,26 @@ extern void rgb32to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb15to24(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb15to32(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb16to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb16to24(const uint8_t *src,uint8_t *dst,unsigned src_size);
extern void rgb16to32(const uint8_t *src,uint8_t *dst,unsigned src_size);
-extern void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb24tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size);
extern void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb24tobgr16(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb24tobgr15(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb32tobgr24(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb32tobgr16(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb32tobgr15(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb16tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb16tobgr24(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb16tobgr16(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb16tobgr15(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb15tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb15tobgr24(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb15tobgr16(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb15tobgr15(const uint8_t *src, uint8_t *dst, unsigned src_size);
+extern void rgb8tobgr8(const uint8_t *src, uint8_t *dst, unsigned src_size);
extern void palette8torgb32(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette);
@@ -46,6 +63,18 @@ extern void planar2x(const uint8_t *src, uint8_t *dst, int width, int height, in
extern void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst,
unsigned width, unsigned height, unsigned src1Stride,
unsigned src2Stride, unsigned dstStride);
+
+extern void vu9_to_vu12(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ unsigned width, unsigned height,
+ unsigned srcStride1, unsigned srcStride2,
+ unsigned dstStride1, unsigned dstStride2);
+
+extern void yvu9_to_yuy2(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ unsigned width, unsigned height,
+ unsigned srcStride1, unsigned srcStride2,
+ unsigned srcStride3, unsigned dstStride);
#define MODE_RGB 0x1
diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c
index 464ad4c..79ec4fc 100644
--- a/postproc/rgb2rgb_template.c
+++ b/postproc/rgb2rgb_template.c
@@ -251,6 +251,61 @@ static inline void RENAME(bgr24torgb24)(const uint8_t *src, uint8_t *dst, unsign
}
}
/*
 * Convert 16bpp (5-6-5) to 15bpp (5-5-5): shift the red+green fields right
 * one bit (keeping them via mask15rg) and keep blue unchanged (mask15b),
 * which drops the least significant green bit.
 * MMX path handles 16 bytes (8 pixels) per iteration; plain C finishes up.
 */
static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,unsigned src_size)
{
	register const uint8_t* s=src;
	register uint8_t* d=dst;
	register const uint8_t *end;
	uint8_t *mm_end;
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*s));
	__asm __volatile("movq %0, %%mm7"::"m"(mask15rg));
	__asm __volatile("movq %0, %%mm6"::"m"(mask15b));
	/* round the end pointer down to a multiple of 16 for the unrolled loop */
	mm_end = (uint8_t*)((((unsigned long)end)/16)*16);
	while(s<mm_end)
	{
		__asm __volatile(
		PREFETCH" 32%1\n\t"
		"movq %1, %%mm0\n\t"
		"movq 8%1, %%mm2\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm2, %%mm3\n\t"
		"psrlq $1, %%mm0\n\t"
		"psrlq $1, %%mm2\n\t"
		"pand %%mm7, %%mm0\n\t"
		"pand %%mm7, %%mm2\n\t"
		"pand %%mm6, %%mm1\n\t"
		"pand %%mm6, %%mm3\n\t"
		"por %%mm1, %%mm0\n\t"
		"por %%mm3, %%mm2\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		MOVNTQ" %%mm2, 8%0"
		:"=m"(*d)
		:"m"(*s)
		);
		d+=16;
		s+=16;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	/* NOTE(review): this tail loop consumes 4 bytes (2 pixels) per pass but
	 * only tests s<end, so a source size that is not a multiple of 4 would
	 * overrun by 2 bytes; the single-pixel handler below can then never run
	 * (the while only exits once s>=end) — confirm intended sizes. */
	while(s < end)
	{
		register uint32_t x= *((uint32_t *)s);
		*((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
		s+=4;
		d+=4;
	}
	if(s < end)
	{
		register uint16_t x= *((uint16_t *)s);
		*((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
		s+=2;
		d+=2;
	}
}
+
static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
@@ -315,6 +370,70 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, unsigned
}
}
/*
 * Pack 32bpp pixels into 16bpp (5-6-5) with the red and blue channels
 * swapped relative to rgb32to16.  The MMX path converts 4 source pixels
 * (16 bytes) into 4 packed 16bit pixels per iteration using the
 * red_16mask/green_16mask/blue_16mask constants; plain C handles the tail.
 */
static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	const uint8_t *mm_end;
#endif
	uint16_t *d = (uint16_t *)dst;
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
	__asm __volatile(
	    "movq %0, %%mm7\n\t"
	    "movq %1, %%mm6\n\t"
	    ::"m"(red_16mask),"m"(green_16mask));
	/* round end down to a multiple of 16 so each pass reads full 16 bytes */
	mm_end = (uint8_t*)((((unsigned long)end)/16)*16);
	while(s < mm_end)
	{
	    __asm __volatile(
		PREFETCH" 32%1\n\t"
		"movd %1, %%mm0\n\t"
		"movd 4%1, %%mm3\n\t"
		"punpckldq 8%1, %%mm0\n\t"
		"punpckldq 12%1, %%mm3\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm0, %%mm2\n\t"
		"movq %%mm3, %%mm4\n\t"
		"movq %%mm3, %%mm5\n\t"
		"psllq $8, %%mm0\n\t"
		"psllq $8, %%mm3\n\t"
		"pand %%mm7, %%mm0\n\t"
		"pand %%mm7, %%mm3\n\t"
		"psrlq $5, %%mm1\n\t"
		"psrlq $5, %%mm4\n\t"
		"pand %%mm6, %%mm1\n\t"
		"pand %%mm6, %%mm4\n\t"
		"psrlq $19, %%mm2\n\t"
		"psrlq $19, %%mm5\n\t"
		"pand %2, %%mm2\n\t"
		"pand %2, %%mm5\n\t"
		"por %%mm1, %%mm0\n\t"
		"por %%mm4, %%mm3\n\t"
		"por %%mm2, %%mm0\n\t"
		"por %%mm5, %%mm3\n\t"
		"psllq $16, %%mm3\n\t"
		"por %%mm3, %%mm0\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		:"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
		d += 4;
		s += 16;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		const int r= *s++;
		const int g= *s++;
		const int b= *s++;
		*d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
		s++;	/* skip the 4th (filler/alpha) source byte */
	}
}
+
static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
@@ -379,6 +498,70 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned
}
}
/*
 * Pack 32bpp pixels into 15bpp (5-5-5) with the red and blue channels
 * swapped relative to rgb32to15.  Same structure as rgb32tobgr16, using the
 * 15bpp masks and shift amounts ($7/$6 instead of $8/$5).
 */
static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	const uint8_t *mm_end;
#endif
	uint16_t *d = (uint16_t *)dst;
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
	__asm __volatile(
	    "movq %0, %%mm7\n\t"
	    "movq %1, %%mm6\n\t"
	    ::"m"(red_15mask),"m"(green_15mask));
	/* round end down to a multiple of 16 so each pass reads full 16 bytes */
	mm_end = (uint8_t*)((((unsigned long)end)/16)*16);
	while(s < mm_end)
	{
	    __asm __volatile(
		PREFETCH" 32%1\n\t"
		"movd %1, %%mm0\n\t"
		"movd 4%1, %%mm3\n\t"
		"punpckldq 8%1, %%mm0\n\t"
		"punpckldq 12%1, %%mm3\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm0, %%mm2\n\t"
		"movq %%mm3, %%mm4\n\t"
		"movq %%mm3, %%mm5\n\t"
		"psllq $7, %%mm0\n\t"
		"psllq $7, %%mm3\n\t"
		"pand %%mm7, %%mm0\n\t"
		"pand %%mm7, %%mm3\n\t"
		"psrlq $6, %%mm1\n\t"
		"psrlq $6, %%mm4\n\t"
		"pand %%mm6, %%mm1\n\t"
		"pand %%mm6, %%mm4\n\t"
		"psrlq $19, %%mm2\n\t"
		"psrlq $19, %%mm5\n\t"
		"pand %2, %%mm2\n\t"
		"pand %2, %%mm5\n\t"
		"por %%mm1, %%mm0\n\t"
		"por %%mm4, %%mm3\n\t"
		"por %%mm2, %%mm0\n\t"
		"por %%mm5, %%mm3\n\t"
		"psllq $16, %%mm3\n\t"
		"por %%mm3, %%mm0\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
		d += 4;
		s += 16;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		const int r= *s++;
		const int g= *s++;
		const int b= *s++;
		*d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
		s++;	/* skip the 4th (filler/alpha) source byte */
	}
}
+
static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
@@ -442,6 +625,69 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned
}
}
/*
 * Pack 24bpp pixels into 16bpp (5-6-5) with the red and blue channels
 * swapped.  The MMX path loads 4 pixels at 3-byte offsets (movd/punpckldq)
 * and packs them exactly like rgb32tobgr16; plain C handles the tail.
 */
static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	const uint8_t *mm_end;
#endif
	uint16_t *d = (uint16_t *)dst;
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
	__asm __volatile(
	    "movq %0, %%mm7\n\t"
	    "movq %1, %%mm6\n\t"
	    ::"m"(red_16mask),"m"(green_16mask));
	/* round end down to a multiple of 16; each pass consumes 12 src bytes */
	mm_end = (uint8_t*)((((unsigned long)end)/16)*16);
	while(s < mm_end)
	{
	    __asm __volatile(
		PREFETCH" 32%1\n\t"
		"movd %1, %%mm0\n\t"
		"movd 3%1, %%mm3\n\t"
		"punpckldq 6%1, %%mm0\n\t"
		"punpckldq 9%1, %%mm3\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm0, %%mm2\n\t"
		"movq %%mm3, %%mm4\n\t"
		"movq %%mm3, %%mm5\n\t"
		"psllq $8, %%mm0\n\t"
		"psllq $8, %%mm3\n\t"
		"pand %%mm7, %%mm0\n\t"
		"pand %%mm7, %%mm3\n\t"
		"psrlq $5, %%mm1\n\t"
		"psrlq $5, %%mm4\n\t"
		"pand %%mm6, %%mm1\n\t"
		"pand %%mm6, %%mm4\n\t"
		"psrlq $19, %%mm2\n\t"
		"psrlq $19, %%mm5\n\t"
		"pand %2, %%mm2\n\t"
		"pand %2, %%mm5\n\t"
		"por %%mm1, %%mm0\n\t"
		"por %%mm4, %%mm3\n\t"
		"por %%mm2, %%mm0\n\t"
		"por %%mm5, %%mm3\n\t"
		"psllq $16, %%mm3\n\t"
		"por %%mm3, %%mm0\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		:"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
		d += 4;
		s += 12;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		const int r= *s++;
		const int g= *s++;
		const int b= *s++;
		*d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
	}
}
+
static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
const uint8_t *s = src;
@@ -505,6 +751,69 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned
}
}
/*
 * Pack 24bpp pixels into 15bpp (5-5-5) with the red and blue channels
 * swapped.  Same structure as rgb24tobgr16, using the 15bpp masks and
 * shift amounts ($7/$6 instead of $8/$5).
 */
static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	const uint8_t *mm_end;
#endif
	uint16_t *d = (uint16_t *)dst;
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
	__asm __volatile(
	    "movq %0, %%mm7\n\t"
	    "movq %1, %%mm6\n\t"
	    ::"m"(red_15mask),"m"(green_15mask));
	/* round end down to a multiple of 16; each pass consumes 12 src bytes */
	mm_end = (uint8_t*)((((unsigned long)end)/16)*16);
	while(s < mm_end)
	{
	    __asm __volatile(
		PREFETCH" 32%1\n\t"
		"movd %1, %%mm0\n\t"
		"movd 3%1, %%mm3\n\t"
		"punpckldq 6%1, %%mm0\n\t"
		"punpckldq 9%1, %%mm3\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm0, %%mm2\n\t"
		"movq %%mm3, %%mm4\n\t"
		"movq %%mm3, %%mm5\n\t"
		"psllq $7, %%mm0\n\t"
		"psllq $7, %%mm3\n\t"
		"pand %%mm7, %%mm0\n\t"
		"pand %%mm7, %%mm3\n\t"
		"psrlq $6, %%mm1\n\t"
		"psrlq $6, %%mm4\n\t"
		"pand %%mm6, %%mm1\n\t"
		"pand %%mm6, %%mm4\n\t"
		"psrlq $19, %%mm2\n\t"
		"psrlq $19, %%mm5\n\t"
		"pand %2, %%mm2\n\t"
		"pand %2, %%mm5\n\t"
		"por %%mm1, %%mm0\n\t"
		"por %%mm4, %%mm3\n\t"
		"por %%mm2, %%mm0\n\t"
		"por %%mm5, %%mm3\n\t"
		"psllq $16, %%mm3\n\t"
		"por %%mm3, %%mm0\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
		d += 4;
		s += 12;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		const int r= *s++;
		const int g= *s++;
		const int b= *s++;
		*d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
	}
}
+
/*
I use here less accurate approximation by simply
left-shifting the input
@@ -1893,3 +2202,206 @@ void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
);
#endif
}
+
/*
 * Upsample two quarter-resolution chroma planes to half resolution:
 * each source byte is duplicated horizontally (punpcklbw/punpckhbw with
 * itself) and each source row is reused for two destination rows (y>>1).
 * The MMX path expands 32 source bytes -> 64 destination bytes per pass.
 */
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
			uint8_t *dst1, uint8_t *dst2,
			unsigned width, unsigned height,
			unsigned srcStride1, unsigned srcStride2,
			unsigned dstStride1, unsigned dstStride2)
{
	unsigned y,x,w,h;
	w=width/2; h=height/2;
#ifdef HAVE_MMX
	asm volatile(
		PREFETCH" %0\n\t"
		PREFETCH" %1\n\t"
		::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
#endif
	/* first plane */
	for(y=0;y<h;y++){
	const uint8_t* s1=src1+srcStride1*(y>>1);
	uint8_t* d=dst1+dstStride1*y;
	x=0;
#ifdef HAVE_MMX
	/* NOTE(review): the guard only checks w > 32 but the loop advances in
	 * steps of 32, so a w that is not a multiple of 32 reads/writes past
	 * the row and skips the scalar tail below — confirm callers' widths. */
	if(w > 32)
	for(;x<w;x+=32)
	{
		asm volatile(
		PREFETCH" 32%1\n\t"
		"movq %1, %%mm0\n\t"
		"movq 8%1, %%mm2\n\t"
		"movq 16%1, %%mm4\n\t"
		"movq 24%1, %%mm6\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm2, %%mm3\n\t"
		"movq %%mm4, %%mm5\n\t"
		"movq %%mm6, %%mm7\n\t"
		"punpcklbw %%mm0, %%mm0\n\t"
		"punpckhbw %%mm1, %%mm1\n\t"
		"punpcklbw %%mm2, %%mm2\n\t"
		"punpckhbw %%mm3, %%mm3\n\t"
		"punpcklbw %%mm4, %%mm4\n\t"
		"punpckhbw %%mm5, %%mm5\n\t"
		"punpcklbw %%mm6, %%mm6\n\t"
		"punpckhbw %%mm7, %%mm7\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		MOVNTQ" %%mm1, 8%0\n\t"
		MOVNTQ" %%mm2, 16%0\n\t"
		MOVNTQ" %%mm3, 24%0\n\t"
		MOVNTQ" %%mm4, 32%0\n\t"
		MOVNTQ" %%mm5, 40%0\n\t"
		MOVNTQ" %%mm6, 48%0\n\t"
		MOVNTQ" %%mm7, 56%0"
		:"=m"(d[2*x])
		:"m"(s1[x])
		:"memory");
	}
#endif
	/* scalar tail: duplicate each byte horizontally */
	for(;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
	}
	/* second plane, same expansion */
	for(y=0;y<h;y++){
	const uint8_t* s2=src2+srcStride2*(y>>1);
	uint8_t* d=dst2+dstStride2*y;
	x=0;
#ifdef HAVE_MMX
	if(w > 32)
	for(;x<w;x+=32)
	{
		asm volatile(
		PREFETCH" 32%1\n\t"
		"movq %1, %%mm0\n\t"
		"movq 8%1, %%mm2\n\t"
		"movq 16%1, %%mm4\n\t"
		"movq 24%1, %%mm6\n\t"
		"movq %%mm0, %%mm1\n\t"
		"movq %%mm2, %%mm3\n\t"
		"movq %%mm4, %%mm5\n\t"
		"movq %%mm6, %%mm7\n\t"
		"punpcklbw %%mm0, %%mm0\n\t"
		"punpckhbw %%mm1, %%mm1\n\t"
		"punpcklbw %%mm2, %%mm2\n\t"
		"punpckhbw %%mm3, %%mm3\n\t"
		"punpcklbw %%mm4, %%mm4\n\t"
		"punpckhbw %%mm5, %%mm5\n\t"
		"punpcklbw %%mm6, %%mm6\n\t"
		"punpckhbw %%mm7, %%mm7\n\t"
		MOVNTQ" %%mm0, %0\n\t"
		MOVNTQ" %%mm1, 8%0\n\t"
		MOVNTQ" %%mm2, 16%0\n\t"
		MOVNTQ" %%mm3, 24%0\n\t"
		MOVNTQ" %%mm4, 32%0\n\t"
		MOVNTQ" %%mm5, 40%0\n\t"
		MOVNTQ" %%mm6, 48%0\n\t"
		MOVNTQ" %%mm7, 56%0"
		:"=m"(d[2*x])
		:"m"(s2[x])
		:"memory");
	}
#endif
	for(;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
	}
#ifdef HAVE_MMX
	asm(
		EMMS" \n\t"
		SFENCE" \n\t"
		::: "memory"
		);
#endif
}
+
/*
 * Interleave three planes (src1 at full resolution, src2/src3 at quarter
 * vertical resolution — each chroma row serves 4 luma rows via y>>2) into
 * a packed 4:2:2 stream: Y U Y V per pair of output pixels.
 * The MMX path emits 64 output bytes (32 src1 bytes, 8 bytes each from
 * src2/src3) per iteration; the scalar loop below mirrors it.
 */
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
			uint8_t *dst,
			unsigned width, unsigned height,
			unsigned srcStride1, unsigned srcStride2,
			unsigned srcStride3, unsigned dstStride)
{
	unsigned y,x,x2,w,h;
	w=width/2; h=height;
#ifdef HAVE_MMX
	asm volatile(
		PREFETCH" %0\n\t"
		PREFETCH" %1\n\t"
		PREFETCH" %2\n\t"
		::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)),"m"(*(src3+srcStride3)):"memory");
#endif
	for(y=0;y<h;y++){
	const uint8_t* yp=src1+srcStride1*y;
	const uint8_t* up=src2+srcStride2*(y>>2);
	const uint8_t* vp=src3+srcStride3*(y>>2);
	uint8_t* d=dst+dstStride*y;
	x2=0;
	x=0;
#ifdef HAVE_MMX
	/* NOTE(review): this loop advances 8 chroma samples per pass with no
	 * multiple-of-8 guard, so a w%8 != 0 row overruns and starves the
	 * scalar tail — confirm callers' widths. */
	for(;x<w;x+=8,x2+=32)
	{
		asm volatile(
		PREFETCH" 32%1\n\t"
		PREFETCH" 32%2\n\t"
		PREFETCH" 32%3\n\t"
		"movq %1, %%mm0\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
		"movq %2, %%mm1\n\t" /* U0U1U2U3U4U5U6U7 */
		"movq %3, %%mm2\n\t" /* V0V1V2V3V4V5V6V7 */
		"movq %%mm0, %%mm3\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
		"movq %%mm1, %%mm4\n\t" /* U0U1U2U3U4U5U6U7 */
		"movq %%mm2, %%mm5\n\t" /* V0V1V2V3V4V5V6V7 */
		"punpcklbw %%mm1, %%mm1\n\t" /* U0U0 U1U1 U2U2 U3U3 */
		"punpcklbw %%mm2, %%mm2\n\t" /* V0V0 V1V1 V2V2 V3V3 */
		"punpckhbw %%mm4, %%mm4\n\t" /* U4U4 U5U5 U6U6 U7U7 */
		"punpckhbw %%mm5, %%mm5\n\t" /* V4V4 V5V5 V6V6 V7V7 */

		"movq %%mm1, %%mm6\n\t"
		"punpcklbw %%mm2, %%mm1\n\t" /* U0V0 U0V0 U1V1 U1V1*/
		"punpcklbw %%mm1, %%mm0\n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
		"punpckhbw %%mm1, %%mm3\n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
		MOVNTQ" %%mm0, %0\n\t"
		MOVNTQ" %%mm3, 8%0\n\t"

		"punpckhbw %%mm2, %%mm6\n\t" /* U2V2 U2V2 U3V3 U3V3*/
		"movq 8%1, %%mm0\n\t"
		"movq %%mm0, %%mm3\n\t"
		"punpcklbw %%mm6, %%mm0\n\t" /* Y U2 Y V2 Y U2 Y V2*/
		"punpckhbw %%mm6, %%mm3\n\t" /* Y U3 Y V3 Y U3 Y V3*/
		MOVNTQ" %%mm0, 16%0\n\t"
		MOVNTQ" %%mm3, 24%0\n\t"

		"movq %%mm4, %%mm6\n\t"
		"movq 16%1, %%mm0\n\t"
		"movq %%mm0, %%mm3\n\t"
		"punpcklbw %%mm5, %%mm4\n\t"
		"punpcklbw %%mm4, %%mm0\n\t" /* Y U4 Y V4 Y U4 Y V4*/
		"punpckhbw %%mm4, %%mm3\n\t" /* Y U5 Y V5 Y U5 Y V5*/
		MOVNTQ" %%mm0, 32%0\n\t"
		MOVNTQ" %%mm3, 40%0\n\t"

		"punpckhbw %%mm5, %%mm6\n\t"
		"movq 24%1, %%mm0\n\t"
		"movq %%mm0, %%mm3\n\t"
		"punpcklbw %%mm6, %%mm0\n\t" /* Y U6 Y V6 Y U6 Y V6*/
		"punpckhbw %%mm6, %%mm3\n\t" /* Y U7 Y V7 Y U7 Y V7*/
		MOVNTQ" %%mm0, 48%0\n\t"
		MOVNTQ" %%mm3, 56%0\n\t"

		:"=m"(d[8*x])
		:"m"(yp[x2]),"m"(up[x]),"m"(vp[x])
		:"memory");
	}
#endif
	/* scalar tail: each chroma sample covers 4 luma samples per row */
	for(;x<w;x++,x2+=4)
	{
		d[8*x+0]=yp[x2];
		d[8*x+1]=up[x];
		d[8*x+2]=yp[x2+1];
		d[8*x+3]=vp[x];
		d[8*x+4]=yp[x2+2];
		d[8*x+5]=up[x];
		d[8*x+6]=yp[x2+3];
		d[8*x+7]=vp[x];
	}
	}
#ifdef HAVE_MMX
	asm(
		EMMS" \n\t"
		SFENCE" \n\t"
		::: "memory"
		);
#endif
}
OpenPOWER on IntegriCloud