 libavcodec/h264pred.c                |   1 +
 libavcodec/h264pred.h                |   2 +
 libavcodec/mips/Makefile             |   2 +
 libavcodec/mips/h264dsp_mips.h       |  27 +
 libavcodec/mips/h264pred_init_mips.c | 104 ++
 libavcodec/mips/h264pred_msa.c       | 723 +++++++++++++++++++++++++++++++++
 libavutil/mips/generic_macros_msa.h  |  11 +
 7 files changed, 870 insertions(+), 0 deletions(-)
diff --git a/libavcodec/h264pred.c b/libavcodec/h264pred.c
index 044fc90..497b080 100644
--- a/libavcodec/h264pred.c
+++ b/libavcodec/h264pred.c
@@ -594,4 +594,5 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
if (ARCH_X86) ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
+ if (ARCH_MIPS) ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
}
diff --git a/libavcodec/h264pred.h b/libavcodec/h264pred.h
index 6708292..edeca91 100644
--- a/libavcodec/h264pred.h
+++ b/libavcodec/h264pred.h
@@ -117,5 +117,7 @@ void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
#endif /* AVCODEC_H264PRED_H */
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index aa98774..0a97e7c 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -22,6 +22,7 @@ OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_init_mips.o \
mips/hevcpred_init_mips.o
OBJS-$(CONFIG_H264DSP) += mips/h264dsp_init_mips.o
OBJS-$(CONFIG_H264CHROMA) += mips/h264chroma_init_mips.o
+OBJS-$(CONFIG_H264PRED) += mips/h264pred_init_mips.o
MSA-OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_msa.o \
mips/hevc_mc_uni_msa.o \
mips/hevc_mc_uniw_msa.o \
@@ -32,5 +33,6 @@ MSA-OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_msa.o \
mips/hevcpred_msa.o
MSA-OBJS-$(CONFIG_H264DSP) += mips/h264dsp_msa.o
MSA-OBJS-$(CONFIG_H264CHROMA) += mips/h264chroma_msa.o
+MSA-OBJS-$(CONFIG_H264PRED) += mips/h264pred_msa.o
LOONGSON3-OBJS-$(CONFIG_H264DSP) += mips/h264dsp_mmi.o
LOONGSON3-OBJS-$(CONFIG_H264CHROMA) += mips/h264chroma_mmi.o
diff --git a/libavcodec/mips/h264dsp_mips.h b/libavcodec/mips/h264dsp_mips.h
index 319f6d3..ef380fa 100644
--- a/libavcodec/mips/h264dsp_mips.h
+++ b/libavcodec/mips/h264dsp_mips.h
@@ -68,6 +68,33 @@ void ff_weight_h264_pixels8_8_msa(uint8_t *src, int stride, int height,
void ff_weight_h264_pixels4_8_msa(uint8_t *src, int stride, int height,
int log2_denom, int weight, int offset);
+void ff_h264_intra_predict_plane_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_predict_dc_4blk_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_predict_hor_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_predict_vert_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src,
+ ptrdiff_t stride);
+void ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src,
+ ptrdiff_t stride);
+void ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src,
+ ptrdiff_t stride);
+void ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src,
+ ptrdiff_t stride);
+void ff_h264_intra_predict_plane_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_vert_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_horiz_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_dc_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_vert_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_horiz_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_dc_left_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_dc_top_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_dc_128_8x8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_h264_intra_pred_dc_128_16x16_msa(uint8_t *src, ptrdiff_t stride);
+void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
+void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
+
void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height,
int log2_denom, int weight, int offset);
void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
diff --git a/libavcodec/mips/h264pred_init_mips.c b/libavcodec/mips/h264pred_init_mips.c
new file mode 100644
index 0000000..27ff10f
--- /dev/null
+++ b/libavcodec/mips/h264pred_init_mips.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "h264dsp_mips.h"
+
+#if HAVE_MSA
+static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ const int chroma_format_idc)
+{
+ if (8 == bit_depth) {
+ if (chroma_format_idc == 1) {
+ h->pred8x8[VERT_PRED8x8] = ff_h264_intra_pred_vert_8x8_msa;
+ h->pred8x8[HOR_PRED8x8] = ff_h264_intra_pred_horiz_8x8_msa;
+ }
+
+ if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {
+ if (chroma_format_idc == 1) {
+ h->pred8x8[PLANE_PRED8x8] = ff_h264_intra_predict_plane_8x8_msa;
+ }
+ }
+ if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7
+ && codec_id != AV_CODEC_ID_VP8) {
+ if (chroma_format_idc == 1) {
+ h->pred8x8[DC_PRED8x8] = ff_h264_intra_predict_dc_4blk_8x8_msa;
+ h->pred8x8[LEFT_DC_PRED8x8] =
+ ff_h264_intra_predict_hor_dc_8x8_msa;
+ h->pred8x8[TOP_DC_PRED8x8] =
+ ff_h264_intra_predict_vert_dc_8x8_msa;
+ h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] =
+ ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa;
+ h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] =
+ ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa;
+ h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] =
+ ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa;
+ h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] =
+ ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa;
+ }
+ } else {
+ if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
+ h->pred8x8[7] = ff_vp8_pred8x8_127_dc_8_msa;
+ h->pred8x8[8] = ff_vp8_pred8x8_129_dc_8_msa;
+ }
+ }
+
+ if (chroma_format_idc == 1) {
+ h->pred8x8[DC_128_PRED8x8] = ff_h264_intra_pred_dc_128_8x8_msa;
+ }
+
+ h->pred16x16[DC_PRED8x8] = ff_h264_intra_pred_dc_16x16_msa;
+ h->pred16x16[VERT_PRED8x8] = ff_h264_intra_pred_vert_16x16_msa;
+ h->pred16x16[HOR_PRED8x8] = ff_h264_intra_pred_horiz_16x16_msa;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_SVQ3:
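+            /* SVQ3 keeps its own 16x16 plane variant; C fallback stays */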
+ ;
+ break;
+ case AV_CODEC_ID_RV40:
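+            /* RV40 likewise keeps its own C plane prediction */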
+ ;
+ break;
+ case AV_CODEC_ID_VP7:
+ case AV_CODEC_ID_VP8:
+ h->pred16x16[7] = ff_vp8_pred16x16_127_dc_8_msa;
+ h->pred16x16[8] = ff_vp8_pred16x16_129_dc_8_msa;
+ break;
+ default:
+ h->pred16x16[PLANE_PRED8x8] =
+ ff_h264_intra_predict_plane_16x16_msa;
+ break;
+ }
+
+ h->pred16x16[LEFT_DC_PRED8x8] = ff_h264_intra_pred_dc_left_16x16_msa;
+ h->pred16x16[TOP_DC_PRED8x8] = ff_h264_intra_pred_dc_top_16x16_msa;
+ h->pred16x16[DC_128_PRED8x8] = ff_h264_intra_pred_dc_128_16x16_msa;
+ }
+}
+#endif // #if HAVE_MSA
+
+av_cold void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
+                                    const int bit_depth,
+ const int chroma_format_idc)
+{
+#if HAVE_MSA
+ h264_pred_init_msa(h, codec_id, bit_depth, chroma_format_idc);
+#endif // #if HAVE_MSA
+}
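
Note that ff_h264_pred_init_mips() is reached unconditionally on MIPS builds via the h264pred.c hunk above; without HAVE_MSA its body compiles to an empty stub, so the portable C predictors remain in effect. The MSA assignments themselves apply only to 8-bit content, and the 8x8 chroma cases additionally require chroma_format_idc == 1 (4:2:0).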
diff --git a/libavcodec/mips/h264pred_msa.c b/libavcodec/mips/h264pred_msa.c
new file mode 100644
index 0000000..cddcd2e
--- /dev/null
+++ b/libavcodec/mips/h264pred_msa.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/mips/generic_macros_msa.h"
+#include "h264dsp_mips.h"
+
+static void intra_predict_vert_8x8_msa(uint8_t *src, uint8_t *dst,
+ int32_t dst_stride)
+{
+ uint32_t row;
+ uint32_t src_data1, src_data2;
+
+ src_data1 = LW(src);
+ src_data2 = LW(src + 4);
+
+ for (row = 8; row--;) {
+ SW(src_data1, dst);
+ SW(src_data2, (dst + 4));
+ dst += dst_stride;
+ }
+}
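
Vertical prediction simply replicates the row of reconstructed pixels above the block into every output row; the MSA version does this with two 32-bit loads and two 32-bit stores per row. A plain C sketch of the same operation (illustration only, not part of the patch):

    static void intra_predict_vert_8x8_ref(const uint8_t *top, uint8_t *dst,
                                           int32_t dst_stride)
    {
        int row, col;

        for (row = 0; row < 8; row++, dst += dst_stride)
            for (col = 0; col < 8; col++)
                dst[col] = top[col];   /* replicate the row above */
    }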
+
+static void intra_predict_vert_16x16_msa(uint8_t *src, uint8_t *dst,
+ int32_t dst_stride)
+{
+ uint32_t row;
+ v16u8 src0;
+
+ src0 = LD_UB(src);
+
+ for (row = 16; row--;) {
+ ST_UB(src0, dst);
+ dst += dst_stride;
+ }
+}
+
+static void intra_predict_horiz_8x8_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride)
+{
+ uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+
+ out0 = src[0 * src_stride] * 0x0101010101010101;
+ out1 = src[1 * src_stride] * 0x0101010101010101;
+ out2 = src[2 * src_stride] * 0x0101010101010101;
+ out3 = src[3 * src_stride] * 0x0101010101010101;
+ out4 = src[4 * src_stride] * 0x0101010101010101;
+ out5 = src[5 * src_stride] * 0x0101010101010101;
+ out6 = src[6 * src_stride] * 0x0101010101010101;
+ out7 = src[7 * src_stride] * 0x0101010101010101;
+
+ SD4(out0, out1, out2, out3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ SD4(out4, out5, out6, out7, dst, dst_stride);
+}
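
Horizontal prediction fills each row with its left neighbour. Multiplying the 8-bit sample by 0x0101010101010101 broadcasts it into all eight bytes of a 64-bit word, so a single SD store writes one complete row. For example:

    /* byte-splat via multiply, the trick used above (illustrative) */
    uint64_t row = (uint64_t) 0x5A * 0x0101010101010101ULL;
    /* row == 0x5A5A5A5A5A5A5A5A -- eight copies of the sample */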
+
+static void intra_predict_horiz_16x16_msa(uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride)
+{
+ uint32_t row;
+ uint8_t inp0, inp1, inp2, inp3;
+ v16u8 src0, src1, src2, src3;
+
+ for (row = 4; row--;) {
+ inp0 = src[0];
+ src += src_stride;
+ inp1 = src[0];
+ src += src_stride;
+ inp2 = src[0];
+ src += src_stride;
+ inp3 = src[0];
+ src += src_stride;
+
+ src0 = (v16u8) __msa_fill_b(inp0);
+ src1 = (v16u8) __msa_fill_b(inp1);
+ src2 = (v16u8) __msa_fill_b(inp2);
+ src3 = (v16u8) __msa_fill_b(inp3);
+
+ ST_UB4(src0, src1, src2, src3, dst, dst_stride);
+ dst += (4 * dst_stride);
+ }
+}
+
+static void intra_predict_dc_8x8_msa(uint8_t *src_top, uint8_t *src_left,
+ int32_t src_stride_left,
+ uint8_t *dst, int32_t dst_stride,
+ uint8_t is_above, uint8_t is_left)
+{
+ uint32_t row;
+ uint32_t out, addition = 0;
+ v16u8 src_above, store;
+ v8u16 sum_above;
+ v4u32 sum_top;
+ v2u64 sum;
+
+ if (is_left && is_above) {
+ src_above = LD_UB(src_top);
+
+ sum_above = __msa_hadd_u_h(src_above, src_above);
+ sum_top = __msa_hadd_u_w(sum_above, sum_above);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ addition = __msa_copy_u_w((v4i32) sum, 0);
+
+ for (row = 0; row < 8; row++) {
+ addition += src_left[row * src_stride_left];
+ }
+
+ addition = (addition + 8) >> 4;
+ store = (v16u8) __msa_fill_b(addition);
+ } else if (is_left) {
+ for (row = 0; row < 8; row++) {
+ addition += src_left[row * src_stride_left];
+ }
+
+ addition = (addition + 4) >> 3;
+ store = (v16u8) __msa_fill_b(addition);
+ } else if (is_above) {
+ src_above = LD_UB(src_top);
+
+ sum_above = __msa_hadd_u_h(src_above, src_above);
+ sum_top = __msa_hadd_u_w(sum_above, sum_above);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ sum = (v2u64) __msa_srari_d((v2i64) sum, 3);
+ store = (v16u8) __msa_splati_b((v16i8) sum, 0);
+ } else {
+ store = (v16u8) __msa_ldi_b(128);
+ }
+
+ out = __msa_copy_u_w((v4i32) store, 0);
+
+ for (row = 8; row--;) {
+ SW(out, dst);
+ SW(out, (dst + 4));
+ dst += dst_stride;
+ }
+}
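
The DC predictor averages whichever edges are available with round-to-nearest; the is_above-only path gets the same rounding from __msa_srari_d with shift 3. A scalar summary of the values it produces (reference only):

    /* reference DC computation for the 8x8 case (illustration only) */
    static uint8_t dc_value_8x8(uint32_t sum_top, uint32_t sum_left,
                                int is_above, int is_left)
    {
        if (is_above && is_left)
            return (sum_top + sum_left + 8) >> 4;              /* 16 samples */
        if (is_above || is_left)
            return ((is_above ? sum_top : sum_left) + 4) >> 3; /*  8 samples */
        return 128;                                            /* mid-grey   */
    }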
+
+static void intra_predict_dc_16x16_msa(uint8_t *src_top, uint8_t *src_left,
+ int32_t src_stride_left,
+ uint8_t *dst, int32_t dst_stride,
+ uint8_t is_above, uint8_t is_left)
+{
+ uint32_t row;
+ uint32_t addition = 0;
+ v16u8 src_above, store;
+ v8u16 sum_above;
+ v4u32 sum_top;
+ v2u64 sum;
+
+ if (is_left && is_above) {
+ src_above = LD_UB(src_top);
+
+ sum_above = __msa_hadd_u_h(src_above, src_above);
+ sum_top = __msa_hadd_u_w(sum_above, sum_above);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ addition = __msa_copy_u_w((v4i32) sum, 0);
+
+ for (row = 0; row < 16; row++) {
+ addition += src_left[row * src_stride_left];
+ }
+
+ addition = (addition + 16) >> 5;
+ store = (v16u8) __msa_fill_b(addition);
+ } else if (is_left) {
+ for (row = 0; row < 16; row++) {
+ addition += src_left[row * src_stride_left];
+ }
+
+ addition = (addition + 8) >> 4;
+ store = (v16u8) __msa_fill_b(addition);
+ } else if (is_above) {
+ src_above = LD_UB(src_top);
+
+ sum_above = __msa_hadd_u_h(src_above, src_above);
+ sum_top = __msa_hadd_u_w(sum_above, sum_above);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
+ sum = __msa_hadd_u_d(sum_top, sum_top);
+ sum = (v2u64) __msa_srari_d((v2i64) sum, 4);
+ store = (v16u8) __msa_splati_b((v16i8) sum, 0);
+ } else {
+ store = (v16u8) __msa_ldi_b(128);
+ }
+
+ for (row = 16; row--;) {
+ ST_UB(store, dst);
+ dst += dst_stride;
+ }
+}
+
+#define INTRA_PREDICT_VALDC_8X8_MSA(val) \
+static void intra_predict_##val##dc_8x8_msa(uint8_t *dst, \
+ int32_t dst_stride) \
+{ \
+ uint32_t row, out; \
+ v16i8 store; \
+ \
+ store = __msa_ldi_b(val); \
+ out = __msa_copy_u_w((v4i32) store, 0); \
+ \
+ for (row = 8; row--;) { \
+ SW(out, dst); \
+ SW(out, (dst + 4)); \
+ dst += dst_stride; \
+ } \
+}
+
+INTRA_PREDICT_VALDC_8X8_MSA(127);
+INTRA_PREDICT_VALDC_8X8_MSA(129);
+
+#define INTRA_PREDICT_VALDC_16X16_MSA(val) \
+static void intra_predict_##val##dc_16x16_msa(uint8_t *dst, \
+ int32_t dst_stride) \
+{ \
+ uint32_t row; \
+ v16u8 store; \
+ \
+ store = (v16u8) __msa_ldi_b(val); \
+ \
+ for (row = 16; row--;) { \
+ ST_UB(store, dst); \
+ dst += dst_stride; \
+ } \
+}
+
+INTRA_PREDICT_VALDC_16X16_MSA(127);
+INTRA_PREDICT_VALDC_16X16_MSA(129);
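
These constant-fill predictors back the ff_vp8_pred*_127/129_dc_8_msa() wrappers at the end of the file. VP8 defines samples above the frame as 127 and samples to its left as 129, so the DC of an entirely missing edge degenerates to a constant fill; each macro expands to a function that stores __msa_ldi_b(val) across the whole block.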
+
+static void intra_predict_plane_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lpcnt;
+ int32_t res, res0, res1, res2, res3;
+ uint64_t out0, out1;
+ v16i8 shf_mask = { 3, 5, 2, 6, 1, 7, 0, 8, 3, 5, 2, 6, 1, 7, 0, 8 };
+ v8i16 short_multiplier = { 1, 2, 3, 4, 1, 2, 3, 4 };
+ v4i32 int_multiplier = { 0, 1, 2, 3 };
+ v16u8 src_top;
+ v8i16 vec9, vec10, vec11;
+ v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8;
+ v2i64 sum;
+
+ src_top = LD_UB(src - (stride + 1));
+ src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);
+
+ vec9 = __msa_hsub_u_h(src_top, src_top);
+ vec9 *= short_multiplier;
+ vec8 = __msa_hadd_s_w(vec9, vec9);
+ sum = __msa_hadd_s_d(vec8, vec8);
+
+ res0 = __msa_copy_s_w((v4i32) sum, 0);
+
+ res1 = (src[4 * stride - 1] - src[2 * stride - 1]) +
+ 2 * (src[5 * stride - 1] - src[stride - 1]) +
+ 3 * (src[6 * stride - 1] - src[-1]) +
+ 4 * (src[7 * stride - 1] - src[-stride - 1]);
+
+ res0 *= 17;
+ res1 *= 17;
+ res0 = (res0 + 16) >> 5;
+ res1 = (res1 + 16) >> 5;
+
+ res3 = 3 * (res0 + res1);
+ res2 = 16 * (src[7 * stride - 1] + src[-stride + 7] + 1);
+ res = res2 - res3;
+
+ vec8 = __msa_fill_w(res0);
+ vec4 = __msa_fill_w(res);
+ vec2 = __msa_fill_w(res1);
+ vec5 = vec8 * int_multiplier;
+ vec3 = vec8 * 4;
+
+ for (lpcnt = 4; lpcnt--;) {
+ vec0 = vec5;
+ vec0 += vec4;
+ vec1 = vec0 + vec3;
+ vec6 = vec5;
+ vec4 += vec2;
+ vec6 += vec4;
+ vec7 = vec6 + vec3;
+
+ SRA_4V(vec0, vec1, vec6, vec7, 5);
+ PCKEV_H2_SH(vec1, vec0, vec7, vec6, vec10, vec11);
+ CLIP_SH2_0_255(vec10, vec11);
+ PCKEV_B2_SH(vec10, vec10, vec11, vec11, vec10, vec11);
+
+ out0 = __msa_copy_s_d((v2i64) vec10, 0);
+ out1 = __msa_copy_s_d((v2i64) vec11, 0);
+ SD(out0, src);
+ src += stride;
+ SD(out1, src);
+ src += stride;
+
+ vec4 += vec2;
+ }
+}
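
This is the plane mode of H.264 chroma intra prediction: with the horizontal gradient H (top row, computed in the vector unit via the shuffle/hsub/multiply sequence as res0) and the vertical gradient V (left column, computed scalar as res1), the slopes are b = (17*H + 16) >> 5 and c = (17*V + 16) >> 5, and every sample is clip((a + b*(x-3) + c*(y-3) + 16) >> 5) with a = 16*(p[-1,7] + p[7,-1]); the vector loop evaluates four x positions per multiply-add. A scalar reference, assuming libavutil's av_clip_uint8() (illustration only, not part of the patch):

    static void intra_predict_plane_8x8_ref(uint8_t *src, int32_t stride)
    {
        int x, y, h = 0, v = 0, a, b, c;

        for (x = 1; x <= 4; x++) {
            h += x * (src[-stride + 3 + x] - src[-stride + 3 - x]);
            v += x * (src[(3 + x) * stride - 1] - src[(3 - x) * stride - 1]);
        }
        b = (17 * h + 16) >> 5;
        c = (17 * v + 16) >> 5;
        a = 16 * (src[7 * stride - 1] + src[-stride + 7]);

        for (y = 0; y < 8; y++)
            for (x = 0; x < 8; x++)
                src[y * stride + x] =
                    av_clip_uint8((a + b * (x - 3) + c * (y - 3) + 16) >> 5);
    }

The 16x16 luma variant that follows is the same construction with weights 1..8 and slopes (5*H + 32) >> 6.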
+
+static void intra_predict_plane_16x16_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lpcnt;
+ int32_t res0, res1, res2, res3;
+ uint64_t load0, load1;
+ v16i8 shf_mask = { 7, 8, 6, 9, 5, 10, 4, 11, 3, 12, 2, 13, 1, 14, 0, 15 };
+ v8i16 short_multiplier = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ v4i32 int_multiplier = { 0, 1, 2, 3 };
+ v16u8 src_top = { 0 };
+ v8i16 vec9, vec10;
+ v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, res_add;
+
+ load0 = LD(src - (stride + 1));
+ load1 = LD(src - (stride + 1) + 9);
+
+ INSERT_D2_UB(load0, load1, src_top);
+
+ src_top = (v16u8) __msa_vshf_b(shf_mask, (v16i8) src_top, (v16i8) src_top);
+
+ vec9 = __msa_hsub_u_h(src_top, src_top);
+ vec9 *= short_multiplier;
+ vec8 = __msa_hadd_s_w(vec9, vec9);
+ res_add = (v4i32) __msa_hadd_s_d(vec8, vec8);
+
+ res0 = __msa_copy_s_w(res_add, 0) + __msa_copy_s_w(res_add, 2);
+
+ res1 = (src[8 * stride - 1] - src[6 * stride - 1]) +
+ 2 * (src[9 * stride - 1] - src[5 * stride - 1]) +
+ 3 * (src[10 * stride - 1] - src[4 * stride - 1]) +
+ 4 * (src[11 * stride - 1] - src[3 * stride - 1]) +
+ 5 * (src[12 * stride - 1] - src[2 * stride - 1]) +
+ 6 * (src[13 * stride - 1] - src[stride - 1]) +
+ 7 * (src[14 * stride - 1] - src[-1]) +
+ 8 * (src[15 * stride - 1] - src[-1 * stride - 1]);
+
+ res0 *= 5;
+ res1 *= 5;
+ res0 = (res0 + 32) >> 6;
+ res1 = (res1 + 32) >> 6;
+
+ res3 = 7 * (res0 + res1);
+ res2 = 16 * (src[15 * stride - 1] + src[-stride + 15] + 1);
+ res2 -= res3;
+
+ vec8 = __msa_fill_w(res0);
+ vec4 = __msa_fill_w(res2);
+ vec5 = __msa_fill_w(res1);
+ vec6 = vec8 * 4;
+ vec7 = vec8 * int_multiplier;
+
+ for (lpcnt = 16; lpcnt--;) {
+ vec0 = vec7;
+ vec0 += vec4;
+ vec1 = vec0 + vec6;
+ vec2 = vec1 + vec6;
+ vec3 = vec2 + vec6;
+
+ SRA_4V(vec0, vec1, vec2, vec3, 5);
+ PCKEV_H2_SH(vec1, vec0, vec3, vec2, vec9, vec10);
+ CLIP_SH2_0_255(vec9, vec10);
+ PCKEV_ST_SB(vec9, vec10, src);
+ src += stride;
+
+ vec4 += vec5;
+ }
+}
+
+static void intra_predict_dc_4blk_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0, src1, src3, src2 = 0;
+ uint32_t out0, out1, out2, out3;
+ v16u8 src_top;
+ v8u16 add;
+ v4u32 sum;
+
+ src_top = LD_UB(src - stride);
+ add = __msa_hadd_u_h((v16u8) src_top, (v16u8) src_top);
+ sum = __msa_hadd_u_w(add, add);
+ src0 = __msa_copy_u_w((v4i32) sum, 0);
+ src1 = __msa_copy_u_w((v4i32) sum, 1);
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src0 += src[lp_cnt * stride - 1];
+ src2 += src[(4 + lp_cnt) * stride - 1];
+ }
+
+ src0 = (src0 + 4) >> 3;
+ src3 = (src1 + src2 + 4) >> 3;
+ src1 = (src1 + 2) >> 2;
+ src2 = (src2 + 2) >> 2;
+ out0 = src0 * 0x01010101;
+ out1 = src1 * 0x01010101;
+ out2 = src2 * 0x01010101;
+ out3 = src3 * 0x01010101;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SW(out0, src);
+ SW(out1, (src + 4));
+ SW(out2, (src + 4 * stride));
+ SW(out3, (src + 4 * stride + 4));
+ src += stride;
+ }
+}
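
This is the chroma DC mode with both edges available: each 4x4 quadrant gets its own DC, per the H.264 chroma DC rules. Reading the sums above:

    /* quadrant DC values computed above (T = top edge, L = left edge):
     *   top-left     : (T[0..3] + L[0..3] + 4) >> 3
     *   top-right    : (T[4..7] + 2) >> 2
     *   bottom-left  : (L[4..7] + 2) >> 2
     *   bottom-right : (T[4..7] + L[4..7] + 4) >> 3
     */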
+
+static void intra_predict_hor_dc_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0 = 0, src1 = 0;
+ uint64_t out0, out1;
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src0 += src[lp_cnt * stride - 1];
+ src1 += src[(4 + lp_cnt) * stride - 1];
+ }
+
+ src0 = (src0 + 2) >> 2;
+ src1 = (src1 + 2) >> 2;
+ out0 = src0 * 0x0101010101010101;
+ out1 = src1 * 0x0101010101010101;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SD(out0, src);
+ SD(out1, (src + 4 * stride));
+ src += stride;
+ }
+}
+
+static void intra_predict_vert_dc_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t out0 = 0, out1 = 0;
+ v16u8 src_top;
+ v8u16 add;
+ v4u32 sum;
+ v4i32 res0, res1;
+
+ src_top = LD_UB(src - stride);
+ add = __msa_hadd_u_h(src_top, src_top);
+ sum = __msa_hadd_u_w(add, add);
+ sum = (v4u32) __msa_srari_w((v4i32) sum, 2);
+ res0 = (v4i32) __msa_splati_b((v16i8) sum, 0);
+ res1 = (v4i32) __msa_splati_b((v16i8) sum, 4);
+ out0 = __msa_copy_u_w(res0, 0);
+ out1 = __msa_copy_u_w(res1, 0);
+
+ for (lp_cnt = 8; lp_cnt--;) {
+ SW(out0, src);
+ SW(out1, src + 4);
+ src += stride;
+ }
+}
+
+static void intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0, src1, src2 = 0;
+ uint32_t out0, out1, out2;
+ v16u8 src_top;
+ v8u16 add;
+ v4u32 sum;
+
+ src_top = LD_UB(src - stride);
+ add = __msa_hadd_u_h(src_top, src_top);
+ sum = __msa_hadd_u_w(add, add);
+ src0 = __msa_copy_u_w((v4i32) sum, 0);
+ src1 = __msa_copy_u_w((v4i32) sum, 1);
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src2 += src[lp_cnt * stride - 1];
+ }
+ src2 = (src0 + src2 + 4) >> 3;
+ src0 = (src0 + 2) >> 2;
+ src1 = (src1 + 2) >> 2;
+ out0 = src0 * 0x01010101;
+ out1 = src1 * 0x01010101;
+ out2 = src2 * 0x01010101;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SW(out2, src);
+ SW(out1, src + 4);
+ SW(out0, src + stride * 4);
+ SW(out1, src + stride * 4 + 4);
+ src += stride;
+ }
+}
+
+static void intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0, src1, src2 = 0, src3;
+ uint32_t out0, out1, out2, out3;
+ v16u8 src_top;
+ v8u16 add;
+ v4u32 sum;
+
+ src_top = LD_UB(src - stride);
+ add = __msa_hadd_u_h(src_top, src_top);
+ sum = __msa_hadd_u_w(add, add);
+ src0 = __msa_copy_u_w((v4i32) sum, 0);
+ src1 = __msa_copy_u_w((v4i32) sum, 1);
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src2 += src[(4 + lp_cnt) * stride - 1];
+ }
+
+ src0 = (src0 + 2) >> 2;
+ src3 = (src1 + src2 + 4) >> 3;
+ src1 = (src1 + 2) >> 2;
+ src2 = (src2 + 2) >> 2;
+
+ out0 = src0 * 0x01010101;
+ out1 = src1 * 0x01010101;
+ out2 = src2 * 0x01010101;
+ out3 = src3 * 0x01010101;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SW(out0, src);
+ SW(out1, src + 4);
+ SW(out2, src + stride * 4);
+ SW(out3, src + stride * 4 + 4);
+ src += stride;
+ }
+}
+
+static void intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0 = 0;
+ uint64_t out0, out1;
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src0 += src[lp_cnt * stride - 1];
+ }
+
+ src0 = (src0 + 2) >> 2;
+ out0 = src0 * 0x0101010101010101;
+ out1 = 0x8080808080808080;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SD(out0, src);
+ SD(out1, src + stride * 4);
+ src += stride;
+ }
+}
+
+static void intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src, int32_t stride)
+{
+ uint8_t lp_cnt;
+ uint32_t src0 = 0;
+ uint64_t out0, out1;
+
+ for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) {
+ src0 += src[(4 + lp_cnt) * stride - 1];
+ }
+
+ src0 = (src0 + 2) >> 2;
+
+ out0 = 0x8080808080808080;
+ out1 = src0 * 0x0101010101010101;
+
+ for (lp_cnt = 4; lp_cnt--;) {
+ SD(out0, src);
+ SD(out1, src + stride * 4);
+ src += stride;
+ }
+}
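
The four mad_cow_dc variants (bound to the ALZHEIMER_DC_*_PRED8x8 slots in h264pred_init_mips.c above) cover the partial-availability cases, where slice boundaries or constrained intra prediction leave only part of an edge usable. An informal key, derived from the sums in the code:

    /* l0t : left[0..3] and top valid, left[4..7] missing
     * 0lt : left[4..7] and top valid, left[0..3] missing
     * l00 : only left[0..3] valid; bottom half filled with 128
     * 0l0 : only left[4..7] valid; top half filled with 128
     */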
+
+void ff_h264_intra_predict_plane_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_plane_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_dc_4blk_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_dc_4blk_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_hor_dc_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_hor_dc_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_vert_dc_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_vert_dc_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src,
+ ptrdiff_t stride)
+{
+ intra_predict_mad_cow_dc_l0t_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src,
+ ptrdiff_t stride)
+{
+ intra_predict_mad_cow_dc_0lt_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src,
+ ptrdiff_t stride)
+{
+ intra_predict_mad_cow_dc_l00_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src,
+ ptrdiff_t stride)
+{
+ intra_predict_mad_cow_dc_0l0_8x8_msa(src, stride);
+}
+
+void ff_h264_intra_predict_plane_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_plane_16x16_msa(src, stride);
+}
+
+void ff_h264_intra_pred_vert_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *dst = src;
+
+ intra_predict_vert_8x8_msa(src - stride, dst, stride);
+}
+
+void ff_h264_intra_pred_horiz_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *dst = src;
+
+ intra_predict_horiz_8x8_msa(src - 1, stride, dst, stride);
+}
+
+void ff_h264_intra_pred_dc_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *src_top = src - stride;
+ uint8_t *src_left = src - 1;
+ uint8_t *dst = src;
+
+ intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 1, 1);
+}
+
+void ff_h264_intra_pred_vert_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *dst = src;
+
+ intra_predict_vert_16x16_msa(src - stride, dst, stride);
+}
+
+void ff_h264_intra_pred_horiz_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *dst = src;
+
+ intra_predict_horiz_16x16_msa(src - 1, stride, dst, stride);
+}
+
+void ff_h264_intra_pred_dc_left_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *src_top = src - stride;
+ uint8_t *src_left = src - 1;
+ uint8_t *dst = src;
+
+ intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 0, 1);
+}
+
+void ff_h264_intra_pred_dc_top_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *src_top = src - stride;
+ uint8_t *src_left = src - 1;
+ uint8_t *dst = src;
+
+ intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 1, 0);
+}
+
+void ff_h264_intra_pred_dc_128_8x8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *src_top = src - stride;
+ uint8_t *src_left = src - 1;
+ uint8_t *dst = src;
+
+ intra_predict_dc_8x8_msa(src_top, src_left, stride, dst, stride, 0, 0);
+}
+
+void ff_h264_intra_pred_dc_128_16x16_msa(uint8_t *src, ptrdiff_t stride)
+{
+ uint8_t *src_top = src - stride;
+ uint8_t *src_left = src - 1;
+ uint8_t *dst = src;
+
+ intra_predict_dc_16x16_msa(src_top, src_left, stride, dst, stride, 0, 0);
+}
+
+void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_127dc_8x8_msa(src, stride);
+}
+
+void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_129dc_8x8_msa(src, stride);
+}
+
+void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_127dc_16x16_msa(src, stride);
+}
+
+void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride)
+{
+ intra_predict_129dc_16x16_msa(src, stride);
+}
diff --git a/libavutil/mips/generic_macros_msa.h b/libavutil/mips/generic_macros_msa.h
index bee24e2..0f06c80 100644
--- a/libavutil/mips/generic_macros_msa.h
+++ b/libavutil/mips/generic_macros_msa.h
@@ -2307,4 +2307,15 @@
\
SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride); \
}
+
+/* Description : Pack even byte elements and store byte vector in destination
+ memory
+ Arguments : Inputs - in0, in1, pdst
+*/
+#define PCKEV_ST_SB(in0, in1, pdst) \
+{ \
+ v16i8 tmp_m; \
+ tmp_m = __msa_pckev_b((v16i8) in1, (v16i8) in0); \
+ ST_SB(tmp_m, (pdst)); \
+}
#endif /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */
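
PCKEV_ST_SB() pairs with CLIP_SH2_0_255() in the plane predictors above: after clipping, each halfword holds an 8-bit value in its even byte, so packing the even bytes of the two vectors yields the 16 output pixels of one row in a single store. As used in intra_predict_plane_16x16_msa():

    CLIP_SH2_0_255(vec9, vec10);   /* clamp halfwords to 0..255        */
    PCKEV_ST_SB(vec9, vec10, src); /* pack even bytes, store 16 pixels */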