#if HAVE_ALTIVEC && HAVE_BIGENDIAN

static const vec_s16 constants =
    {0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
static const vec_u8 interleave_high =
    {0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};
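/*
 * Where the table above comes from (illustrative sketch, not part of the
 * build): entry n is cos(n*pi/16) in Q16 fixed point, e.g.
 * 64277 == round(cos(pi/16) * 65536). A scalar generator would look like:
 *
 *     uint16_t tab[8] = { 0 };
 *     for (int n = 1; n < 8; n++)
 *         tab[n] = (uint16_t)lround(cos(n * M_PI / 16) * 65536.0);
 *
 * Entries 1..5 exceed INT16_MAX, so they must be treated as unsigned
 * 16-bit fractions; see M16()/M15() below for how that is handled.
 */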
#define IDCT_START \
    vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
    vec_s16 Ed, Gd, Add, Bdd, Fd, Hd;\
    vec_s16 eight = vec_splat_s16(8);\
    vec_u16 four = vec_splat_u16(4);\
\
    vec_s16 C1 = vec_splat(constants, 1);\
    vec_s16 C2 = vec_splat(constants, 2);\
    vec_s16 C3 = vec_splat(constants, 3);\
    vec_s16 C4 = vec_splat(constants, 4);\
    vec_s16 C5 = vec_splat(constants, 5);\
    vec_s16 C6 = vec_splat(constants, 6);\
    vec_s16 C7 = vec_splat(constants, 7);\
\
    vec_s16 b0 = vec_ld(0x00, block);\
    vec_s16 b1 = vec_ld(0x10, block);\
    vec_s16 b2 = vec_ld(0x20, block);\
    vec_s16 b3 = vec_ld(0x30, block);\
    vec_s16 b4 = vec_ld(0x40, block);\
    vec_s16 b5 = vec_ld(0x50, block);\
    vec_s16 b6 = vec_ld(0x60, block);\
    vec_s16 b7 = vec_ld(0x70, block);

// these functions do (a*C)>>16
// things are tricky because a is signed, but C unsigned (16-bit)
static inline vec_s16 M16(vec_s16 a, vec_s16 C)
{
    return (vec_s16)vec_perm(vec_mule(a, C), vec_mulo(a, C), interleave_high);
}

static inline vec_s16 M15(vec_s16 a, vec_s16 C)
{
    return vec_add(a, M16(a, C));
}
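/*
 * Scalar sketch of what these compute per 16-bit lane (illustrative):
 * the hardware multiply is signed, so for table entries with the top bit
 * set (C1..C5) the high product half comes out as ((a*C)>>16) - a, and
 * M15 adds the missing a back:
 *
 *     M16(a, C) == (int16_t)(((int32_t)a * (int32_t)(int16_t)C) >> 16)
 *     M15(a, C) == a + M16(a, C)  == (a * (uint16_t)C) >> 16 for C >= 0x8000
 *
 * Hence M16 is used with C6/C7 (which fit in 15 bits) and M15 with C1..C5.
 * vec_mule/vec_mulo give the even/odd 32-bit products; the interleave_high
 * permute keeps the top 16 bits of each product, restoring lane order.
 */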
#define IDCT_1D(ADD, SHIFT)\
    A = vec_add(M15(b1, C1), M16(b7, C7));\
    B = vec_sub(M16(b1, C7), M15(b7, C1));\
    C = vec_add(M15(b3, C3), M15(b5, C5));\
    D = vec_sub(M15(b5, C3), M15(b3, C5));\
\
    Ad = M15(vec_sub(A, C), C4);\
    Bd = M15(vec_sub(B, D), C4);\
\
    Cd = vec_add(A, C);\
    Dd = vec_add(B, D);\
\
    E = ADD(M15(vec_add(b0, b4), C4));\
    F = ADD(M15(vec_sub(b0, b4), C4));\
\
    G = vec_add(M15(b2, C2), M16(b6, C6));\
    H = vec_sub(M16(b2, C6), M15(b6, C2));\
\
    Ed = vec_sub(E, G);\
    Gd = vec_add(E, G);\
\
    Add = vec_add(F, Ad);\
    Bdd = vec_sub(Bd, H);\
\
    Fd = vec_sub(F, Ad);\
    Hd = vec_add(Bd, H);\
\
    b0 = SHIFT(vec_add(Gd, Cd));\
    b7 = SHIFT(vec_sub(Gd, Cd));\
\
    b1 = SHIFT(vec_add(Add, Hd));\
    b2 = SHIFT(vec_sub(Add, Hd));\
\
    b3 = SHIFT(vec_add(Ed, Dd));\
    b4 = SHIFT(vec_sub(Ed, Dd));\
\
    b5 = SHIFT(vec_add(Fd, Bdd));\
    b6 = SHIFT(vec_sub(Fd, Bdd));

#define NOP(a) a
#define ADD8(a) vec_add(a, eight)
#define SHIFT4(a) vec_sra(a, four)

static void vp3_idct_put_altivec(uint8_t *dst, int stride, int16_t block[64])
{
    vec_u8 t;

    IDCT_START

    // pixels are signed; so add 128*16 in addition to the normal 8
    vec_s16 v2048 = vec_sl(vec_splat_s16(1), vec_splat_u16(11));
    eight = vec_add(eight, v2048);
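    /*
     * With the bias folded in, ADD8 injects 8 + 2048 into the E/F path,
     * from which it propagates to each of the eight outputs exactly once,
     * so the final pass computes (x + 8 + 2048) >> 4 == ((x + 8) >> 4) + 128
     * per lane: round-to-nearest by 8 plus the +128 level shift that turns
     * the signed IDCT output into unsigned pixels before vec_packsu().
     */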
    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)
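/*
 * PUT(a): saturate one row of 16-bit results to unsigned bytes, then store
 * the 8 bytes as two 4-byte vec_ste() element stores. Packing a into both
 * halves of t means the element picked by the address still holds the right
 * data (assuming dst is 8-byte aligned, as VP3's 8x8 block destinations are),
 * which avoids a full unaligned 16-byte store.
 */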
#define PUT(a)\
    t = vec_packsu(a, a);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    PUT(b0)     dst += stride;
    PUT(b1)     dst += stride;
    PUT(b2)     dst += stride;
    PUT(b3)     dst += stride;
    PUT(b4)     dst += stride;
    PUT(b5)     dst += stride;
    PUT(b6)     dst += stride;
    PUT(b7)

    memset(block, 0, sizeof(*block) * 64);
}
static void vp3_idct_add_altivec(uint8_t *dst, int stride, int16_t block[64])
{
    LOAD_ZERO;
    vec_u8 t, vdst;
    vec_s16 vdst_16;
    vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst));
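    /*
     * vec_lvsl(0, dst) yields the alignment permute for dst; interleaving
     * it with 0xFF bytes builds a single vec_perm() mask that both selects
     * the 8 destination pixels from the aligned load below and zero-extends
     * each one into a 16-bit lane (0xFF indexes into zero_u8v), ready for
     * the saturated add in ADD() further down.
     */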
    IDCT_START

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)
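/*
 * ADD(a): load the existing destination row, zero-extend it to 16 bits via
 * vdst_mask, add the IDCT result with signed saturation, then pack and
 * store exactly as PUT() does above.
 */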
#define ADD(a)\
    vdst = vec_ld(0, dst);\
    vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
    vdst_16 = vec_adds(a, vdst_16);\
    t = vec_packsu(vdst_16, vdst_16);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    ADD(b0)     dst += stride;
    ADD(b1)     dst += stride;
    ADD(b2)     dst += stride;
    ADD(b3)     dst += stride;
    ADD(b4)     dst += stride;
    ADD(b5)     dst += stride;
    ADD(b6)     dst += stride;
    ADD(b7)

    memset(block, 0, sizeof(*block) * 64);
}
#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
av_cold void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags)
{
#if HAVE_ALTIVEC && HAVE_BIGENDIAN
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    c->idct_put = vp3_idct_put_altivec;
    c->idct_add = vp3_idct_add_altivec;
#endif
}
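/*
 * Usage sketch (illustrative; these callers live outside this file): the
 * VP3/Theora decoder fills a VP3DSPContext via ff_vp3dsp_init(), which
 * dispatches to ff_vp3dsp_init_ppc() and installs the AltiVec versions
 * when the CPU supports them:
 *
 *     VP3DSPContext dsp;
 *     ff_vp3dsp_init(&dsp, 0);
 *     dsp.idct_put(dest, stride, block);  // intra: overwrite destination
 *     dsp.idct_add(dest, stride, block);  // inter: add to the prediction
 *
 * Both entry points clear the 64-coefficient block on the way out, so the
 * caller does not need to memset() it between blocks.
 */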