#ifndef AVUTIL_PPC_UTIL_ALTIVEC_H
#define AVUTIL_PPC_UTIL_ALTIVEC_H

// Used to build register permutation vectors (vcprm).
// The 's' entries select words from the second source vector.
#define WORD_0 0x00,0x01,0x02,0x03
#define WORD_1 0x04,0x05,0x06,0x07
#define WORD_2 0x08,0x09,0x0a,0x0b
#define WORD_3 0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}

// Transpose an 8x8 matrix of 16-bit elements in place.
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
    vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
 \
    A1 = vec_mergeh (a, e); \
    B1 = vec_mergel (a, e); \
    C1 = vec_mergeh (b, f); \
    D1 = vec_mergel (b, f); \
    E1 = vec_mergeh (c, g); \
    F1 = vec_mergel (c, g); \
    G1 = vec_mergeh (d, h); \
    H1 = vec_mergel (d, h); \
 \
    A2 = vec_mergeh (A1, E1); \
    B2 = vec_mergel (A1, E1); \
    C2 = vec_mergeh (B1, F1); \
    D2 = vec_mergel (B1, F1); \
    E2 = vec_mergeh (C1, G1); \
    F2 = vec_mergel (C1, G1); \
    G2 = vec_mergeh (D1, H1); \
    H2 = vec_mergel (D1, H1); \
 \
    a = vec_mergeh (A2, E2); \
    b = vec_mergel (A2, E2); \
    c = vec_mergeh (B2, F2); \
    d = vec_mergel (B2, F2); \
    e = vec_mergeh (C2, G2); \
    f = vec_mergel (C2, G2); \
    g = vec_mergeh (D2, H2); \
    h = vec_mergel (D2, H2); \
} while (0)
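/*
 * Usage sketch (illustrative, not part of the original header): vcprm(a,b,c,d)
 * builds the byte permute vector for vec_perm() that selects 32-bit words,
 * where 0-3 index the first source operand and s0-s3 the second. For example,
 * a hypothetical helper that combines the first two words of each of two
 * float vectors:
 *
 *     static inline vector float merge_first_halves(vector float x, vector float y)
 *     {
 *         // result = { x[0], x[1], y[0], y[1] }
 *         return vec_perm(x, y, vcprm(0, 1, s0, s1));
 *     }
 *
 * TRANSPOSE8() operates on eight vector signed short rows in place, e.g.
 * TRANSPOSE8(r0, r1, r2, r3, r4, r5, r6, r7).
 */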
/** Load a potentially unaligned vector from src + offset and return it. */
static inline vector unsigned char unaligned_load(int offset, uint8_t *src)
{
    register vector unsigned char first  = vec_ld(offset, src);
    register vector unsigned char second = vec_ld(offset + 15, src);
    register vector unsigned char mask   = vec_lvsl(offset, src);
    return vec_perm(first, second, mask);
}
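/*
 * Usage sketch (illustrative, not part of the original header): a hypothetical
 * copy16_unaligned() helper that copies 16 bytes from an arbitrarily aligned
 * source to a 16-byte-aligned destination.
 *
 *     static inline void copy16_unaligned(uint8_t *dst, uint8_t *src)
 *     {
 *         vector unsigned char v = unaligned_load(0, src);
 *         vec_st(v, 0, dst);  // dst must be 16-byte aligned; vec_st truncates the address
 *     }
 */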
/**
 * Load a potentially unaligned vector using a caller-supplied permute vector
 * (e.g. computed once with vec_lvsl()). Name and parameter list assumed from
 * the body: offset, src, perm_vec.
 */
static inline vec_u8 load_with_perm_vec(int offset, const uint8_t *src, vec_u8 perm_vec)
{
    vec_u8 a = vec_ld(offset, src);
    vec_u8 b = vec_ld(offset + 15, src);
    return vec_perm(a, b, perm_vec);
}
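/*
 * Illustrative sketch (assumes the signature above): when the source alignment
 * does not change between iterations (here: stride is a multiple of 16), the
 * permute vector from vec_lvsl() can be computed once and reused for every load.
 *
 *     // hypothetical helper; dst 16-byte aligned, stride a multiple of 16
 *     static inline void copy_rows(uint8_t *dst, const uint8_t *src,
 *                                  int stride, int rows)
 *     {
 *         const vec_u8 perm = vec_lvsl(0, src);
 *         for (int i = 0; i < rows; i++) {
 *             vec_u8 v = load_with_perm_vec(i * stride, src, perm);
 *             vec_st(v, i * stride, dst);
 *         }
 *     }
 */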
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))
static const uint16_t mask[17]