27 #include "libavutil/common.h"
28 #include "libavutil/intreadwrite.h"
/**
 * RV40 6-tap subpel interpolation filters (8-wide core + 16-wide wrappers).
 *
 * The horizontal filter produces h rows of 8 output pixels; the vertical
 * filter produces 8 rows for each of w columns.  C1/C2 are the two centre
 * tap weights and SHIFT normalises the result (outer taps are fixed at
 * 1/-5/.../-5/1, so the taps sum to 1 << SHIFT).  Results are clipped to
 * 0..255 through the ff_crop_tab lookup inside OP.
 * The 16-pixel variants run the 8-pixel core on each half, then advance by
 * 8 lines and filter the remaining (h-8 / w-8) lines of both halves.
 *
 * NOTE(review): reconstructed from an extraction-mangled source — the
 * loop-counter declarations, loop braces and the dst/src advance lines had
 * been dropped; verify against upstream rv40dsp.c.
 */
#define RV40_LOWPASS(OPNAME, OP) \
static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                                     const int h, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[0 *srcStride];\
        const int src1  = src[1 *srcStride];\
        const int src2  = src[2 *srcStride];\
        const int src3  = src[3 *srcStride];\
        const int src4  = src[4 *srcStride];\
        const int src5  = src[5 *srcStride];\
        const int src6  = src[6 *srcStride];\
        const int src7  = src[7 *srcStride];\
        const int src8  = src[8 *srcStride];\
        const int src9  = src[9 *srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}
/**
 * Generate the RV40 quarter-pel motion-compensation functions for one block
 * size (8 or 16) and one operation (put/avg).
 *
 * mcXY names encode the subpel position: X = horizontal quarter, Y = vertical
 * quarter.  Pure-horizontal (Y==0) / pure-vertical (X==0) cases call one
 * lowpass filter directly; mixed cases first run a horizontal "put" pass into
 * a temporary buffer (SIZE+5 rows, to cover the 6-tap vertical support of
 * -2..+2 rows around the block), then filter that buffer vertically.
 * Tap sets: (52,20,6) = 1/4 position, (20,52,6) = 3/4, (20,20,5) = 1/2.
 *
 * NOTE(review): reconstructed from an extraction-mangled source — the brace
 * lines of every generated function had been dropped; verify against
 * upstream rv40dsp.c.
 */
#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}
/* Pixel ops for the RV40_LOWPASS instantiations: the filter result `b` is
 * clipped to 0..255 via the crop table `cm` in the enclosing scope, then
 * either stored (put) or averaged with rounding into the destination (avg).
 * NOTE(review): the RV40_LOWPASS(put_/avg_, ...) instantiation lines and the
 * matching #undefs appear to have been lost in extraction. */
#define op_avg(a, b)  a = (((a)+cm[b]+1)>>1)
#define op_put(a, b)  a = cm[b]
/**
 * Bilinear (1/2,1/2) half-pel interpolation used for the RV40 mc33 case.
 *
 * Works on the 8-pixel-wide block as two 4-byte columns (outer j loop),
 * two output rows per inner iteration, using a 32-bit SWAR average: the
 * low 2 bits (l0/l1, with 0x02020202 rounding bias added on alternate rows)
 * and the high 6 bits (h0/h1, pre-shifted) of the four packed bytes are
 * accumulated separately so the per-byte sums cannot overflow.
 * CALL_2X_PIXELS builds the 16-wide variant from two 8-wide calls.
 *
 * NOTE(review): reconstructed from an extraction-mangled source — the
 * counter declarations, the 0x02020202UL bias lines, the l1/h1 declaration
 * and the closing braces had been dropped; verify against upstream.
 */
#define PIXOP2(OPNAME, OP) \
static inline void OPNAME ## _pixels8_xy2_8_c(uint8_t *block,           \
                                              const uint8_t *pixels,    \
                                              ptrdiff_t line_size,      \
                                              int h)                    \
{                                                                       \
    int j;                                                              \
                                                                        \
    for (j = 0; j < 2; j++) {                                           \
        int i;                                                          \
        const uint32_t a = AV_RN32(pixels);                             \
        const uint32_t b = AV_RN32(pixels + 1);                         \
        uint32_t l0 = (a & 0x03030303UL) +                              \
                      (b & 0x03030303UL) +                              \
                           0x02020202UL;                                \
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) +                       \
                      ((b & 0xFCFCFCFCUL) >> 2);                        \
        uint32_t l1, h1;                                                \
                                                                        \
        pixels += line_size;                                            \
        for (i = 0; i < h; i += 2) {                                    \
            uint32_t a = AV_RN32(pixels);                               \
            uint32_t b = AV_RN32(pixels + 1);                           \
            l1 = (a & 0x03030303UL) +                                   \
                 (b & 0x03030303UL);                                    \
            h1 = ((a & 0xFCFCFCFCUL) >> 2) +                            \
                 ((b & 0xFCFCFCFCUL) >> 2);                             \
            OP(*((uint32_t *) block),                                   \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));            \
            pixels += line_size;                                        \
            block  += line_size;                                        \
            a = AV_RN32(pixels);                                        \
            b = AV_RN32(pixels + 1);                                    \
            l0 = (a & 0x03030303UL) +                                   \
                 (b & 0x03030303UL) +                                   \
                      0x02020202UL;                                     \
            h0 = ((a & 0xFCFCFCFCUL) >> 2) +                            \
                 ((b & 0xFCFCFCFCUL) >> 2);                             \
            OP(*((uint32_t *) block),                                   \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));            \
            pixels += line_size;                                        \
            block  += line_size;                                        \
        }                                                               \
        pixels += 4 - line_size * (h + 1);                              \
        block  += 4 - line_size * h;                                    \
    }                                                                   \
}                                                                       \
                                                                        \
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_8_c,                             \
               OPNAME ## _pixels8_xy2_8_c,                              \
               8)
/* Word-wide pixel ops for the PIXOP2 instantiations: `b` is a packed group
 * of four bytes; avg uses the byte-wise rounded average helper rnd_avg32.
 * NOTE(review): the PIXOP2(put_/avg_, ...) instantiation lines and the
 * matching #undefs appear to have been lost in extraction. */
#define op_avg(a, b) a = rnd_avg32(a, b)
#define op_put(a, b) a = b
/* RV40 (3/4,3/4) motion compensation: per the RV40 spec this position is a
 * plain (1/2,1/2) two-pixel average, so it maps onto the pixels_xy2 helpers
 * instead of the 6-tap lowpass chain.
 * NOTE(review): the four signatures were lost in extraction and restored
 * from the file's symbol index (put/avg_rv40_qpel16/8_mc33_c). */
static void put_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_8_c(dst, src, stride, 16);
}

static void avg_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_8_c(dst, src, stride, 16);
}

static void put_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_8_c(dst, src, stride, 8);
}

static void avg_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_8_c(dst, src, stride, 8);
}
/**
 * RV40 chroma motion compensation (4- and 8-pixel wide variants).
 *
 * Standard bilinear interpolation with weights A/B/C/D derived from the
 * 1/8-pel fractional position (x,y), plus an RV40-specific rounding bias
 * looked up from rv40_bias by quarter position; OP shifts the weighted sum
 * down by 6 (weights sum to 64).  When D == 0 the filter degenerates to a
 * two-tap filter along one axis (step picks vertical vs. horizontal).
 *
 * NOTE(review): reconstructed from an extraction-mangled source — the
 * `int i;`, if/else braces and dst/src advance lines had been dropped;
 * verify against upstream rv40dsp.c.
 */
#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}
/* Pixel ops for the RV40_CHROMA_MC instantiations: `b` is the bias-added
 * weighted sum (weights sum to 64), so >>6 normalises it; avg then blends
 * with the existing destination pixel with rounding.
 * NOTE(review): the RV40_CHROMA_MC(put_/avg_) instantiation lines appear to
 * have been lost in extraction. */
#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)
/**
 * RV40 biweight functions for size x size blocks.
 *
 * dst = (w2*src1 + w1*src2 + 16) >> 5 (note the swapped weight/source
 * pairing, matching the upstream convention).  The "rnd" variant prescales
 * each weighted source by >>9 before summing — used when the products could
 * overflow — while the "nornd" variant applies the weights directly.
 *
 * NOTE(review): reconstructed from an extraction-mangled source — the
 * `int i, j;` declarations, the per-row pointer advances and the closing
 * braces had been dropped; verify against upstream rv40dsp.c.
 */
#define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (((w2 * src1[i]) >> 9) + ((w1 * src2[i]) >> 9) + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}\
\
static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}
/** dither values for deblocking filter - left/top values */
static const uint8_t rv40_dither_l[16] = {
    0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
    0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
};

/** dither values for deblocking filter - right/bottom values */
static const uint8_t rv40_dither_r[16] = {
    0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
    0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
};
419 #define CLIP_SYMM(a, b) av_clip(a, -(b), b)
437 for (i = 0; i < 4; i++, src +=
stride) {
438 int diff_p1p0 = src[-2*
step] - src[-1*
step];
439 int diff_q1q0 = src[ 1*
step] - src[ 0*
step];
440 int diff_p1p2 = src[-2*
step] - src[-3*
step];
441 int diff_q1q2 = src[ 1*
step] - src[ 2*
step];
447 u = (alpha *
FFABS(t)) >> 7;
448 if (u > 3 - (filter_p1 && filter_q1))
452 if (filter_p1 && filter_q1)
455 diff =
CLIP_SYMM((t + 4) >> 3, lim_p0q0);
456 src[-1*
step] = cm[src[-1*
step] + diff];
457 src[ 0*
step] = cm[src[ 0*
step] - diff];
459 if (filter_p1 &&
FFABS(diff_p1p2) <= beta) {
460 t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
464 if (filter_q1 &&
FFABS(diff_q1q2) <= beta) {
465 t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
/* Weak filter across a horizontal edge: pixels across the edge are stride
 * apart, iteration along the edge is by single pixels. */
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
/* Weak filter across a vertical edge: pixels across the edge are adjacent,
 * iteration along the edge advances by stride. */
static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
501 for(i = 0; i < 4; i++, src +=
stride){
502 int sflag, p0, q0, p1, q1;
508 sflag = (alpha *
FFABS(t)) >> 7;
512 p0 = (25*src[-3*
step] + 26*src[-2*
step] + 26*src[-1*
step] +
516 q0 = (25*src[-2*
step] + 26*src[-1*
step] + 26*src[ 0*
step] +
518 rv40_dither_r[dmode + i]) >> 7;
521 p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
522 q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
525 p1 = (25*src[-4*
step] + 26*src[-3*
step] + 26*src[-2*
step] + 26*p0 +
527 q1 = (25*src[-1*
step] + 26*q0 + 26*src[ 1*
step] + 26*src[ 2*
step] +
528 25*src[ 3*
step] + rv40_dither_r[dmode + i]) >> 7;
531 p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
532 q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
542 51*src[-3*
step] + 26*src[-4*
step] + 64) >> 7;
544 51*src[ 2*
step] + 26*src[ 3*
step] + 64) >> 7;
/* Strong filter across a horizontal edge (cross-edge step = stride). */
static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}
/* Strong filter across a vertical edge (cross-edge step = 1). */
static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
}
569 int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
570 int strong0 = 0, strong1 = 0;
574 for (i = 0, ptr = src; i < 4; i++, ptr +=
stride) {
575 sum_p1p0 += ptr[-2*
step] - ptr[-1*
step];
576 sum_q1q0 += ptr[ 1*
step] - ptr[ 0*
step];
579 *p1 =
FFABS(sum_p1p0) < (beta << 2);
580 *q1 =
FFABS(sum_q1q0) < (beta << 2);
588 for (i = 0, ptr = src; i < 4; i++, ptr +=
stride) {
589 sum_p1p2 += ptr[-2*
step] - ptr[-3*
step];
590 sum_q1q2 += ptr[ 1*
step] - ptr[ 2*
step];
593 strong0 = *p1 && (
FFABS(sum_p1p2) < beta2);
594 strong1 = *q1 && (
FFABS(sum_q1q2) < beta2);
596 return strong0 && strong1;
/* Strength decision across a horizontal edge (cross-edge step = stride).
 * NOTE(review): body was lost in extraction; reconstructed by the same
 * step/stride swap pattern as the other h/v wrapper pairs — verify. */
static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
}
/* Strength decision across a vertical edge (cross-edge step = 1).
 * NOTE(review): body was lost in extraction; reconstructed by the same
 * step/stride swap pattern as the other h/v wrapper pairs — verify. */
static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
}
qpel_mc_func put_pixels_tab[4][16]
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1)
static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride, const int alpha, const int lims, const int dmode, const int chroma)
rv40_loop_filter_strength_func rv40_loop_filter_strength[2]
av_cold void ff_rv40dsp_init_arm(RV34DSPContext *c)
static av_always_inline void rv40_strong_loop_filter(uint8_t *src, const int step, const ptrdiff_t stride, const int alpha, const int lims, const int dmode, const int chroma)
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
#define RV40_LOWPASS(OPNAME, OP)
static void put_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
rv40_weak_loop_filter_func rv40_weak_loop_filter[2]
static void avg_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
rv40_weight_func rv40_weight_pixels_tab[2][2]
Biweight functions, first dimension is transform size (16/8), second is whether the weight is prescal...
av_cold void ff_rv40dsp_init_aarch64(RV34DSPContext *c)
av_cold void ff_rv34dsp_init(RV34DSPContext *c)
static av_always_inline int rv40_loop_filter_strength(uint8_t *src, int step, ptrdiff_t stride, int beta, int beta2, int edge, int *p1, int *q1)
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride, const int alpha, const int lims, const int dmode, const int chroma)
qpel_mc_func avg_pixels_tab[4][16]
RV30/40 decoder motion compensation functions.
av_cold void ff_rv40dsp_init(RV34DSPContext *c)
#define RV40_MC(OPNAME, SIZE)
static av_always_inline void rv40_weak_loop_filter(uint8_t *src, const int step, const ptrdiff_t stride, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1)
weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
Libavcodec external API header.
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
static const uint8_t rv40_dither_r[16]
dither values for deblocking filter - right/bottom values
static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride, const int filter_p1, const int filter_q1, const int alpha, const int beta, const int lim_p0q0, const int lim_q1, const int lim_p1)
static void avg_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static void put_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static const int rv40_bias[4][4]
#define RV40_WEIGHT_FUNC(size)
void ff_rv40dsp_init_x86(RV34DSPContext *c)
static const uint8_t rv40_dither_l[16]
dither values for deblocking filter - left/top values
static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride, int beta, int beta2, int edge, int *p1, int *q1)
rv40_strong_loop_filter_func rv40_strong_loop_filter[2]
static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride, int beta, int beta2, int edge, int *p1, int *q1)
#define PIXOP2(OPNAME, OP)
h264_chroma_mc_func avg_chroma_pixels_tab[3]
h264_chroma_mc_func put_chroma_pixels_tab[3]
#define RV40_CHROMA_MC(OPNAME, OP)