40 int td = av_clip(poc1 - poc0, -128, 127);
44 int tb = av_clip(poc - poc0, -128, 127);
45 int tx = (16384 + (FFABS(td) >> 1)) / td;
46 return av_clip((tb * tx + 32) >> 6, -1024, 1023);
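Lines 40-46 above compute the H.264 temporal distance scale factor: td and tb are the clipped POC distances, tx is a fixed-point reciprocal of td, and the result is the spec's DistScaleFactor clipped to [-1024, 1023]. A minimal self-contained sketch of the same arithmetic follows; clip3() and dist_scale_factor() are illustrative names, not from the source, and the caller is assumed to have ruled out td == 0, as the full function does.

    #include <stdlib.h>

    /* Illustrative stand-in for av_clip(). */
    static int clip3(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* Same arithmetic as lines 40-46.
     * Example: poc_cur=2, poc0=0, poc1=4 gives td=4, tb=2, tx=4096 and a
     * result of 128, i.e. a distance ratio of 1/2 on the 256 = 1.0 scale. */
    static int dist_scale_factor(int poc_cur, int poc0, int poc1)
    {
        int td = clip3(poc1 - poc0, -128, 127);
        int tb = clip3(poc_cur - poc0, -128, 127);
        int tx = (16384 + (abs(td) >> 1)) / td;
        return clip3((tb * tx + 32) >> 6, -1024, 1023);
    }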
58 for (field = 0; field < 2; field++) {
71 int field, int colfield, int mbafi)
74 int j, old_ref, rfield;
75 int start = mbafi ? 16 : 0;
80 memset(map[list], 0, sizeof(map[list]));
82 for (rfield = 0; rfield < 2; rfield++) {
83 for (old_ref = 0; old_ref < ref1->ref_count[colfield][list]; old_ref++) {
84 int poc = ref1->ref_poc[colfield][list][old_ref];
89 else if (interl && (poc & 3) == 3)
90 poc = (poc & ~3) + rfield + 1;
92 for (j = start; j < end; j++) {
95 int cur_ref = mbafi ? (j - 16) ^ field : j;
97 map[list][2 * old_ref + (rfield ^ field) + 16] = cur_ref;
98 if (rfield == field || !interl)
99 map[list][old_ref] = cur_ref;
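Lines 97-99 are the heart of fill_colmap(): for one reference index of the co-located (list 1) picture they record which list-0 index of the current picture points at the same frame or field. A hypothetical helper, not in the source, spelling out the two slots that get written; frame entries occupy indices 0..15 and MBAFF field entries 16..47, matching the ref_list layout noted in the glossary below.

    /* cur_ref is the matching index in the current picture's list 0. */
    static void record_col_ref(int map[16 + 32], int old_ref, int rfield,
                               int field, int interl, int cur_ref)
    {
        map[2 * old_ref + (rfield ^ field) + 16] = cur_ref;   /* MBAFF field slot */
        if (rfield == field || !interl)
            map[old_ref] = cur_ref;                           /* plain frame slot */
    }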
113 int ref1sidx = (ref1->reference & 1) ^ 1;
115 for (list = 0; list < 2; list++) {
134 FFABS(col_poc[1] - cur_poc));
146 for (list = 0; list < 2; list++) {
149 for (field = 0; field < 2; field++)
160 int ref_height = 16 * h->mb_height >> ref_field_picture;
169 FFMIN(16 * mb_y >> ref_field_picture,
171 ref_field_picture && ref_field);
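Lines 160-171 come from await_reference_mb_row(), which blocks until the frame thread decoding the reference picture has progressed far enough for the co-located data of macroblock row mb_y to be read. A sketch of the luma-row progress value that the excerpt passes to ff_thread_await_progress(); reference_row_needed() is an illustrative name, not from the source.

    /* Field pictures are only half as tall, hence the >> ref_field_picture. */
    static int reference_row_needed(int mb_y, int mb_height, int ref_field_picture)
    {
        int ref_height = 16 * mb_height >> ref_field_picture;
        int row        = 16 * mb_y >> ref_field_picture;
        return row < ref_height - 1 ? row : ref_height - 1;   /* FFMIN(row, ref_height - 1) */
    }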
180 const int16_t (*l1mv0)[2], (*l1mv1)[2];
181 const int8_t *l1ref0, *l1ref1;
182 const int is_b8x8 = IS_8X8(*mb_type);
194 #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \
195 MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM)
198 for (list = 0; list < 2; list++) {
207 ref[list] = FFMIN3((unsigned)left_ref,
210 if (ref[list] >= 0) {
216 int match_count = (left_ref == ref[list]) +
217 (top_ref == ref[list]) +
220 if (match_count > 1) {
224 assert(match_count == 1);
225 if (left_ref == ref[list])
227 else if (top_ref == ref[list])
241 if (ref[0] < 0 && ref[1] < 0) {
248 if (!(is_b8x8 | mv[0] | mv[1])) {
300 } else if (!is_b8x8 &&
325 l1mv0 += 2 * b4_stride;
326 l1mv1 += 2 * b4_stride;
332 for (i8 = 0; i8 < 4; i8++) {
335 int xy8 = x8 + y8 * b8_stride;
336 int xy4 = x8 * 3 + y8 * b4_stride;
348 ((l1ref0[xy8] == 0 &&
349 FFABS(l1mv0[xy4][0]) <= 1 &&
350 FFABS(l1mv0[xy4][1]) <= 1) ||
353 FFABS(l1mv1[xy4][0]) <= 1 &&
354 FFABS(l1mv1[xy4][1]) <= 1))) {
369 if (!is_b8x8 && !(n & 3))
380 FFABS(l1mv0[0][0]) <= 1 &&
381 FFABS(l1mv0[0][1]) <= 1) ||
382 (l1ref0[0] < 0 && !l1ref1[0] &&
383 FFABS(l1mv1[0][0]) <= 1 &&
384 FFABS(l1mv1[0][1]) <= 1 &&
399 for (i8 = 0; i8 < 4; i8++) {
400 const int x8 = i8 & 1;
401 const int y8 = i8 >> 1;
414 assert(b8_stride == 2);
421 const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
423 const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
424 if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
435 for (i4 = 0; i4 < 4; i4++) {
436 const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
437 (y8 * 2 + (i4 >> 1)) * b4_stride];
438 if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
452 if (!is_b8x8 && !(n & 15))
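The tests in lines 348-354 and 380-384 decide when spatial direct mode zeroes the predicted motion for a partition: the co-located block must reference index 0 in L0 (or, if L0 is unused there, in L1) with both vector components within one quarter-pel. A hypothetical helper, not in the source, capturing that condition.

    #include <stdint.h>
    #include <stdlib.h>

    /* ref0/ref1 and mv0/mv1 are the co-located block's L0/L1 reference
     * indices and motion vectors. */
    static int colocated_near_static(int ref0, int ref1,
                                     const int16_t mv0[2], const int16_t mv1[2])
    {
        if (ref0 == 0)
            return abs(mv0[0]) <= 1 && abs(mv0[1]) <= 1;
        if (ref0 < 0 && ref1 == 0)                 /* L0 unused: fall back to L1 */
            return abs(mv1[0]) <= 1 && abs(mv1[1]) <= 1;
        return 0;
    }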
465 const int16_t (*l1mv0)[2], (*l1mv1)[2];
466 const int8_t *l1ref0, *l1ref1;
467 const int is_b8x8 = IS_8X8(*mb_type);
522 } else if (!is_b8x8 &&
548 l1mv0 += 2 * b4_stride;
549 l1mv1 += 2 * b4_stride;
564 ref_offset = (h->ref_list[1][0].mbaff << 4) & (mb_type_col[0] >> 3);
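Line 564 is a bit trick: h->ref_list[1][0].mbaff << 4 is 16 when the list-1 reference picture is MBAFF, and mb_type_col[0] >> 3 moves the interlaced mb-type flag down onto the same bit, so the AND yields 16 exactly when both hold; 16 is where the MBAFF field entries of map_col_to_list0 begin. A spelled-out equivalent, assuming FFmpeg's MB_TYPE_INTERLACED flag is the 1 << 7 bit; the function name is illustrative, not from the source.

    static int ref_offset_spelled_out(int list1_ref_is_mbaff, unsigned mb_type_col)
    {
        int col_mb_is_interlaced = (mb_type_col & (1u << 7)) != 0;     /* MB_TYPE_INTERLACED */
        return (list1_ref_is_mbaff && col_mb_is_interlaced) ? 16 : 0;  /* field entries start at 16 */
    }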
570 for (i8 = 0; i8 < 4; i8++) {
571 const int x8 = i8 & 1;
572 const int y8 = i8 >> 1;
574 const int16_t (*l1mv)[2] = l1mv0;
588 ref0 = l1ref0[x8 + y8 * b8_stride];
590 ref0 = map_col_to_list0[0][ref0 + ref_offset];
592 ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
596 scale = dist_scale_factor[ref0];
601 const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
602 int my_col = (mv_col[1] << y_shift) / 2;
603 int mx = (scale * mv_col[0] + 128) >> 8;
604 int my = (scale * my_col + 128) >> 8;
623 const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
624 : map_col_to_list0[1][l1ref1[0] + ref_offset];
625 const int scale = dist_scale_factor[ref0];
626 const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
628 mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
629 mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
632 mv1 = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
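Lines 623-632 are the 16x16 temporal-direct case: the co-located list-1 vector is scaled into the list-0 vector with the distance scale factor, and the list-1 vector is the leftover difference, i.e. mvL0 = (DistScaleFactor * mvCol + 128) >> 8 and mvL1 = mvL0 - mvCol. A minimal sketch with illustrative names, not from the source.

    #include <stdint.h>

    static void temporal_direct_mv(int scale, const int16_t mv_col[2],
                                   int16_t mv_l0[2], int16_t mv_l1[2])
    {
        for (int c = 0; c < 2; c++) {
            mv_l0[c] = (scale * mv_col[c] + 128) >> 8;   /* scaled co-located MV */
            mv_l1[c] = mv_l0[c] - mv_col[c];             /* backward MV is the remainder */
        }
    }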
638 for (i8 = 0; i8 < 4; i8++) {
639 const int x8 = i8 & 1;
640 const int y8 = i8 >> 1;
642 const int16_t (*l1mv)[2] = l1mv0;
655 assert(b8_stride == 2);
658 ref0 = map_col_to_list0[0][ref0 + ref_offset];
660 ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
663 scale = dist_scale_factor[ref0];
668 const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
669 int mx = (scale * mv_col[0] + 128) >> 8;
670 int my = (scale * mv_col[1] + 128) >> 8;
674 pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
676 for (i4 = 0; i4 < 4; i4++) {
677 const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
678 (y8 * 2 + (i4 >> 1)) * b4_stride];
680 mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
681 mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
684 mv_l0[1] - mv_col[1]));
int long_ref
1 -> long term reference, 0 -> short term reference
void ff_h264_direct_dist_scale_factor(H264Context *const h)
int16_t (*motion_val[2])[2]
int16_t mv_cache[2][5 * 8][2]
Motion vector cache.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int slice_type_nos
S-free slice type (SI/SP are remapped to I/P)
static av_always_inline uint32_t pack16to32(int a, int b)
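pack16to32() stores both 16-bit motion vector components in one 32-bit word so fill_rectangle() can write whole vectors at once. A sketch of the idea; the real helper also swaps the component order on big-endian builds, and pack_mv is an illustrative name, not from the source.

    #include <stdint.h>

    static inline uint32_t pack_mv(int mx, int my)
    {
        return ((uint32_t)mx & 0xFFFF) | ((uint32_t)my << 16);
    }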
void ff_h264_pred_direct_motion(H264Context *const h, int *mb_type)
static void await_reference_mb_row(H264Context *const h, H264Picture *ref, int mb_y)
int field_picture
whether or not the picture was encoded in separate fields
Multithreading support functions.
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
#define PICT_BOTTOM_FIELD
static void pred_temp_direct_motion(H264Context *const h, int *mb_type)
H.264 / AVC / MPEG4 part10 codec.
void ff_h264_direct_ref_list_init(H264Context *const h)
static const uint16_t mask[17]
int active_thread_type
Which multithreading methods are in use by the codec.
int direct_spatial_mv_pred
int ref_poc[2][2][32]
POCs of the frames used as reference (FIXME need per slice)
int frame_num
frame_num (raw frame_num from slice header)
Libavcodec external API header.
int map_col_to_list0[2][16+32]
#define MB_TYPE_16x16_OR_INTRA
static const uint8_t scan8[16 * 3 + 3]
#define MB_TYPE_INTERLACED
static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t val, int size)
useful rectangle filling function
int dist_scale_factor[32]
H264Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
int direct_8x8_inference_flag
static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
#define PART_NOT_AVAILABLE
static const int8_t mv[256][2]
static void pred_spatial_direct_motion(H264Context *const h, int *mb_type)
int field_poc[2]
top/bottom POC
#define FF_THREAD_FRAME
Decode more than one frame at once.
H264Picture * cur_pic_ptr
int mbaff
1 -> MBAFF frame, 0 -> not MBAFF
int dist_scale_factor_field[2][32]
common internal api header.
int ref_count[2][2]
number of entries in ref_poc (FIXME need per slice)
int map_col_to_list0_field[2][2][16+32]
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi)
int8_t ref_cache[2][5 * 8]