svq3.c
1 /*
2  * Copyright (c) 2003 The Libav Project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
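For illustration only (not part of svq3.c), a minimal caller-side sketch of the setup described in the comment above. It assumes image_desc and image_desc_size hold the raw ImageDescription atom as read from the stsd, i.e. starting with its 4-byte length field; both names are hypothetical.

/* Hypothetical caller setup; image_desc / image_desc_size are assumptions. */
AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_SVQ3);
AVCodecContext *avctx = avcodec_alloc_context3(codec);

/* Skip the 4-byte atom length so extradata starts with 'S', 'V', 'Q', '3'. */
avctx->extradata_size = image_desc_size - 4;
avctx->extradata = av_mallocz(avctx->extradata_size +
                              FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(avctx->extradata, image_desc + 4, avctx->extradata_size);

if (avcodec_open2(avctx, codec, NULL) < 0)
    return -1; /* decoder could not be opened */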
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264.h"
50 
51 #include "h264data.h" // FIXME FIXME FIXME
52 
53 #include "h264_mvpred.h"
54 #include "golomb.h"
55 #include "hpeldsp.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58 
59 #if CONFIG_ZLIB
60 #include <zlib.h>
61 #endif
62 
63 #include "svq1.h"
64 #include "svq3.h"
65 
71 typedef struct {
72  H264Context h;
73  HpelDSPContext hdsp;
74  TpelDSPContext tdsp;
75  H264Picture *cur_pic;
76  H264Picture *next_pic;
77  H264Picture *last_pic;
78  int halfpel_flag;
79  int thirdpel_flag;
80  int unknown_flag;
81  int next_slice_index;
82  uint32_t watermark_key;
83  int adaptive_quant;
84  int next_p_frame_damaged;
85  int h_edge_pos;
86  int v_edge_pos;
87  int last_frame_output;
88 } SVQ3Context;
89 
90 #define FULLPEL_MODE 1
91 #define HALFPEL_MODE 2
92 #define THIRDPEL_MODE 3
93 #define PREDICT_MODE 4
94 
95 /* dual scan (from some older h264 draft)
96  *  o-->o-->o   o
97  *          |  /|
98  *  o   o   o / o
99  *  | / |   |/  |
100  *  o   o   o   o
101  *    /
102  *  o-->o-->o-->o
103  */
104 static const uint8_t svq3_scan[16] = {
105  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
106  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
107  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
108  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
109 };
110 
111 static const uint8_t luma_dc_zigzag_scan[16] = {
112  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
113  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
114  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
115  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
116 };
117 
118 static const uint8_t svq3_pred_0[25][2] = {
119  { 0, 0 },
120  { 1, 0 }, { 0, 1 },
121  { 0, 2 }, { 1, 1 }, { 2, 0 },
122  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
123  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
124  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
125  { 2, 4 }, { 3, 3 }, { 4, 2 },
126  { 4, 3 }, { 3, 4 },
127  { 4, 4 }
128 };
129 
130 static const int8_t svq3_pred_1[6][6][5] = {
131  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
132  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
133  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
134  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
135  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
136  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
137  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
138  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
139  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
140  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
141  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
142  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
143 };
144 
145 static const struct {
146  uint8_t run;
147  uint8_t level;
148 } svq3_dct_tables[2][16] = {
149  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
150  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
151  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
152  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
153 };
154 
155 static const uint32_t svq3_dequant_coeff[32] = {
156  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
157  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
158  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
159  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
160 };
161 
162 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
163 {
164  const int qmul = svq3_dequant_coeff[qp];
165 #define stride 16
166  int i;
167  int temp[16];
168  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
169 
170  for (i = 0; i < 4; i++) {
171  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
172  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
173  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
174  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
175 
176  temp[4 * i + 0] = z0 + z3;
177  temp[4 * i + 1] = z1 + z2;
178  temp[4 * i + 2] = z1 - z2;
179  temp[4 * i + 3] = z0 - z3;
180  }
181 
182  for (i = 0; i < 4; i++) {
183  const int offset = x_offset[i];
184  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
185  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
186  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
187  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
188 
189  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
190  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
191  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
192  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
193  }
194 }
195 #undef stride
196 
197 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
198  int stride, int qp, int dc)
199 {
200  const int qmul = svq3_dequant_coeff[qp];
201  int i;
202 
203  if (dc) {
204  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
205  : qmul * (block[0] >> 3) / 2);
206  block[0] = 0;
207  }
208 
209  for (i = 0; i < 4; i++) {
210  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
211  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
212  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
213  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
214 
215  block[0 + 4 * i] = z0 + z3;
216  block[1 + 4 * i] = z1 + z2;
217  block[2 + 4 * i] = z1 - z2;
218  block[3 + 4 * i] = z0 - z3;
219  }
220 
221  for (i = 0; i < 4; i++) {
222  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
223  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
224  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
225  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
226  const int rr = (dc + 0x80000);
227 
228  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
229  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
230  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
231  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
232  }
233 
234  memset(block, 0, 16 * sizeof(int16_t));
235 }
236 
237 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
238  int index, const int type)
239 {
240  static const uint8_t *const scan_patterns[4] =
241  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
242 
243  int run, level, limit;
244  unsigned vlc;
245  const int intra = 3 * type >> 2;
246  const uint8_t *const scan = scan_patterns[type];
247 
248  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
249  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
250  int sign = (vlc & 1) ? 0 : -1;
251  vlc = vlc + 1 >> 1;
252 
253  if (type == 3) {
254  if (vlc < 3) {
255  run = 0;
256  level = vlc;
257  } else if (vlc < 4) {
258  run = 1;
259  level = 1;
260  } else {
261  run = vlc & 0x3;
262  level = (vlc + 9 >> 2) - run;
263  }
264  } else {
265  if (vlc < 16) {
266  run = svq3_dct_tables[intra][vlc].run;
267  level = svq3_dct_tables[intra][vlc].level;
268  } else if (intra) {
269  run = vlc & 0x7;
270  level = (vlc >> 3) +
271  ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
272  } else {
273  run = vlc & 0xF;
274  level = (vlc >> 4) +
275  ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
276  }
277  }
278 
279  if ((index += run) >= limit)
280  return -1;
281 
282  block[scan[index]] = (level ^ sign) - sign;
283  }
284 
285  if (type != 2) {
286  break;
287  }
288  }
289 
290  return 0;
291 }
292 
293 static inline void svq3_mc_dir_part(SVQ3Context *s,
294  int x, int y, int width, int height,
295  int mx, int my, int dxy,
296  int thirdpel, int dir, int avg)
297 {
298  H264Context *h = &s->h;
299  const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
300  uint8_t *src, *dest;
301  int i, emu = 0;
302  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
303 
304  mx += x;
305  my += y;
306 
307  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
308  my < 0 || my >= s->v_edge_pos - height - 1) {
309  emu = 1;
310  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
311  my = av_clip(my, -16, s->v_edge_pos - height + 15);
312  }
313 
314  /* form component predictions */
315  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
316  src = pic->f.data[0] + mx + my * h->linesize;
317 
318  if (emu) {
319  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
320  h->linesize, h->linesize,
321  width + 1, height + 1,
322  mx, my, s->h_edge_pos, s->v_edge_pos);
323  src = h->edge_emu_buffer;
324  }
325  if (thirdpel)
326  (avg ? s->tdsp.avg_tpel_pixels_tab
327  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
328  width, height);
329  else
330  (avg ? s->hdsp.avg_pixels_tab
331  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
332  height);
333 
334  if (!(h->flags & CODEC_FLAG_GRAY)) {
335  mx = mx + (mx < (int) x) >> 1;
336  my = my + (my < (int) y) >> 1;
337  width = width >> 1;
338  height = height >> 1;
339  blocksize++;
340 
341  for (i = 1; i < 3; i++) {
342  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
343  src = pic->f.data[i] + mx + my * h->uvlinesize;
344 
345  if (emu) {
346  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
347  h->uvlinesize, h->uvlinesize,
348  width + 1, height + 1,
349  mx, my, (s->h_edge_pos >> 1),
350  s->v_edge_pos >> 1);
351  src = h->edge_emu_buffer;
352  }
353  if (thirdpel)
354  (avg ? s->tdsp.avg_tpel_pixels_tab
355  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
356  h->uvlinesize,
357  width, height);
358  else
359  (avg ? s->hdsp.avg_pixels_tab
360  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
361  h->uvlinesize,
362  height);
363  }
364  }
365 }
366 
367 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
368  int dir, int avg)
369 {
370  int i, j, k, mx, my, dx, dy, x, y;
371  H264Context *h = &s->h;
372  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
373  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
374  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
375  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
376  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
377 
378  for (i = 0; i < 16; i += part_height)
379  for (j = 0; j < 16; j += part_width) {
380  const int b_xy = (4 * h->mb_x + (j >> 2)) +
381  (4 * h->mb_y + (i >> 2)) * h->b_stride;
382  int dxy;
383  x = 16 * h->mb_x + j;
384  y = 16 * h->mb_y + i;
385  k = (j >> 2 & 1) + (i >> 1 & 2) +
386  (j >> 1 & 4) + (i & 8);
387 
388  if (mode != PREDICT_MODE) {
389  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
390  } else {
391  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
392  my = s->next_pic->motion_val[0][b_xy][1] << 1;
393 
394  if (dir == 0) {
395  mx = mx * h->frame_num_offset /
396  h->prev_frame_num_offset + 1 >> 1;
397  my = my * h->frame_num_offset /
398  h->prev_frame_num_offset + 1 >> 1;
399  } else {
400  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
401  h->prev_frame_num_offset + 1 >> 1;
402  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
403  h->prev_frame_num_offset + 1 >> 1;
404  }
405  }
406 
407  /* clip motion vector prediction to frame border */
408  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
409  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
410 
411  /* get (optional) motion vector differential */
412  if (mode == PREDICT_MODE) {
413  dx = dy = 0;
414  } else {
415  dy = svq3_get_se_golomb(&h->gb);
416  dx = svq3_get_se_golomb(&h->gb);
417 
418  if (dx == INVALID_VLC || dy == INVALID_VLC) {
419  av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
420  return -1;
421  }
422  }
423 
424  /* compute motion vector */
425  if (mode == THIRDPEL_MODE) {
426  int fx, fy;
427  mx = (mx + 1 >> 1) + dx;
428  my = (my + 1 >> 1) + dy;
429  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
430  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
431  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
432 
433  svq3_mc_dir_part(s, x, y, part_width, part_height,
434  fx, fy, dxy, 1, dir, avg);
435  mx += mx;
436  my += my;
437  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
438  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
439  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
440  dxy = (mx & 1) + 2 * (my & 1);
441 
442  svq3_mc_dir_part(s, x, y, part_width, part_height,
443  mx >> 1, my >> 1, dxy, 0, dir, avg);
444  mx *= 3;
445  my *= 3;
446  } else {
447  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
448  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
449 
450  svq3_mc_dir_part(s, x, y, part_width, part_height,
451  mx, my, 0, 0, dir, avg);
452  mx *= 6;
453  my *= 6;
454  }
455 
456  /* update mv_cache */
457  if (mode != PREDICT_MODE) {
458  int32_t mv = pack16to32(mx, my);
459 
460  if (part_height == 8 && i < 8) {
461  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
462 
463  if (part_width == 8 && j < 8)
464  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
465  }
466  if (part_width == 8 && j < 8)
467  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
468  if (part_width == 4 || part_height == 4)
469  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
470  }
471 
472  /* write back motion vectors */
473  fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
474  part_width >> 2, part_height >> 2, h->b_stride,
475  pack16to32(mx, my), 4);
476  }
477 
478  return 0;
479 }
480 
481 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
482 {
483  H264Context *h = &s->h;
484  int i, j, k, m, dir, mode;
485  int cbp = 0;
486  uint32_t vlc;
487  int8_t *top, *left;
488  const int mb_xy = h->mb_xy;
489  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
490 
491  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
492  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
493  h->topright_samples_available = 0xFFFF;
494 
495  if (mb_type == 0) { /* SKIP */
496  if (h->pict_type == AV_PICTURE_TYPE_P ||
497  s->next_pic->mb_type[mb_xy] == -1) {
498  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
499  0, 0, 0, 0, 0, 0);
500 
501  if (h->pict_type == AV_PICTURE_TYPE_B)
502  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
503  0, 0, 0, 0, 1, 1);
504 
505  mb_type = MB_TYPE_SKIP;
506  } else {
507  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
508  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
509  return -1;
510  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
511  return -1;
512 
513  mb_type = MB_TYPE_16x16;
514  }
515  } else if (mb_type < 8) { /* INTER */
516  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
517  mode = THIRDPEL_MODE;
518  else if (s->halfpel_flag &&
519  s->thirdpel_flag == !get_bits1(&h->gb))
520  mode = HALFPEL_MODE;
521  else
522  mode = FULLPEL_MODE;
523 
524  /* fill caches */
525  /* note ref_cache should contain here:
526  * ????????
527  * ???11111
528  * N??11111
529  * N??11111
530  * N??11111
531  */
532 
533  for (m = 0; m < 2; m++) {
534  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
535  for (i = 0; i < 4; i++)
536  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
537  h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
538  } else {
539  for (i = 0; i < 4; i++)
540  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
541  }
542  if (h->mb_y > 0) {
543  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
544  h->cur_pic.motion_val[m][b_xy - h->b_stride],
545  4 * 2 * sizeof(int16_t));
546  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
547  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
548 
549  if (h->mb_x < h->mb_width - 1) {
550  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
551  h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
552  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
553  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
554  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
555  } else
556  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
557  if (h->mb_x > 0) {
558  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
559  h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
560  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
561  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
562  } else
563  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
564  } else
565  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
566  PART_NOT_AVAILABLE, 8);
567 
568  if (h->pict_type != AV_PICTURE_TYPE_B)
569  break;
570  }
571 
572  /* decode motion vector(s) and form prediction(s) */
573  if (h->pict_type == AV_PICTURE_TYPE_P) {
574  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
575  return -1;
576  } else { /* AV_PICTURE_TYPE_B */
577  if (mb_type != 2) {
578  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
579  return -1;
580  } else {
581  for (i = 0; i < 4; i++)
582  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
583  0, 4 * 2 * sizeof(int16_t));
584  }
585  if (mb_type != 1) {
586  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
587  return -1;
588  } else {
589  for (i = 0; i < 4; i++)
590  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
591  0, 4 * 2 * sizeof(int16_t));
592  }
593  }
594 
595  mb_type = MB_TYPE_16x16;
596  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
597  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
598 
599  if (mb_type == 8) {
600  if (h->mb_x > 0) {
601  for (i = 0; i < 4; i++)
602  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
603  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
604  h->left_samples_available = 0x5F5F;
605  }
606  if (h->mb_y > 0) {
607  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
608  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
609  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
610  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
611 
612  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
613  h->top_samples_available = 0x33FF;
614  }
615 
616  /* decode prediction codes for luma blocks */
617  for (i = 0; i < 16; i += 2) {
618  vlc = svq3_get_ue_golomb(&h->gb);
619 
620  if (vlc >= 25) {
622  "luma prediction:%"PRIu32"\n", vlc);
623  return -1;
624  }
625 
626  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
627  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
628 
629  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
630  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
631 
632  if (left[1] == -1 || left[2] == -1) {
633  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
634  return -1;
635  }
636  }
637  } else { /* mb_type == 33, DC_128_PRED block type */
638  for (i = 0; i < 4; i++)
639  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
640  }
641 
642  write_back_intra_pred_mode(h);
643 
644  if (mb_type == 8) {
645  ff_h264_check_intra4x4_pred_mode(h);
646 
647  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
648  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
649  } else {
650  for (i = 0; i < 4; i++)
651  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
652 
653  h->top_samples_available = 0x33FF;
654  h->left_samples_available = 0x5F5F;
655  }
656 
657  mb_type = MB_TYPE_INTRA4x4;
658  } else { /* INTRA16x16 */
659  dir = i_mb_type_info[mb_type - 8].pred_mode;
660  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
661 
662  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
663  av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
664  return h->intra16x16_pred_mode;
665  }
666 
667  cbp = i_mb_type_info[mb_type - 8].cbp;
668  mb_type = MB_TYPE_INTRA16x16;
669  }
670 
671  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
672  for (i = 0; i < 4; i++)
673  memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
674  0, 4 * 2 * sizeof(int16_t));
675  if (h->pict_type == AV_PICTURE_TYPE_B) {
676  for (i = 0; i < 4; i++)
677  memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
678  0, 4 * 2 * sizeof(int16_t));
679  }
680  }
681  if (!IS_INTRA4x4(mb_type)) {
682  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
683  }
684  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
685  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
686  }
687 
688  if (!IS_INTRA16x16(mb_type) &&
689  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
690  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
691  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
692  return -1;
693  }
694 
695  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
696  : golomb_to_inter_cbp[vlc];
697  }
698  if (IS_INTRA16x16(mb_type) ||
699  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
700  h->qscale += svq3_get_se_golomb(&h->gb);
701 
702  if (h->qscale > 31u) {
703  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
704  return -1;
705  }
706  }
707  if (IS_INTRA16x16(mb_type)) {
708  AV_ZERO128(h->mb_luma_dc[0] + 0);
709  AV_ZERO128(h->mb_luma_dc[0] + 8);
710  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
712  "error while decoding intra luma dc\n");
713  return -1;
714  }
715  }
716 
717  if (cbp) {
718  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
719  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
720 
721  for (i = 0; i < 4; i++)
722  if ((cbp & (1 << i))) {
723  for (j = 0; j < 4; j++) {
724  k = index ? (1 * (j & 1) + 2 * (i & 1) +
725  2 * (j & 2) + 4 * (i & 2))
726  : (4 * i + j);
727  h->non_zero_count_cache[scan8[k]] = 1;
728 
729  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
731  "error while decoding block\n");
732  return -1;
733  }
734  }
735  }
736 
737  if ((cbp & 0x30)) {
738  for (i = 1; i < 3; ++i)
739  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
741  "error while decoding chroma dc block\n");
742  return -1;
743  }
744 
745  if ((cbp & 0x20)) {
746  for (i = 1; i < 3; i++) {
747  for (j = 0; j < 4; j++) {
748  k = 16 * i + j;
749  h->non_zero_count_cache[scan8[k]] = 1;
750 
751  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
753  "error while decoding chroma ac block\n");
754  return -1;
755  }
756  }
757  }
758  }
759  }
760  }
761 
762  h->cbp = cbp;
763  h->cur_pic.mb_type[mb_xy] = mb_type;
764 
765  if (IS_INTRA(mb_type))
766  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
767 
768  return 0;
769 }
770 
771 static int svq3_decode_slice_header(AVCodecContext *avctx)
772 {
773  SVQ3Context *s = avctx->priv_data;
774  H264Context *h = &s->h;
775  const int mb_xy = h->mb_xy;
776  int i, header;
777  unsigned slice_id;
778 
779  header = get_bits(&h->gb, 8);
780 
781  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
782  /* TODO: what? */
783  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
784  return -1;
785  } else {
786  int length = header >> 5 & 3;
787 
788  s->next_slice_index = get_bits_count(&h->gb) +
789  8 * show_bits(&h->gb, 8 * length) +
790  8 * length;
791 
792  if (s->next_slice_index > h->gb.size_in_bits) {
793  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
794  return -1;
795  }
796 
797  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
798  skip_bits(&h->gb, 8);
799 
800  if (s->watermark_key) {
801  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
802  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
803  header ^ s->watermark_key);
804  }
805  if (length > 0) {
806  memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
807  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
808  }
809  skip_bits_long(&h->gb, 0);
810  }
811 
812  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
813  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
814  return -1;
815  }
816 
817  h->slice_type = golomb_to_pict_type[slice_id];
818 
819  if ((header & 0x9F) == 2) {
820  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
821  h->mb_skip_run = get_bits(&h->gb, i) -
822  (h->mb_y * h->mb_width + h->mb_x);
823  } else {
824  skip_bits1(&h->gb);
825  h->mb_skip_run = 0;
826  }
827 
828  h->slice_num = get_bits(&h->gb, 8);
829  h->qscale = get_bits(&h->gb, 5);
830  s->adaptive_quant = get_bits1(&h->gb);
831 
832  /* unknown fields */
833  skip_bits1(&h->gb);
834 
835  if (s->unknown_flag)
836  skip_bits1(&h->gb);
837 
838  skip_bits1(&h->gb);
839  skip_bits(&h->gb, 2);
840 
841  while (get_bits1(&h->gb))
842  skip_bits(&h->gb, 8);
843 
844  /* reset intra predictors and invalidate motion vector references */
845  if (h->mb_x > 0) {
846  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
847  -1, 4 * sizeof(int8_t));
848  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
849  -1, 8 * sizeof(int8_t) * h->mb_x);
850  }
851  if (h->mb_y > 0) {
852  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
853  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
854 
855  if (h->mb_x > 0)
856  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
857  }
858 
859  return 0;
860 }
861 
862 static av_cold int svq3_decode_init(AVCodecContext *avctx)
863 {
864  SVQ3Context *s = avctx->priv_data;
865  H264Context *h = &s->h;
866  int m;
867  unsigned char *extradata;
868  unsigned char *extradata_end;
869  unsigned int size;
870  int marker_found = 0;
871 
872  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
873  s->last_pic = av_mallocz(sizeof(*s->last_pic));
874  s->next_pic = av_mallocz(sizeof(*s->next_pic));
875  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
876  av_freep(&s->cur_pic);
877  av_freep(&s->last_pic);
878  av_freep(&s->next_pic);
879  return AVERROR(ENOMEM);
880  }
881 
882  if (ff_h264_decode_init(avctx) < 0)
883  return -1;
884 
885  ff_hpeldsp_init(&s->hdsp, avctx->flags);
886  ff_tpeldsp_init(&s->tdsp);
887 
888  h->flags = avctx->flags;
889  h->is_complex = 1;
890  h->picture_structure = PICT_FRAME;
891  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
892  avctx->color_range = AVCOL_RANGE_JPEG;
893 
894  h->chroma_qp[0] = h->chroma_qp[1] = 4;
895  h->chroma_x_shift = h->chroma_y_shift = 1;
896 
897  s->halfpel_flag = 1;
898  s->thirdpel_flag = 1;
899  s->unknown_flag = 0;
900 
901  /* prowl for the "SEQH" marker in the extradata */
902  extradata = (unsigned char *)avctx->extradata;
903  extradata_end = avctx->extradata + avctx->extradata_size;
904  if (extradata) {
905  for (m = 0; m + 8 < avctx->extradata_size; m++) {
906  if (!memcmp(extradata, "SEQH", 4)) {
907  marker_found = 1;
908  break;
909  }
910  extradata++;
911  }
912  }
913 
914  /* if a match was found, parse the extra data */
915  if (marker_found) {
916  GetBitContext gb;
917  int frame_size_code;
918 
919  size = AV_RB32(&extradata[4]);
920  if (size > extradata_end - extradata - 8)
921  return AVERROR_INVALIDDATA;
922  init_get_bits(&gb, extradata + 8, size * 8);
923 
924  /* 'frame size code' and optional 'width, height' */
925  frame_size_code = get_bits(&gb, 3);
926  switch (frame_size_code) {
927  case 0:
928  avctx->width = 160;
929  avctx->height = 120;
930  break;
931  case 1:
932  avctx->width = 128;
933  avctx->height = 96;
934  break;
935  case 2:
936  avctx->width = 176;
937  avctx->height = 144;
938  break;
939  case 3:
940  avctx->width = 352;
941  avctx->height = 288;
942  break;
943  case 4:
944  avctx->width = 704;
945  avctx->height = 576;
946  break;
947  case 5:
948  avctx->width = 240;
949  avctx->height = 180;
950  break;
951  case 6:
952  avctx->width = 320;
953  avctx->height = 240;
954  break;
955  case 7:
956  avctx->width = get_bits(&gb, 12);
957  avctx->height = get_bits(&gb, 12);
958  break;
959  }
960 
961  s->halfpel_flag = get_bits1(&gb);
962  s->thirdpel_flag = get_bits1(&gb);
963 
964  /* unknown fields */
965  skip_bits1(&gb);
966  skip_bits1(&gb);
967  skip_bits1(&gb);
968  skip_bits1(&gb);
969 
970  h->low_delay = get_bits1(&gb);
971 
972  /* unknown field */
973  skip_bits1(&gb);
974 
975  while (get_bits1(&gb))
976  skip_bits(&gb, 8);
977 
978  s->unknown_flag = get_bits1(&gb);
979  avctx->has_b_frames = !h->low_delay;
980  if (s->unknown_flag) {
981 #if CONFIG_ZLIB
982  unsigned watermark_width = svq3_get_ue_golomb(&gb);
983  unsigned watermark_height = svq3_get_ue_golomb(&gb);
984  int u1 = svq3_get_ue_golomb(&gb);
985  int u2 = get_bits(&gb, 8);
986  int u3 = get_bits(&gb, 2);
987  int u4 = svq3_get_ue_golomb(&gb);
988  unsigned long buf_len = watermark_width *
989  watermark_height * 4;
990  int offset = get_bits_count(&gb) + 7 >> 3;
991  uint8_t *buf;
992 
993  if (watermark_height > 0 &&
994  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
995  return -1;
996 
997  buf = av_malloc(buf_len);
998  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
999  watermark_width, watermark_height);
1000  av_log(avctx, AV_LOG_DEBUG,
1001  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1002  u1, u2, u3, u4, offset);
1003  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1004  size - offset) != Z_OK) {
1005  av_log(avctx, AV_LOG_ERROR,
1006  "could not uncompress watermark logo\n");
1007  av_free(buf);
1008  return -1;
1009  }
1010  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1011  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1012  av_log(avctx, AV_LOG_DEBUG,
1013  "watermark key %#"PRIx32"\n", s->watermark_key);
1014  av_free(buf);
1015 #else
1016  av_log(avctx, AV_LOG_ERROR,
1017  "this svq3 file contains watermark which need zlib support compiled in\n");
1018  return -1;
1019 #endif
1020  }
1021  }
1022 
1023  h->width = avctx->width;
1024  h->height = avctx->height;
1025  h->mb_width = (h->width + 15) / 16;
1026  h->mb_height = (h->height + 15) / 16;
1027  h->mb_stride = h->mb_width + 1;
1028  h->mb_num = h->mb_width * h->mb_height;
1029  h->b_stride = 4 * h->mb_width;
1030  s->h_edge_pos = h->mb_width * 16;
1031  s->v_edge_pos = h->mb_height * 16;
1032 
1033  if (ff_h264_alloc_tables(h) < 0) {
1034  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1035  return AVERROR(ENOMEM);
1036  }
1037 
1038  return 0;
1039 }
1040 
1041 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1042 {
1043  int i;
1044  for (i = 0; i < 2; i++) {
1045  av_buffer_unref(&pic->motion_val_buf[i]);
1046  av_buffer_unref(&pic->ref_index_buf[i]);
1047  }
1048  av_buffer_unref(&pic->mb_type_buf);
1049 
1050  av_frame_unref(&pic->f);
1051 }
1052 
1053 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1054 {
1055  SVQ3Context *s = avctx->priv_data;
1056  H264Context *h = &s->h;
1057  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1058  const int mb_array_size = h->mb_stride * h->mb_height;
1059  const int b4_stride = h->mb_width * 4 + 1;
1060  const int b4_array_size = b4_stride * h->mb_height * 4;
1061  int ret;
1062 
1063  if (!pic->motion_val_buf[0]) {
1064  int i;
1065 
1066  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1067  if (!pic->mb_type_buf)
1068  return AVERROR(ENOMEM);
1069  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1070 
1071  for (i = 0; i < 2; i++) {
1072  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1073  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1074  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1075  ret = AVERROR(ENOMEM);
1076  goto fail;
1077  }
1078 
1079  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1080  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1081  }
1082  }
1083  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1084 
1085  ret = ff_get_buffer(avctx, &pic->f,
1086  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1087  if (ret < 0)
1088  goto fail;
1089 
1090  if (!h->edge_emu_buffer) {
1091  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1092  if (!h->edge_emu_buffer)
1093  return AVERROR(ENOMEM);
1094  }
1095 
1096  h->linesize = pic->f.linesize[0];
1097  h->uvlinesize = pic->f.linesize[1];
1098 
1099  return 0;
1100 fail:
1101  free_picture(avctx, pic);
1102  return ret;
1103 }
1104 
1105 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1106  int *got_frame, AVPacket *avpkt)
1107 {
1108  const uint8_t *buf = avpkt->data;
1109  SVQ3Context *s = avctx->priv_data;
1110  H264Context *h = &s->h;
1111  int buf_size = avpkt->size;
1112  int ret, m, i;
1113 
1114  /* special case for last picture */
1115  if (buf_size == 0) {
1116  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1117  ret = av_frame_ref(data, &s->next_pic->f);
1118  if (ret < 0)
1119  return ret;
1120  s->last_frame_output = 1;
1121  *got_frame = 1;
1122  }
1123  return 0;
1124  }
1125 
1126  init_get_bits(&h->gb, buf, 8 * buf_size);
1127 
1128  h->mb_x = h->mb_y = h->mb_xy = 0;
1129 
1130  if (svq3_decode_slice_header(avctx))
1131  return -1;
1132 
1133  h->pict_type = h->slice_type;
1134 
1135  if (h->pict_type != AV_PICTURE_TYPE_B)
1136  FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1137 
1138  av_frame_unref(&s->cur_pic->f);
1139 
1140  /* for skipping the frame */
1141  s->cur_pic->f.pict_type = h->pict_type;
1142  s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1143 
1144  ret = get_buffer(avctx, s->cur_pic);
1145  if (ret < 0)
1146  return ret;
1147 
1148  h->cur_pic_ptr = s->cur_pic;
1149  av_frame_unref(&h->cur_pic.f);
1150  h->cur_pic = *s->cur_pic;
1151  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1152  if (ret < 0)
1153  return ret;
1154 
1155  for (i = 0; i < 16; i++) {
1156  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1157  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1158  }
1159  for (i = 0; i < 16; i++) {
1160  h->block_offset[16 + i] =
1161  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1162  h->block_offset[48 + 16 + i] =
1163  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1164  }
1165 
1166  if (h->pict_type != AV_PICTURE_TYPE_I) {
1167  if (!s->last_pic->f.data[0]) {
1168  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1169  ret = get_buffer(avctx, s->last_pic);
1170  if (ret < 0)
1171  return ret;
1172  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1173  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1174  s->last_pic->f.linesize[1]);
1175  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1176  s->last_pic->f.linesize[2]);
1177  }
1178 
1179  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1180  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1181  ret = get_buffer(avctx, s->next_pic);
1182  if (ret < 0)
1183  return ret;
1184  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1185  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1186  s->next_pic->f.linesize[1]);
1187  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1188  s->next_pic->f.linesize[2]);
1189  }
1190  }
1191 
1192  if (avctx->debug & FF_DEBUG_PICT_INFO)
1194  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1196  s->halfpel_flag, s->thirdpel_flag,
1197  s->adaptive_quant, h->qscale, h->slice_num);
1198 
1199  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1200  avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1201  avctx->skip_frame >= AVDISCARD_ALL)
1202  return 0;
1203 
1204  if (s->next_p_frame_damaged) {
1205  if (h->pict_type == AV_PICTURE_TYPE_B)
1206  return 0;
1207  else
1208  s->next_p_frame_damaged = 0;
1209  }
1210 
1211  if (h->pict_type == AV_PICTURE_TYPE_B) {
1212  h->frame_num_offset = h->slice_num - h->prev_frame_num;
1213 
1214  if (h->frame_num_offset < 0)
1215  h->frame_num_offset += 256;
1216  if (h->frame_num_offset == 0 ||
1217  h->frame_num_offset >= h->prev_frame_num_offset) {
1218  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1219  return -1;
1220  }
1221  } else {
1222  h->prev_frame_num = h->frame_num;
1223  h->frame_num = h->slice_num;
1224  h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1225 
1226  if (h->prev_frame_num_offset < 0)
1227  h->prev_frame_num_offset += 256;
1228  }
1229 
1230  for (m = 0; m < 2; m++) {
1231  int i;
1232  for (i = 0; i < 4; i++) {
1233  int j;
1234  for (j = -1; j < 4; j++)
1235  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1236  if (i < 3)
1237  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1238  }
1239  }
1240 
1241  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1242  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1243  unsigned mb_type;
1244  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1245 
1246  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1247  ((get_bits_count(&h->gb) & 7) == 0 ||
1248  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1249  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1250  h->gb.size_in_bits = 8 * buf_size;
1251 
1252  if (svq3_decode_slice_header(avctx))
1253  return -1;
1254 
1255  /* TODO: support s->mb_skip_run */
1256  }
1257 
1258  mb_type = svq3_get_ue_golomb(&h->gb);
1259 
1260  if (h->pict_type == AV_PICTURE_TYPE_I)
1261  mb_type += 8;
1262  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1263  mb_type += 4;
1264  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1266  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1267  return -1;
1268  }
1269 
1270  if (mb_type != 0)
1271  ff_h264_hl_decode_mb(h);
1272 
1273  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1274  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1275  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1276  }
1277 
1278  ff_draw_horiz_band(avctx, &s->cur_pic->f,
1279  s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1280  16 * h->mb_y, 16, h->picture_structure, 0,
1281  h->low_delay);
1282  }
1283 
1284  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1285  ret = av_frame_ref(data, &s->cur_pic->f);
1286  else if (s->last_pic->f.data[0])
1287  ret = av_frame_ref(data, &s->last_pic->f);
1288  if (ret < 0)
1289  return ret;
1290 
1291  /* Do not output the last pic after seeking. */
1292  if (s->last_pic->f.data[0] || h->low_delay)
1293  *got_frame = 1;
1294 
1295  if (h->pict_type != AV_PICTURE_TYPE_B) {
1296  FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1297  } else {
1298  av_frame_unref(&s->cur_pic->f);
1299  }
1300 
1301  return buf_size;
1302 }
1303 
1304 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1305 {
1306  SVQ3Context *s = avctx->priv_data;
1307  H264Context *h = &s->h;
1308 
1309  free_picture(avctx, s->cur_pic);
1310  free_picture(avctx, s->next_pic);
1311  free_picture(avctx, s->last_pic);
1312  av_freep(&s->cur_pic);
1313  av_freep(&s->next_pic);
1314  av_freep(&s->last_pic);
1315 
1316  av_frame_unref(&h->cur_pic.f);
1317 
1318  ff_h264_free_context(h);
1319 
1320  return 0;
1321 }
1322 
1323 AVCodec ff_svq3_decoder = {
1324  .name = "svq3",
1325  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1326  .type = AVMEDIA_TYPE_VIDEO,
1327  .id = AV_CODEC_ID_SVQ3,
1328  .priv_data_size = sizeof(SVQ3Context),
1329  .init = svq3_decode_init,
1330  .close = svq3_decode_end,
1331  .decode = svq3_decode_frame,
1332  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1333  CODEC_CAP_DR1 |
1334  CODEC_CAP_DELAY,
1335  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1336  AV_PIX_FMT_NONE},
1337 };