#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
#elif HAVE_MACHINE_IOCTL_METEOR_H && HAVE_MACHINE_IOCTL_BT848_H
# include <machine/ioctl_meteor.h>
# include <machine/ioctl_bt848.h>
#elif HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H && HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H
# include <dev/video/meteor/ioctl_meteor.h>
# include <dev/video/bktr/ioctl_bt848.h>
#elif HAVE_DEV_IC_BT8XX_H
# include <dev/ic/bt8xx.h>
#endif
#include <sys/ioctl.h>

#define PAL_HEIGHT   576
#define SECAM_HEIGHT 576
#define NTSC_HEIGHT  480

#define VIDEO_FORMAT NTSC

static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
                          METEOR_DEV3, METEOR_DEV_SVIDEO };
    struct meteor_geomet geo;
    long ioctl_frequency;
    struct sigaction act = { 0 }, old;

    if (idev < 0 || idev > 4) {
        arg = getenv("BKTR_DEV");
        if (arg)
            idev = atoi(arg);
        if (idev < 0 || idev > 4)
            idev = 1;
    }

    if (format < 1 || format > 6) {
        arg = getenv("BKTR_FORMAT");
        if (arg)
            format = atoi(arg);
        if (format < 1 || format > 6)
            format = VIDEO_FORMAT;
    }

    if (frequency <= 0) {
        arg = getenv("BKTR_FREQUENCY");
        if (arg)
            frequency = atof(arg);
    }

    /* the driver raises SIGUSR1 for each captured frame (see METEORSSIGNAL below) */
    sigemptyset(&act.sa_mask);
    act.sa_handler = catchsignal;
    sigaction(SIGUSR1, &act, &old);
    geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;

    switch (format) {
    case PAL:   h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
    /* ... other video standards elided ... */
    default:    h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
    }

    if (height <= h_max / 2)
        geo.oformat |= METEOR_GEO_EVEN_ONLY;
    if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) { /* error handling elided */ }
    if (ioctl(*video_fd, BT848SFMT, &c) < 0)       { /* error handling elided */ }
    if (ioctl(*video_fd, METEORSINPUT, &c) < 0)    { /* error handling elided */ }

    video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
                                PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
    if (frequency != 0.0) {
        ioctl_frequency = (unsigned long)(frequency * 16);
        if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
            av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
    }

    c = AUDIO_UNMUTE;
    if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
        av_log(NULL, AV_LOG_ERROR, "BT848_SAUDIO: %s\n", strerror(errno));

    c = METEOR_CAP_CONTINOUS;
    ioctl(*video_fd, METEORCAPTUR, &c);

    c = SIGUSR1;
    ioctl(*video_fd, METEORSSIGNAL, &c);
222 "SLEPT NO signals - %d microseconds late\n",
    c = METEOR_CAP_STOP_CONT;
    ioctl(s->video_fd, METEORCAPTUR, &c);

    c = AUDIO_MUTE;
    ioctl(s->tuner_fd, BT848_SAUDIO, &c);
#define OFFSET(x) offsetof(VideoData, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_FORMAT},
      PAL, NTSCJ, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    /* ... remaining option entries elided ... */
    .priv_class     = &bktr_class,
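Taken together this registers the "bktr" grab device as an input format. The following is a hedged sketch, not taken from this file, of how a caller could open it through the public libavformat API using the private options listed here ("standard", "video_size", "framerate"); the device path /dev/bktr0 and the option values are assumptions.

#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
#include "libavutil/dict.h"

/* Hypothetical caller: read a handful of frames from the bktr grab device. */
static int grab_some_frames(void)
{
    AVFormatContext *ctx = NULL;
    AVDictionary *opts = NULL;
    AVPacket pkt;
    int i, ret;

    avdevice_register_all();

    av_dict_set(&opts, "standard",   "NTSC",    0);
    av_dict_set(&opts, "video_size", "320x240", 0);
    av_dict_set(&opts, "framerate",  "ntsc",    0);

    ret = avformat_open_input(&ctx, "/dev/bktr0",
                              av_find_input_format("bktr"), &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    for (i = 0; i < 25; i++) {
        if ((ret = av_read_frame(ctx, &pkt)) < 0)
            break;
        /* ... consume pkt.data / pkt.size ... */
        av_packet_unref(&pkt);
    }

    avformat_close_input(&ctx);
    return ret < 0 && ret != AVERROR_EOF ? ret : 0;
}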
int av_parse_video_rate(AVRational *rate, const char *arg)
Parse str and store the detected values in *rate.
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
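Both helpers are what a grab demuxer typically uses to turn its "video_size" and "framerate" option strings into numbers. A minimal sketch; the literal values are examples only:

#include "libavutil/parseutils.h"

/* Illustrative only: parse a size string and a frame-rate abbreviation. */
static int parse_grab_options(int *width, int *height, AVRational *fps)
{
    int ret = av_parse_video_size(width, height, "320x240");
    if (ret < 0)
        return ret;
    return av_parse_video_rate(fps, "ntsc");   /* yields 30000/1001 */
}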
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVInputFormat ff_bktr_demuxer
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static void bktr_getframe(uint64_t per_frame)
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
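A rough sketch of how a grab-style read_header might use it; the field values are placeholders, and the AVStream->codec usage matches the era of this file:

#include "libavformat/avformat.h"

/* Hypothetical helper: create the single raw-video stream of a grab device. */
static int add_grab_stream(AVFormatContext *s1, int width, int height)
{
    AVStream *st = avformat_new_stream(s1, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = AV_PIX_FMT_YUV420P;   /* planar YUV 4:2:0 */
    st->codec->width      = width;
    st->codec->height     = height;
    return 0;
}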
int avpriv_open(const char *filename, int flags,...)
A wrapper for open() setting O_CLOEXEC.
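A sketch of how the device nodes could be opened with it; the paths are assumptions for illustration:

#include <errno.h>
#include <fcntl.h>
#include "libavutil/error.h"
#include "libavutil/internal.h"

/* Hypothetical: open capture and tuner nodes; O_CLOEXEC is set by the wrapper. */
static int open_bktr_devices(int *video_fd, int *tuner_fd)
{
    *video_fd = avpriv_open("/dev/bktr0",  O_RDONLY);
    *tuner_fd = avpriv_open("/dev/tuner0", O_RDONLY);
    return (*video_fd < 0) ? AVERROR(errno) : 0;
}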
static av_cold int read_close(AVFormatContext *ctx)
char * framerate
Set by a private option.
volatile sig_atomic_t nsignals
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
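For example, a read_packet callback in a grab device typically allocates the payload and copies one captured frame into it. A sketch, where video_buf/video_buf_size stand in for the mmap()ed capture buffer shown above:

#include <string.h>
#include "libavformat/avformat.h"

/* Hypothetical: wrap one captured frame in an AVPacket. */
static int deliver_frame(AVPacket *pkt, const uint8_t *video_buf, int video_buf_size)
{
    int ret = av_new_packet(pkt, video_buf_size);
    if (ret < 0)
        return ret;
    memcpy(pkt->data, video_buf, video_buf_size);
    return pkt->size;
}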
static const AVClass bktr_class
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
char * video_size
String describing video size, set by a private option.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void av_log(void *avcl, int level, const char *fmt,...)
static void catchsignal(int signal)
AVCodecContext * codec
Codec context associated with this stream.
common internal API header
char filename[1024]
input or output filename
int width
picture width / height.
static int read_header(FFV1Context *f)
int64_t av_gettime(void)
Get the current time in microseconds.
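bktr_getframe() uses it to pace capture to the nominal frame period. A simplified sketch of that idea, where per_frame is the frame duration in microseconds and the SIGUSR1 counting done by the real function is omitted:

#include <stdint.h>
#include <unistd.h>
#include "libavutil/time.h"

/* Simplified pacing: sleep until one frame period has elapsed since the
 * previous frame, then record the new frame time. */
static void wait_for_frame(uint64_t per_frame, uint64_t *last_frame_time)
{
    uint64_t now = av_gettime();

    if (*last_frame_time && now < *last_frame_time + per_frame)
        usleep(*last_frame_time + per_frame - now);

    *last_frame_time = av_gettime();
}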
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
#define AV_LOG_INFO
Standard information.
enum AVMediaType codec_type
char * av_strdup(const char *s)
Duplicate the string s.
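Typical use with the private option strings above; the value is illustrative:

#include "libavutil/mem.h"

/* Hypothetical: keep a private, heap-allocated copy of an option string. */
static char *dup_default_size(void)
{
    return av_strdup("320x240");   /* NULL on allocation failure; free with av_free() */
}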
static av_cold int bktr_init(const char *video_device, int width, int height, int format, int *video_fd, int *tuner_fd, int idev, double frequency)
static void close(AVCodecParserContext *s)
static int read_packet(AVFormatContext *ctx, AVPacket *pkt)
AVClass
Describe the class of an AVClass context structure.
AVRational
rational number numerator/denominator
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
static int grab_read_header(AVFormatContext *s1)
static const AVOption options[]
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void * priv_data
Format private data.
static int grab_read_close(AVFormatContext *s1)
AVPacket
This structure stores compressed data.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...