[asterisk-commits] rizzo: branch rizzo/video_v2 r84121 - /team/rizzo/video_v2/channels/

SVN commits to the Asterisk project asterisk-commits at lists.digium.com
Sat Sep 29 02:13:57 CDT 2007


Author: rizzo
Date: Sat Sep 29 02:13:56 2007
New Revision: 84121

URL: http://svn.digium.com/view/asterisk?view=rev&rev=84121
Log:

Save local changes to correctly support the various video
representations involved.
The code is untested: it only compiles at this point, and is unlikely to work yet.
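
The intended flow between the representations, roughly (my_scale()
does the size/pixel-format conversion at each hop):

	TX: camera/X11 -> out.loc_src --my_scale()--> out.enc_in
	      --avcodec_encode_video()--> out.enc_out
	      --split_frame()--> RTP-sized ast_frames
	    (out.loc_src is also shown locally, sized per out.loc_dpy)

	RX: RTP -> in.dec_in --av_parser/avcodec_decode_video()--> in.dec_out
	      --my_scale()--> SDL overlay (sized per in.rem_dpy)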


Modified:
    team/rizzo/video_v2/channels/chan_oss.c
    team/rizzo/video_v2/channels/console_video.c

Modified: team/rizzo/video_v2/channels/chan_oss.c
URL: http://svn.digium.com/view/asterisk/team/rizzo/video_v2/channels/chan_oss.c?view=diff&rev=84121&r1=84120&r2=84121
==============================================================================
--- team/rizzo/video_v2/channels/chan_oss.c (original)
+++ team/rizzo/video_v2/channels/chan_oss.c Sat Sep 29 02:13:56 2007
@@ -385,13 +385,13 @@
 	int readpos;				/*!< read position above */
 	struct ast_frame read_f;	/*!< returned by oss_read */
 
-	struct video_desc env;		/* video support stuff */
+	struct video_desc* env;		/* video support stuff */
 };
 
 static struct video_desc *get_video_desc(struct ast_channel *c)
 {
 	struct chan_oss_pvt *o = c->tech_pvt;
-	return o ? &(o->env) : NULL;
+	return o ? o->env : NULL;
 }
 
 static struct chan_oss_pvt oss_default = {
@@ -875,7 +875,7 @@
 	c->tech_pvt = NULL;
 	o->owner = NULL;
 	ast_verbose(" << Hangup on console >> \n");
-	console_video_uninit(&o->env);
+	console_video_uninit(o->env);
 	ast_module_unref(ast_module_info->self);
 	if (o->hookstate) {
 		if (o->autoanswer || o->autohangup) {
@@ -1141,16 +1141,17 @@
 	value = a->argc > e->args ? a->argv[e->args] : NULL;
 	if (value)	/* handle setting */
 		store_config_core(o, var, value);
+	/* XXX these should be moved to console_video.c */
 	if (!strcasecmp(var, "videodevice")) {
-		ast_cli(a->fd, "videodevice is [%s]\n", o->env.videodevice);
+		ast_cli(a->fd, "videodevice is [%s]\n", o->env->videodevice);
 	} else if (!strcasecmp(var, "videowidth")) {
-		ast_cli(a->fd, "videowidth is [%d]\n", o->env.w);
+		ast_cli(a->fd, "videowidth is [%d]\n", o->env->w);
 	} else if (!strcasecmp(var, "videoheight")) {
-		ast_cli(a->fd, "videoheight is [%d]\n", o->env.h);
+		ast_cli(a->fd, "videoheight is [%d]\n", o->env->h);
 	} else if (!strcasecmp(var, "bitrate")) {
-		ast_cli(a->fd, "bitrate is [%d]\n", o->env.bitrate);
+		ast_cli(a->fd, "bitrate is [%d]\n", o->env->bitrate);
 	} else if (!strcasecmp(var, "fps")) {
-		ast_cli(a->fd, "fps is [%d]\n", o->env.fps);
+		ast_cli(a->fd, "fps is [%d]\n", o->env->fps);
 	} else if (!strcasecmp(var, "device")) {
 		ast_cli(a->fd, "device is [%s]\n", o->device);
 	}
@@ -1555,6 +1556,8 @@
 	if (!ast_jb_read_conf(&global_jbconf, (char *)var,(char *) value))
 		return;
 
+	if (!console_video_config(&o->env, var, value))
+		return;
 	M_BOOL("autoanswer", o->autoanswer)
 	M_BOOL("autohangup", o->autohangup)
 	M_BOOL("overridecontext", o->overridecontext)
@@ -1569,11 +1572,6 @@
 	M_F("mixer", store_mixer(o, value))
 	M_F("callerid", store_callerid(o, value))
 	M_F("boost", store_boost(o, value))
-	M_STR("videodevice", o->env.videodevice)
-	M_UINT("videowidth", o->env.w)
-	M_UINT("videoheight", o->env.h)
-	M_UINT("fps", o->env.fps)
-	M_UINT("bitrate", o->env.bitrate)
 
 	M_END(/* */);
 }
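
Net effect on chan_oss.c: the video environment is now an opaque pointer,
allocated on demand by console_video_config(), and store_config_core()
offers each key to the video code first, handling it itself only when
console_video_config() returns nonzero (key not recognised). From the
caller's side, roughly (the key/value shown are just example inputs):

	struct video_desc *env = NULL;	/* as in struct chan_oss_pvt */

	if (!console_video_config(&env, "videowidth", "352"))
		return;	/* video key consumed; env allocated on first use */
	/* otherwise fall through to the M_* matchers */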

Modified: team/rizzo/video_v2/channels/console_video.c
URL: http://svn.digium.com/view/asterisk/team/rizzo/video_v2/channels/console_video.c?view=diff&rev=84121&r1=84120&r2=84121
==============================================================================
--- team/rizzo/video_v2/channels/console_video.c (original)
+++ team/rizzo/video_v2/channels/console_video.c Sat Sep 29 02:13:56 2007
@@ -18,7 +18,7 @@
 
 
 //#define DROP_PACKETS 5       // if set, simulate this percentage of lost video packets
-#define HAVE_V4L	1
+//#define HAVE_V4L	1
 #define HAVE_SDL	1
 #define HAVE_FFMPEG	1
 #define OLD_FFMPEG	1	/* set for old ffmpeg with no swscale */
@@ -30,6 +30,10 @@
 #if !defined(HAVE_SDL)
 #define HAVE_SDL	0
 #endif
+#if !defined(HAVE_V4L)
+#define HAVE_V4L	0
+#endif
+
 
 #if HAVE_FFMPEG == 0 || HAVE_SDL == 0
 /*
@@ -38,15 +42,17 @@
  * At the moment we also require SDL, though it is not strictly necessary
  * (we could just drop incoming video but still source it).
  */
-struct video_desc {	/* empty */
-	int 	w;
-	int 	h;
-	int 	fps;
-	int 	bitrate;
-	char videodevice[64];
-};
+struct video_desc;
+
 #define CONSOLE_FORMAT_VIDEO	0
 #define	console_write_video	NULL
+
+static int console_video_config(struct video_desc **penv,
+	const char *var, const char *val)
+{
+	return 1;	/* nothing recognised */
+}
+
 static void console_video_start(struct video_desc *e, struct ast_channel *c)
 {
 }
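
Note that in the no-ffmpeg/no-SDL build struct video_desc is now an
incomplete type: callers may hold a pointer to it (which stays NULL,
since the stub console_video_config() above recognises nothing and
never allocates), but can no longer instantiate it:

	struct video_desc *env = NULL;	/* ok: pointer to incomplete type */
	struct video_desc e;		/* would no longer compile */
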
@@ -74,30 +80,36 @@
 console_write_video(), decoded and passed to SDL for display.
 
 For as unfortunate and confusing as it can be, we need to deal with a
-number of different video "formats" (meaning size, codec/pixel format,
+number of different video representations (size, codec/pixel format,
 codec parameters), as follows:
 
- loc_src_fmt	is the data coming from the camera/X11/etc.
-	This is typically constrained by the video device driver.
-
- enc_in_fmt	is the input format required by the encoder.
+ loc_src	is the data coming from the camera/X11/etc.
+	The format is typically constrained by the video device driver.
+
+ enc_in		is the input required by the encoder.
 	Typically constrained in size by the encoder type.
 
- enc_out_fmt	is the format used in the bitstream transmitted over RTP.
+ enc_out	is the bitstream transmitted over RTP.
 	Typically negotiated while the call is established.
 
- loc_dpy_fmt	is how we are going to display the local video source.
+ loc_dpy	is the format used to display the local video source.
 	Depending on user preferences this can have the same size as
 	loc_src_fmt, or enc_in_fmt, or thumbnail size (e.g. PiP output)
 
- dec_in_fmt	is the format received on the RTP bitstream. Negotiated
+ dec_in		is the incoming RTP bitstream. Negotiated
 	during call establishment, it is not necessarily the same as
 	enc_in_fmt
 
- dec_out_fmt	the output of the decoder
-
- rem_dpy_fmt	the format used to display the remote stream
-
+ dec_out	the output of the decoder.
+	The format is whatever the other side sends, and the
+	buffer is allocated by avcodec_decode_... so we only
+	copy the data here.
+
+ rem_dpy	the format used to display the remote stream
+
+We store the format info together with the buffer storing the data.
+As an optimization, a format/buffer may reference another one
+if the formats are equivalent.
 
  *
  * In order to decode video you need the following patch to the
@@ -141,9 +153,6 @@
 	uint8_t	*data;	/* malloc-ed memory */
 	int	size;	/* total size in bytes */
 	int	used;	/* space used so far */
-};
-
-struct vid_fmt {
 	int	w;
 	int	h;
 	int	pix_fmt;
@@ -174,14 +183,15 @@
 	int		fps;
 	int		bitrate;
 
-	struct fbuf_t	buf;		/* frame buffer */
-	int pix_fmt;			/* source pixel format */
-
-	AVCodecContext	*context;	/* encoding context */
+	struct fbuf_t	loc_src;	/* local source buffer */
+	struct fbuf_t	enc_in;		/* encoder input buffer */
+	struct fbuf_t	enc_out;	/* encoder output buffer */
+	struct fbuf_t	loc_dpy;	/* display source buffer */
+
+	AVCodecContext	*enc_ctx;	/* encoding context */
 	AVCodec		*codec;
-	AVFrame		*frame;
+	AVFrame		*frame;	/* The initial part is an AVPicture */
+	int		lasttxframe;	/* XXX useless: the RTP layer overwrites seqno */
-	struct fbuf_t	encbuf;		/* encoding buffer */
 	int		mtu;
 	struct timeval	last_frame;	/* when we read the last frame ? */
 
@@ -200,14 +210,16 @@
  *	codec, no memory, etc.) and we must drop all incoming frames.
  */
 struct video_in_desc {
-	AVCodecContext          *context;	/* information about the codec in the stream */
+	AVCodecContext          *dec_ctx;	/* information about the codec in the stream */
 	AVCodec                 *codec;		/* reference to the codec */
 	AVFrame                 *frame;		/* place to store the frame */
 	AVCodecParserContext    *parser;
 	int                     completed;	/* probably unnecessary */
 	uint16_t 		next_seq;	/* must be 16 bit */
 	int                     discard;	/* flag for discard status */
-	struct fbuf_t buf;		/* decoded frame */
+	struct fbuf_t dec_in;	/* incoming bitstream */
+	struct fbuf_t dec_out;	/* decoded frame */
+	struct fbuf_t rem_dpy;	/* buffer for displayed remote img */
 };
 
 /*
@@ -232,6 +244,7 @@
 	int                     sdl_ok;
 	ast_mutex_t		sdl_lock;
 	SDL_Overlay             *bmp[2];
+	SDL_Rect		rect[2];	/* loc. of images */
 };
 
 /*
@@ -262,15 +275,20 @@
 #include <linux/videodev.h>
 #endif
 
+static void free_fbuf(struct fbuf_t *b)
+{
+	if (b->data)
+		ast_free(b->data);
+	bzero(b, sizeof(*b));
+}
+
 /*!
  * Open the local video source and allocate a buffer
  * for storing the image. Return 0 on success, -1 on error
  */
 static int video_open(struct video_out_desc *v)
 {
-	int i;
-
-	if (v->buf.data)	/* buffer allocated means device already open */
+	if (v->loc_src.data)	/* buffer allocated means device already open */
 		return v->fd;
 	v->fd = -1;
 	/*
@@ -288,7 +306,7 @@
 	}
 	v->image = im = XGetImage(v->dpy,
 		RootWindow(v->dpy, DefaultScreen(v->dpy)),
-		x_ofs, y_ofs, v->w, v->h, AllPlanes, ZPixmap);
+		x_ofs, y_ofs, v->loc_src.w, v->loc_src.h, AllPlanes, ZPixmap);
 	if (v->image == NULL) {
 		ast_log(LOG_WARNING, "error creating Ximage\n");
 		goto error;
@@ -297,7 +315,7 @@
 		im->data,
 		im->bits_per_pixel,
 		im->red_mask, im->green_mask, im->blue_mask);
-	v->pix_fmt = PIX_FMT_RGB565;
+	v->loc_src.pix_fmt = PIX_FMT_RGB565;
 	v->fd = -2;
     }
 #if HAVE_V4L > 0
@@ -305,6 +323,7 @@
 	/* V4L specific */
 	struct video_window vw = { 0 };	/* camera attributes */
 	struct video_picture vp;
+	int i;
 
 	v->fd = open(v->device, O_RDONLY | O_NONBLOCK);
 	if (v->fd < 0) {
@@ -317,8 +336,8 @@
 		ast_log(LOG_WARNING, "error F_SETFL for %s [%s]\n", v->device, strerror(errno));
 	}
 	/* set format */
-	vw.width = v->w;
-	vw.height = v->h;
+	vw.width = v->loc_src.w;
+	vw.height = v->loc_src.h;
 	vw.flags = v->fps << 16;
 	if (ioctl(v->fd, VIDIOCSWIN, &vw) == -1) {
 		ast_log(LOG_WARNING, "error setting format for %s [%s]\n", v->device, strerror(errno));
@@ -337,34 +356,35 @@
 		ast_log(LOG_WARNING, "error setting picture info\n");
 		goto error;
 	}
+	v->loc_src.pix_fmt = PIX_FMT_YUV420P;
     }
 #endif /* HAVE_V4L */
 	if (v->image == NULL && v->fd < 0)
 		goto error;
-	v->buf.size = (v->w * v->h * 3)/2;	/* yuv411 */
-	ast_log(LOG_WARNING, "videodev %s opened, size %dx%d %d\n", v->device, v->w, v->h, v->buf.size);
-	v->buf.data = ast_calloc(1, v->buf.size);
-	if (!v->buf.data) {
-		ast_log(LOG_WARNING, "error allocating buffer %d bytes\n", v->buf.size);
+	/* allocate the source buffer */
+	v->loc_src.size = (v->loc_src.w * v->loc_src.h * 3)/2;	/* yuv411 */
+	ast_log(LOG_WARNING, "videodev %s opened, size %dx%d %d\n",
+		v->device, v->loc_src.w, v->loc_src.h, v->loc_src.size);
+	v->loc_src.data = ast_calloc(1, v->loc_src.size);
+	if (!v->loc_src.data) {
+		ast_log(LOG_WARNING, "error allocating buffer %d bytes\n",
+			v->loc_src.size);
 		goto error;
 	}
 
-	v->buf.used = 0;
+	v->loc_src.used = 0;
 	return 0;
 
 error:
 	ast_log(LOG_WARNING, "fd %d dpy %p img %p data %p\n",
-		v->fd, v->dpy, v->image, v->buf.data);
+		v->fd, v->dpy, v->image, v->loc_src.data);
 	if (v->dpy)
 		XCloseDisplay(v->dpy);
 	v->dpy = NULL;
 	if (v->fd >= 0)
 		close(v->fd);
 	v->fd = -1;
-	if (v->buf.data)
-		ast_free(v->buf.data);
-	v->buf.data = NULL;
-	v->buf.size = v->buf.used = 0;	/* for safety */
+	free_fbuf(&v->loc_src);
 	return -1;
 }
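
The (w * h * 3)/2 sizing used above is the usual planar 4:2:0 (or 4:1:1)
arithmetic, one full-size luma plane plus two quarter-size chroma planes;
e.g. for a CIF (352x288) source:

	Y     = 352 * 288    = 101376 bytes
	U = V = 101376 / 4   =  25344 bytes each
	total = 101376 * 3/2 = 152064 bytes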
 
@@ -375,7 +395,7 @@
 static int video_read(struct video_out_desc *v)
 {
 	struct timeval now = ast_tvnow();
-	if (v->buf.data == NULL)	/* not initialized */
+	if (v->loc_src.data == NULL)	/* not initialized */
 		return 0;
 
 	/* check if it is time to read */
@@ -397,7 +417,7 @@
 			int x, y;
 			int ulen = (v->h * v->w) / 4;
 			uint16_t *src = (uint16_t *)v->image->data;
-			uint8_t *dst = v->buf.data;
+			uint8_t *dst = v->loc_src.data;
 			uint8_t *up = dst + ulen*4;
 			uint8_t *vp = up + ulen;
 
@@ -410,22 +430,22 @@
 			memset(vp, 0x80, ulen);
 		}
 
-		return v->buf.size;	/* return the actual size */
+		return v->loc_src.size;	/* return the actual size */
 	}
 	if (v->fd < 0)			/* no other source */
 		return 0;
 	for (;;) {
-		int r, l = v->buf.size - v->buf.used;
-		r = read(v->fd, v->buf.data + v->buf.used, l);
+		int r, l = v->loc_src.size - v->loc_src.used;
+		r = read(v->fd, v->loc_src.data + v->loc_src.used, l);
 		// ast_log(LOG_WARNING, "read %d of %d bytes from webcam\n", r, l);
 		if (r < 0)	/* read error */
 			return 0;
 		if (r == 0)	/* no data */
 			return 0;
-		v->buf.used += r;
+		v->loc_src.used += r;
 		if (r == l) {
-			v->buf.used = 0; /* prepare for next frame */
-			return v->buf.size;
+			v->loc_src.used = 0; /* prepare for next frame */
+			return v->loc_src.size;
 		}
 	}
 }
@@ -455,14 +475,15 @@
 /*! \brief uninitialize the descriptor for remote video stream */
 static int video_in_uninit(struct video_in_desc *v)
 {
-	if (v->context) {
-		avcodec_close(v->context);
-		av_free(v->context);
+	if (v->dec_ctx) {
+		avcodec_close(v->dec_ctx);
+		av_free(v->dec_ctx);
 	}
 	if (v->frame)
 		av_free(v->frame);
-	if (v->buf.data)
-		free(v->buf.data);
+	free_fbuf(&v->dec_in);
+	free_fbuf(&v->dec_out);
+	free_fbuf(&v->rem_dpy);
 	bzero(v, sizeof(*v));
 	return -1;	/* error, in case someone cares */
 }
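
Since free_fbuf() bzero()s the descriptor after releasing it, this uninit
path is idempotent; it is safe on never-allocated buffers and safe to run
twice:

	struct fbuf_t b;

	bzero(&b, sizeof(b));	/* never allocated */
	free_fbuf(&b);		/* no-op: b.data is NULL */
	free_fbuf(&b);		/* still safe: free_fbuf() re-zeroed b */
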
@@ -475,10 +496,9 @@
 	enum CodecID codec;
 
 	v->codec = NULL;
-	v->context = NULL;
+	v->dec_ctx = NULL;
 	v->frame = NULL;
 	v->parser = NULL;
-	v->buf.data = NULL;
 	v->completed = 0;
 	v->discard = 1;
 
@@ -492,12 +512,12 @@
 	/*
 	* Initialize the codec context.
 	*/
-	v->context = avcodec_alloc_context();
-	v->context->bit_rate_tolerance = v->context->bit_rate/5;
-	if (avcodec_open(v->context, v->codec) < 0) {
+	v->dec_ctx = avcodec_alloc_context();
+	/* XXX not here! */
+	if (avcodec_open(v->dec_ctx, v->codec) < 0) {
 		ast_log(LOG_WARNING, "Unable to open the codec context\n");
-		av_free(v->context);
-		v->context = NULL;
+		av_free(v->dec_ctx);
+		v->dec_ctx = NULL;
 		return video_in_uninit(v);
 	}
 
@@ -518,15 +538,17 @@
 /*! \brief uninitialize the descriptor for local video stream */
 static int video_out_uninit(struct video_out_desc *v)
 {
-	if (v->context) {
-		avcodec_close(v->context);
-		av_free(v->context);
+	if (v->enc_ctx) {
+		avcodec_close(v->enc_ctx);
+		av_free(v->enc_ctx);
 	}
 
 	if (v->frame) 
 		av_free(v->frame);
-	if (v->buf.data) 
-		free(v->buf.data);
+	free_fbuf(&v->loc_src);
+	free_fbuf(&v->enc_in);
+	free_fbuf(&v->enc_out);
+	free_fbuf(&v->loc_dpy);
 	if (v->image) {	/* X11 grabber */
 		XCloseDisplay(v->dpy);
 		v->dpy = NULL;
@@ -534,8 +556,6 @@
 	}
 	if (v->fd >= 0) 
 		close(v->fd);
-	if (v->encbuf.data)
-		free(v->encbuf.data);
 	bzero(v, sizeof(*v));
 	v->fd = -1;
 	return -1;
@@ -551,13 +571,18 @@
 {
 	int codec;
 	int size;
-
-	v->context		= NULL;
+	struct fbuf_t *enc_in;
+
+	v->enc_ctx		= NULL;
 	v->codec		= NULL;
 	v->frame		= NULL;
 	v->lasttxframe		= -1;
-	v->encbuf.data		= NULL;
-
+	v->enc_out.data		= NULL;
+
+	if (v->loc_src.data == NULL) {
+		ast_log(LOG_WARNING, "No local source active\n");
+		return video_out_uninit(v);
+	}
 	codec = map_video_format(format, CM_WR);
 	v->codec = avcodec_find_encoder(codec);
 	if (!v->codec) {
@@ -565,32 +590,39 @@
 		return video_out_uninit(v);
 	}
 
-	v->pix_fmt = PIX_FMT_YUV420P;	/* default - camera format */
-	v->context = avcodec_alloc_context();
-	v->context->pix_fmt = v->pix_fmt;
-	v->context->width = v->w;
-	v->context->height = v->h;
+	enc_in = &v->enc_in;
+	enc_in->size = (enc_in->w * enc_in->h * 3)/2;	/* yuv411 */
+	enc_in->data = ast_calloc(1, enc_in->size);
+	if (!enc_in->data) {
+		ast_log(LOG_WARNING, "Unable to allocate enc.in\n");
+		return video_out_uninit(v);
+	}
+	v->enc_ctx = avcodec_alloc_context();
+	v->enc_ctx->pix_fmt = enc_in->pix_fmt;
+	v->enc_ctx->width = enc_in->w;
+	v->enc_ctx->height = enc_in->h;
 	/* XXX rtp_callback ?
 	 * rtp_mode so ffmpeg inserts as many start codes as possible.
 	 */
-	v->context->rtp_mode = 1;
-	v->context->rtp_payload_size = v->mtu / 2; // mtu/2
+	v->enc_ctx->rtp_mode = 1;
+	v->enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
+	v->enc_ctx->bit_rate_tolerance = v->enc_ctx->bit_rate/5;
 	if (0) {	/* normal h263 */
-		// v->context->codec = CODEC_ID_H263;
+		// v->enc_ctx->codec = CODEC_ID_H263;
 	} else {
-		v->context->flags|=CODEC_FLAG_H263P_UMV;
-		v->context->flags|=CODEC_FLAG_AC_PRED;
-		v->context->flags|=CODEC_FLAG_H263P_SLICE_STRUCT;
-	}
-	v->context->bit_rate = v->bitrate;
-	v->context->gop_size = (int) v->fps*5; // emit I frame every 5 seconds
+		v->enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
+		v->enc_ctx->flags |= CODEC_FLAG_AC_PRED;
+		v->enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
+	}
+	v->enc_ctx->bit_rate = v->bitrate;
+	v->enc_ctx->gop_size = (int) v->fps*5; // emit I frame every 5 seconds
  
 	ast_log(LOG_WARNING, "w: %d h: %d fps: %d\n", v->w, v->h, v->fps);
-	v->context->time_base = (AVRational){1, v->fps};
-	if (avcodec_open(v->context, v->codec) < 0) {
+	v->enc_ctx->time_base = (AVRational){1, v->fps};
+	if (avcodec_open(v->enc_ctx, v->codec) < 0) {
 		ast_log(LOG_WARNING, "Unable to initialize the encoder parser\n");
-		av_free(v->context);
-		v->context = NULL;
+		av_free(v->enc_ctx);
+		v->enc_ctx = NULL;
 		return video_out_uninit(v);
 	}
 
@@ -600,22 +632,22 @@
 		return video_out_uninit(v);
 	}
 
-	if(!v->buf.data) {
-		ast_log(LOG_WARNING, "Unable to set frame buffer\n");
-		return video_out_uninit(v);
-	}
-	
-	size = v->w*v->h;
-	v->frame->data[0] = v->buf.data;
+	/* Here we assume that the encoder has some 411 format */
+	size = enc_in->w * enc_in->h;
+	v->frame->data[0] = enc_in->data;
 	v->frame->data[1] = v->frame->data[0] + size;
 	v->frame->data[2] = v->frame->data[1] + size/4;
-	v->frame->linesize[0] = v->context->width;
-	v->frame->linesize[1] = v->context->width/2;
-	v->frame->linesize[2] = v->context->width/2;
-
-	v->encbuf.data = ast_calloc(1, v->buf.size);
-	v->encbuf.size = v->buf.size;
-	v->encbuf.used = 0;
+	v->frame->linesize[0] = enc_in->w;
+	v->frame->linesize[1] = enc_in->w/2;
+	v->frame->linesize[2] = enc_in->w/2;
+
+	/*
+	 * Allocate enough for the encoded bitstream. As we are compressing,
+	 * we hope that the output is never larger than the input size.
+	 */
+	v->enc_out.data = ast_calloc(1, enc_in->size);
+	v->enc_out.size = enc_in->size;
+	v->enc_out.used = 0;
 
 	v->mtu = 1400;
 
@@ -741,19 +773,19 @@
  */
 static int decode_video(struct video_in_desc *v)
 {
-	uint8_t *src = v->buf.data;
-	int srclen = v->buf.used;
+	uint8_t *src = v->dec_in.data;
+	int srclen = v->dec_in.used;
 
 	if (!srclen)
 		return 0;
 	while (srclen) {
 		uint8_t *data;
 		int datalen;
-		int ret = av_parser_parse(v->parser, v->context, &data, &datalen, src, srclen, 0, 0);
+		int ret = av_parser_parse(v->parser, v->dec_ctx, &data, &datalen, src, srclen, 0, 0);
 		if (datalen) {
-			ret = avcodec_decode_video(v->context, v->frame, &(v->completed), data, datalen);
-			if(ret < 0) {
-				ast_log(LOG_NOTICE, "Errore nella decodifica\n");
+			ret = avcodec_decode_video(v->dec_ctx, v->frame, &(v->completed), data, datalen);
+			if (ret < 0) {
+				ast_log(LOG_NOTICE, "Error decoding\n");
 				return 0;
 			}
 			src += ret;
@@ -761,6 +793,57 @@
 		}
 	}
 	return 1;
+}
+
+/*! create an avpict from our fbuf info.
+ * XXX This depends on the format.
+ */
+static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p)
+{
+	int l4 = b->w * b->h/4; /* size of U or V frame */
+	bzero(p, sizeof(*p));
+	p->data[0] = b->data;
+	p->data[1] = b->data + 4*l4;
+	p->data[2] = b->data + 5*l4;
+	p->linesize[0] = b->w;
+	p->linesize[1] = b->w/2;
+	p->linesize[2] = b->w/2;
+	return p;
+}
+
+/*! convert/scale between an input and an output format */
+static void my_scale(struct fbuf_t *in, AVPicture *p_in,
+	struct fbuf_t *out, AVPicture *p_out)
+{
+	AVPicture my_p_in, my_p_out;
+
+	if (p_in == NULL)
+		p_in = fill_pict(in, &my_p_in);
+	if (p_out == NULL)
+		p_out = fill_pict(out, &my_p_out);
+		
+#ifdef OLD_FFMPEG /* XXX img_convert is deprecated */
+	/* env->sdl_ok guarantees that in.frame exists */
+	img_convert(p_out, out->pix_fmt,
+		p_in, in->pix_fmt, in->w, in->h);
+#else /* XXX replacement */
+	struct SwsContext *convert_ctx;
+
+	convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
+		out->w, out->h, out->pix_fmt,
+		SWS_BICUBIC, NULL, NULL, NULL);
+	if (convert_ctx == NULL) {
+		ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
+		return;
+	}
+
+	sws_scale(convert_ctx,
+		p_in->data, p_in->linesize,
+		0, in->h, /* src slice: whole frame */
+		p_out->data, p_out->linesize);
+
+	sws_freeContext(convert_ctx);
+#endif /* XXX replacement */
 }
 
 /*
@@ -771,87 +854,49 @@
  *
  * The size is taken from the configuration.
  *
- * TODO: change the call img_convert(): it is deprecated.
  * 'out' is 0 for received data, 1 for the local video, 2 on init (debug)
  */
 static void show_frame(struct video_desc *env, int out)
 {
-	AVPicture pict;
-	SDL_Rect rect;
-	int w = env->w, h = env->h;
-	int in_w, in_h;
-	AVCodecContext *c;	/* shorthand */
+	AVPicture *p_in, p_out;
+	struct fbuf_t *b_in, *b_out;
 	SDL_Overlay *bmp;
-	AVPicture *pict_in = NULL;	/* conversion source */
-	uint8_t *src = NULL;	/* pixel input */
-	int pix_fmt;
 
 	if (!env->sdl_ok)
 		return;
 
 	if (out) {	/* webcam/x11 to sdl */
-		src = env->out.buf.data;
-		c = env->out.context;
-		pix_fmt = env->out.pix_fmt;
-		in_w = env->w;
-		in_h = env->h;
+		/* local (TX) path: show the camera/X11 grab,
+		 * converted to the local display format */
+		b_in = &env->out.loc_src;
+		b_out = &env->out.loc_dpy;
+		p_in = NULL;	/* my_scale() derives it from b_in */
 	} else {
-		pict_in = (AVPicture *)env->in.frame;
-		c = env->in.context;
-		pix_fmt = c->pix_fmt;
-		in_w = c->width;
-		in_h = c->height;
+		/* copy input format from the decoding context */
+		AVCodecContext *c = env->in.dec_ctx;
+		b_in = &env->in.dec_out;
+		b_in->pix_fmt = c->pix_fmt;
+		b_in->w = c->width;
+		b_in->h = c->height;
+
+		b_out = &env->in.rem_dpy;
+		p_in = (AVPicture *)env->in.frame;
 	}
 	bmp = env->bmp[out];
 	SDL_LockYUVOverlay(bmp);
-	pict.data[0] = bmp->pixels[0];
-	pict.data[1] = bmp->pixels[1];
-	pict.data[2] = bmp->pixels[2];
-	pict.linesize[0] = bmp->pitches[0];
-	pict.linesize[1] = bmp->pitches[1];
-	pict.linesize[2] = bmp->pitches[2];
-
-	if (pict_in == NULL) {	/* raw stream in YUV format, usually from camera */
-		int l4 = w*h/4;	/* size of U or V frame */
-		if (!src) {
-			ast_log(LOG_WARNING, "no buffer for show frame\n");
-		} else {
-			bcopy(src, bmp->pixels[0], 4*l4);
-			bcopy(src + 4*l4, bmp->pixels[1], l4);
-			bcopy(src + 5*l4, bmp->pixels[2], l4);
-		}
-	} else {	/* decode */
-#ifdef OLD_FFMPEG /* XXX img_convert is deprecated */
-		/* env->sdl_ok guarantees that in.frame exists */
-		img_convert(&pict, PIX_FMT_YUV420P, pict_in, pix_fmt, in_w, in_h);
-#else /* XXX replacement */
-		struct SwsContext *convert_ctx;
-
-		convert_ctx = sws_getContext(in_w, in_h, pix_fmt /* input format */,
-			w, h, PIX_FMT_YUV420P /* output format for sdl */,
-			SWS_BICUBIC, NULL, NULL, NULL);
-		if (convert_ctx == NULL) {
-			ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
-			return;
-		}
-
-		sws_scale(convert_ctx,
-			pict_in->data, pict_in->linesize,
-			in_w, in_h, /* src slice */
-			pict.data, pict.linesize);
-
-		sws_freeContext(convert_ctx);
-
-#endif /* XXX replacement */
-	}
-	rect.x = w*out;
-	rect.y = 0;
-	rect.w = w;
-	rect.h = h;
+	/* output picture info - this is sdl, YUV420P */
+	p_out.data[0] = bmp->pixels[0];
+	p_out.data[1] = bmp->pixels[1];
+	p_out.data[2] = bmp->pixels[2];
+	p_out.linesize[0] = bmp->pitches[0];
+	p_out.linesize[1] = bmp->pitches[1];
+	p_out.linesize[2] = bmp->pitches[2];
+
+	my_scale(b_in, p_in, b_out, &p_out);
 
 	/* lock to protect access to Xlib by different threads. */
 	ast_mutex_lock(&env->sdl_lock);
-	SDL_DisplayYUVOverlay(bmp, &rect);
+	SDL_DisplayYUVOverlay(bmp, &env->rect[out]);
 	ast_mutex_unlock(&env->sdl_lock);
 	SDL_UnlockYUVOverlay(bmp);
 }
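
With this, every conversion in the file is described by a pair of fbuf_t
descriptors; the AVPicture arguments are needed only when the pixels live
somewhere special (the decoder's frame, the SDL overlay). A hypothetical
standalone use, assuming YUV420P on both sides:

	struct fbuf_t src = { .w = 640, .h = 480, .pix_fmt = PIX_FMT_YUV420P };
	struct fbuf_t dst = { .w = 352, .h = 288, .pix_fmt = PIX_FMT_YUV420P };

	src.size = (src.w * src.h * 3) / 2;
	src.data = ast_calloc(1, src.size);	/* ... then fill with a frame */
	dst.size = (dst.w * dst.h * 3) / 2;
	dst.data = ast_calloc(1, dst.size);

	my_scale(&src, NULL, &dst, NULL);	/* NULLs derived via fill_pict() */
	/* dst.data now holds the frame rescaled to 352x288 */
	free_fbuf(&src);
	free_fbuf(&dst);
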
@@ -875,7 +920,7 @@
 	struct video_desc *env = get_video_desc(chan);
 	struct video_in_desc *v = &env->in;
 
-	if (v->context == NULL) {
+	if (v->dec_ctx == NULL) {
 		ast_log(LOG_WARNING, "cannot decode, dropping frame\n");
 		return 0;	/* error */
 	}
@@ -899,7 +944,7 @@
 		 * the payload type entry.
 		 */
 		if (f->subclass & 0x01) {
-			v->buf.used = 0;
+			v->dec_in.used = 0;
 			v->next_seq = f->seqno + 1;	/* wrap at 16 bit */
 			v->discard = 0;
 			ast_log(LOG_WARNING, "out of discard mode, frame %d\n", f->seqno);
@@ -937,28 +982,28 @@
 	 * and also wants 0 as a buffer terminator to prevent trouble.
 	 */
 	need = len + FF_INPUT_BUFFER_PADDING_SIZE;
-	if (v->buf.data == NULL) {
-		v->buf.size = need;
-		v->buf.used = 0;
-		v->buf.data = ast_malloc(v->buf.size);
-	} else if (v->buf.used + need > v->buf.size) {
-		v->buf.size = v->buf.used + need;
-		v->buf.data = ast_realloc(v->buf.data, v->buf.size);
-	}
-	if (v->buf.data == NULL) {
+	if (v->dec_in.data == NULL) {
+		v->dec_in.size = need;
+		v->dec_in.used = 0;
+		v->dec_in.data = ast_malloc(v->dec_in.size);
+	} else if (v->dec_in.used + need > v->dec_in.size) {
+		v->dec_in.size = v->dec_in.used + need;
+		v->dec_in.data = ast_realloc(v->dec_in.data, v->dec_in.size);
+	}
+	if (v->dec_in.data == NULL) {
 		ast_log(LOG_WARNING, "alloc failure for %d, discard\n",
-			v->buf.size);
+			v->dec_in.size);
 		v->discard = 1;
 		return 0;
 	}
-	memcpy(v->buf.data + v->buf.used, data, len);
-	v->buf.used += len;
-	v->buf.data[v->buf.used] = '\0';
+	memcpy(v->dec_in.data + v->dec_in.used, data, len);
+	v->dec_in.used += len;
+	v->dec_in.data[v->dec_in.used] = '\0';
 	if (f->subclass & 0x01) {	// RTP Marker
 		if (decode_video(v)) {
 			show_frame(env, 0);
 			v->completed = 0;
-			v->buf.used = 0;
+			v->dec_in.used = 0;
 		}
 	}
 	return 0;
@@ -975,7 +1020,7 @@
 static struct ast_frame *split_frame(struct video_out_desc *out, int len)
 {
 	struct ast_frame *cur = NULL, *first = NULL;
-	uint8_t *d = out->encbuf.data;
+	uint8_t *d = out->enc_out.data;
 	int l = len; /* size of the current fragment. If 0, must look for a psc */
 	int frags = 0;
 
@@ -1060,9 +1105,9 @@
 {
 	int buflen;
 	struct video_out_desc *v = &env->out;
-	struct fbuf_t *b = &v->encbuf;
-
-	if (!v->buf.data) {
+	struct fbuf_t *b = &v->enc_out;
+
+	if (!v->loc_src.data) {
 		ast_log(LOG_WARNING, "fail, no buffer\n");
 		return NULL;
 	}
@@ -1076,7 +1121,9 @@
 		ast_log(LOG_WARNING, "fail, no encbuf\n");
 		return NULL;
 	}
-	buflen = avcodec_encode_video(v->context, b->data, b->size, v->frame);
+	my_scale(&v->loc_src, NULL, &v->enc_in, NULL);
+
+	buflen = avcodec_encode_video(v->enc_ctx, b->data, b->size, v->frame);
 	return split_frame(v, buflen);
 }
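
One subtlety above: avcodec_encode_video() is never passed enc_in
explicitly. It does not need to be, because video_out_init() pointed
v->frame->data[] into enc_in->data, so scaling into enc_in and handing
v->frame to the encoder touch the same buffer:

	/* wired once in video_out_init(): */
	v->frame->data[0] = enc_in->data;		/* Y */
	v->frame->data[1] = v->frame->data[0] + size;	/* U */
	v->frame->data[2] = v->frame->data[1] + size/4;	/* V */

	/* so at encode time (b = &v->enc_out) it suffices to do: */
	my_scale(&v->loc_src, NULL, &v->enc_in, NULL);	/* fills enc_in->data */
	buflen = avcodec_encode_video(v->enc_ctx, b->data, b->size, v->frame);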
 
@@ -1088,9 +1135,8 @@
  */
 static void *video_thread(void *arg)
 {
-	int i = 0;
 	struct video_desc *env = arg;
-	for (;;i++) {
+	for (;;) {
 		/* XXX 20 times/sec */
 		struct timeval t = { 0, 50000 };
 		struct ast_frame *p, *f;
@@ -1098,7 +1144,7 @@
 		int fd = chan->alertpipe[1];
 
 		if (env->shutdown) {
-			ast_log(LOG_WARNING, "video_thread shutting down%d\n", i);
+			ast_log(LOG_WARNING, "video_thread shutting down\n");
 			break;
 		}
 		/* sleep for a while */
@@ -1127,6 +1173,26 @@
 	return NULL;
 }
 
+/*! initialize the video environment.
+ * From the config file we get some basic parameters; we must
+ * propagate them to all the descriptors for the various video
+ * representations that we handle.
+ */
+static void init_env(struct video_desc *env)
+{
+	struct fbuf_t *x;
+
+	/* Local video chain */
+	x = &(env->out.loc_src);
+	x->pix_fmt = PIX_FMT_YUV420P;	/* default - camera format */
+	x->w = env->w;
+	x->h = env->h;
+	env->out.enc_in = *x;
+	env->out.loc_dpy = *x;
+	/* Remote video chain */
+	env->in.rem_dpy = *x;
+}
+
 /*!
  * The first call to the video code, called by oss_new() or similar.
  * Here we initialize the various components we use, namely SDL for display,
@@ -1137,19 +1203,23 @@
 static void console_video_start(struct video_desc *env,
 	struct ast_channel *owner)
 {
-	int fmt = SDL_IYUV_OVERLAY;	/* YV12 causes flicker in SDL */
-
+	int dpy_fmt = SDL_IYUV_OVERLAY;	/* YV12 causes flicker in SDL */
+	struct fbuf_t *b;
+
+	if (env == NULL)	/* video not initialized */
+		return;
 	if (owner == NULL)	/* nothing to do if we don't have a channel */
 		return;
 	env->owner = owner;
 	bzero(&env->in, sizeof(env->in));
 	bzero(&env->out, sizeof(env->out));
+	init_env(env);
 
 	/*
 	 * Register all codecs supported by the ffmpeg library.
-	 * Doing it once is enough.
+	 * We only need to do it once, but it probably
+	 * does no harm to do it multiple times.
 	 */
-
 	avcodec_init();
 	avcodec_register_all();
 	if (video_in_init(&env->in, CONSOLE_FORMAT_VIDEO)) {
@@ -1178,13 +1248,25 @@
 		goto no_sdl;
 	}
 	SDL_WM_SetCaption("Asterisk console Video Output", NULL);
-	env->bmp[0] = SDL_CreateYUVOverlay(env->w, env->h, fmt, env->screen);
-	env->bmp[1] = SDL_CreateYUVOverlay(env->w, env->h, fmt, env->screen);
-	if (env->bmp[0] && env->bmp[1]) {
-		ast_mutex_init(&env->sdl_lock);
-		env->sdl_ok = 1;
-	}
-	/* otherwise should release the screen */
+	b = &env->in.rem_dpy;
+	env->bmp[0] = SDL_CreateYUVOverlay(b->w, b->h, dpy_fmt, env->screen);
+	b = &env->out.loc_dpy;
+	env->bmp[1] = SDL_CreateYUVOverlay(b->w, b->h, dpy_fmt, env->screen);
+	if (env->bmp[0] == NULL || env->bmp[1] == NULL) {
+		/* otherwise should release the screen */
+		goto no_sdl;
+	}
+	ast_mutex_init(&env->sdl_lock);
+	/* position of video windows on the output */
+	env->rect[0].x = 0;
+	env->rect[0].y = 0;
+	env->rect[0].w = env->in.rem_dpy.w;
+	env->rect[0].h = env->in.rem_dpy.h;
+	env->rect[1].x = env->in.rem_dpy.w;
+	env->rect[1].y = 0;
+	env->rect[1].w = env->out.loc_dpy.w;
+	env->rect[1].h = env->out.loc_dpy.h;
+	env->sdl_ok = 1;
 
 no_sdl:
 	/*
@@ -1217,4 +1299,53 @@
 		video_out_init(&env->out, CONSOLE_FORMAT_VIDEO);
 	}
 }
+
+/* see chan_oss.c for these macros */
+#ifndef M_START
+#define _UNDO_M_START
+#define M_START(var, val) \
+        const char *__s = var; const char *__val = val;
+#define M_END(x)		x;
+#define M_F(tag, f)		if (!strcasecmp((__s), tag)) { f; } else
+#define M_BOOL(tag, dst)        M_F(tag, (dst) = ast_true(__val) )
+#define M_UINT(tag, dst)        M_F(tag, (dst) = strtoul(__val, NULL, 0) )
+#define M_STR(tag, dst)         M_F(tag, ast_copy_string(dst, __val, sizeof(dst)))
+#endif
+
+/*! parse config command for video support */
+static int console_video_config(struct video_desc **penv,
+	const char *var, const char *val)
+{
+	struct video_desc *env;
+	M_START(var, val);
+
+	if (penv == NULL) {
+		ast_log(LOG_WARNING, "bad argument penv=NULL\n");
+		return 1;	/* error */
+	}
+	if (*penv == NULL)
+		*penv = ast_calloc(1, sizeof(struct video_desc));
+	if (*penv == NULL) {
+		ast_log(LOG_WARNING, "fail to allocate video_desc\n");
+		return 1;	/* error */
+	}
+	env = *penv;
+	M_STR("videodevice", env->videodevice)
+	M_UINT("videowidth", env->w)
+	M_UINT("videoheight", env->h)
+	M_UINT("fps", env->fps)
+	M_UINT("bitrate", env->bitrate)
+	M_END(return 1;)	/* the 'nothing found' case */
+	return 0;		/* found something */
+}
+#ifdef _UNDO_M_START
+#undef M_START
+#undef M_END
+#undef M_F
+#undef M_BOOL
+#undef M_UINT
+#undef M_STR
+#undef _UNDO_M_START
+#endif
+
 #endif	/* video support */
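
For reference, the M_* matchers (see chan_oss.c) chain into one if/else
ladder, so e.g. M_UINT("fps", env->fps) followed by M_END(return 1;)
expands to roughly:

	if (!strcasecmp(__s, "fps")) {
		env->fps = strtoul(__val, NULL, 0);
	} else
		/* ... next matcher ..., and finally M_END's argument: */
		return 1;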



