[asterisk-commits] rizzo: branch rizzo/astobj2 r77806 - /team/rizzo/astobj2/channels/chan_oss.c

SVN commits to the Asterisk project asterisk-commits at lists.digium.com
Mon Jul 30 18:00:45 CDT 2007


Author: rizzo
Date: Mon Jul 30 18:00:45 2007
New Revision: 77806

URL: http://svn.digium.com/view/asterisk?view=rev&rev=77806
Log:
split the descriptors for video in, video out, data buffers and SDL windows.


Modified:
    team/rizzo/astobj2/channels/chan_oss.c

Modified: team/rizzo/astobj2/channels/chan_oss.c
URL: http://svn.digium.com/view/asterisk/team/rizzo/astobj2/channels/chan_oss.c?view=diff&rev=77806&r1=77805&r2=77806
==============================================================================
--- team/rizzo/astobj2/channels/chan_oss.c (original)
+++ team/rizzo/astobj2/channels/chan_oss.c Mon Jul 30 18:00:45 2007
@@ -330,27 +330,39 @@
 /* Structures for ffmpeg processing */
 /*
  * Information for decoding incoming video stream.
- * We need one of these for each incoming video stream.
- */
-struct video_desc {
+ * We need:
+ * - one descriptor per output stream (camera or file, plus encoding context)
+ * - one descriptor per incoming stream (network data, plus decoding context)
+ * - one descriptor per window (SDL).
+ */
+
+struct dbuf_t {		/* buffers, dynamically allocated */
+	uint8_t	*data;
+	int	size;
+	int	used;
+};
+
+struct video_out_desc {
+};
+struct video_in_desc {
 	AVCodecContext          *context;	/* information about the codec in the stream */
 	AVCodec                 *codec;		/* reference to the codec */
 	AVFrame                 *frame;		/* place to store the frame */
 	AVCodecParserContext    *parser;
-
-	int                     completed;
-	/* buffer for the incoming bitstream */
-	uint8_t                 *data;
-	int			datasize;	/* buffer size */
-	int                     used;	/* bytes used so far */
-
-	SDL_Surface             *screen;
-	int                     initialized;
-	SDL_Overlay             *bmp;
+	int                     completed;	/* probably unnecessary */
+	struct dbuf_t buf;
 	int                     lastrxframe;
 	int                     discard;
 	struct timeval	ts;
 	int received;
+};
+struct video_desc {
+	struct video_in_desc in;
+	struct video_out_desc out;
+
+	SDL_Surface             *screen;
+	int                     initialized;
+	SDL_Overlay             *bmp;
 
 	struct ast_frame *echo;
 
@@ -523,6 +535,64 @@
 	return CODEC_ID_NONE;
 }
 
+/*
+ * Release all resources held by a video_in_desc (decoder context,
+ * stream parser, frame and input buffer) and clear the descriptor so
+ * it can be re-initialized later. Always returns -1 so callers can use
+ * it directly as an error path: "return video_in_uninit(v);".
+ */
+static int video_in_uninit(struct video_in_desc *v)
+{
+	if (v->context) {
+		avcodec_close(v->context);
+		av_free(v->context);
+	}
+	if (v->parser)	/* previously leaked: parsers must be closed explicitly */
+		av_parser_close(v->parser);
+	if (v->frame)
+		av_free(v->frame);
+	if (v->buf.data)
+		free(v->buf.data);
+	bzero(v, sizeof(*v));
+	return -1;	/* error, in case someone cares */
+}
+
+/*
+ * Prepare a video_in_desc for decoding the given format: look up the
+ * decoder, allocate and open its context, and create the stream parser
+ * and the destination frame. On any failure all partially-acquired
+ * state is torn down via video_in_uninit() and -1 is returned;
+ * returns 0 on success.
+ */
+static int video_in_init(struct video_in_desc *v, uint32_t format)
+{
+	enum CodecID codec;
+
+	v->codec = NULL;
+	v->context = NULL;
+	v->frame = NULL;
+	v->parser = NULL;
+	v->buf.data = NULL;
+	v->buf.size = 0;	/* keep size/used consistent with the NULL buffer */
+	v->buf.used = 0;
+	v->completed = 0;
+	v->lastrxframe        = -1;
+
+	v->ts = ast_tvnow();
+	codec = map_video_format(format);
+	ast_log(LOG_WARNING, "init for format 0x%x gives %d\n", format, codec);
+
+	v->codec = avcodec_find_decoder(codec);
+	if (!v->codec) {
+		ast_log(LOG_WARNING, "Unable to find the decoder for format %d\n", codec);
+		return video_in_uninit(v);
+	}
+	/*
+	 * Initialize the codec context.
+	 */
+	v->context = avcodec_alloc_context();
+	if (!v->context) {	/* allocation can fail; avcodec_open(NULL, ...) would crash */
+		ast_log(LOG_WARNING, "Unable to allocate the codec context\n");
+		return video_in_uninit(v);
+	}
+	if (avcodec_open(v->context, v->codec) < 0) {
+		ast_log(LOG_WARNING, "Unable to open the codec context\n");
+		return video_in_uninit(v);
+	}
+
+	v->parser = av_parser_init(codec);
+	if (!v->parser) {
+		ast_log(LOG_WARNING, "Unable to initialize the codec parser\n");
+		return video_in_uninit(v);
+	}
+
+	v->frame = avcodec_alloc_frame();
+	if (!v->frame) {
+		ast_log(LOG_WARNING, "Unable to allocate the video frame\n");
+		return video_in_uninit(v);
+	}
+	return 0;	/* ok */
+}
+
 /*
  * It initializes the video_desc struct which contains all the structures
  * needed by ffmpeg and SDL libraries.
@@ -536,24 +606,9 @@
  */
 static void ffmpeg_init(struct video_desc *env, uint32_t format)
 {
-	enum CodecID codec;
-
-	env->codec = NULL;
-	env->context = NULL;
-	env->frame = NULL;
-	env->parser = NULL;
-	env->data = NULL;	/* used = size = 0 */
-	env->used = 0;
-	env->datasize = 0;
-	env->completed = 0;
 	env->screen = NULL;
 	env->initialized = 0;
 	env->bmp = NULL;
-	env->lastrxframe        = -1;
-
-	env->ts = ast_tvnow();
-	codec = map_video_format(format);
-	ast_log(LOG_WARNING, "init for format 0x%x gives %d\n", format, codec);
 	avcodec_init();
 	/*
 	 * Register all codecs supported by the ffmpeg library.
@@ -561,36 +616,6 @@
 	 */
 	avcodec_register_all();
 
-	/*
-	 * Searching for the H.263+ decoder; in the decoding process
-	 * the H.263 decoder in compatible with H.263+ stream.
-	 */
-	env->codec = avcodec_find_decoder(codec);
-	if(!env->codec) {
-		ast_log(LOG_WARNING, "Unable to find the decoder for format %d\n", codec);
-		return;
-	}
-
-	/*
-	* Initialize the codec context.
-	*/
-	env->context = avcodec_alloc_context();
-	if(avcodec_open(env->context, env->codec) < 0) {
-		ast_log(LOG_WARNING, "Unable to open the codec context\n");
-		return;
-	}
-
-	env->parser = av_parser_init(codec);
-	if(!env->parser) {
-		ast_log(LOG_WARNING, "Unable to initialize the H.263 codec parser\n");
-		return;
-	}
-
-	env->frame = avcodec_alloc_frame();
-	if(!env->frame) {
-		ast_log(LOG_WARNING, "Unable to allocate the video frame\n");
-		return;
-	}
 
 	// SDL specific
 	if(SDL_Init(SDL_INIT_VIDEO)) {
@@ -598,6 +623,9 @@
 		return;
 	}
 
+	if (video_in_init(&env->in, format))
+		return;
+
 	env->initialized = 1;
 }
 
@@ -616,14 +644,7 @@
 		i++;
 	}
 	ast_log(LOG_WARNING, "ffmpeg_uninit drop %d frames\n", i);
-	if(env->context) {
-		avcodec_close(env->context);
-		av_free(env->context);
-	}
-	if(env->frame)
-		av_free(env->frame);
-	if(env->data)
-		free(env->data);
+	video_in_uninit(&env->in);
 	if(env->bmp)
 		SDL_FreeYUVOverlay(env->bmp);
 	SDL_Quit();
@@ -719,19 +740,19 @@
 * The av_parser_parse should merge a randomly chopped-up stream into proper frames.
  * After that, if we have a valid frame, we decode it until the entire frame is processed.
  */
-static int decode_video(struct video_desc *env)
-{
-	uint8_t *src = env->data;
-	int srclen = env->used;
+static int decode_video(struct video_in_desc *v)
+{
+	uint8_t *src = v->buf.data;
+	int srclen = v->buf.used;
 
 	if (!srclen)
 		return 0;
 	while (srclen) {
 		uint8_t *data;
 		int datalen;
-		int ret = av_parser_parse(env->parser, env->context, &data, &datalen, src, srclen, 0, 0);
+		int ret = av_parser_parse(v->parser, v->context, &data, &datalen, src, srclen, 0, 0);
 		if (datalen) {
-			ret = avcodec_decode_video(env->context, env->frame, &(env->completed), data, datalen);
+			ret = avcodec_decode_video(v->context, v->frame, &(v->completed), data, datalen);
 			if(ret < 0) {
 				ast_log(LOG_NOTICE, "Errore nella decodifica\n");
 				return 0;
@@ -739,7 +760,7 @@
 			src += ret;
 			srclen -= ret;
 		}
-		// ast_log(LOG_WARNING, "in %d ret %d/%d outlen %d complete %d\n", env->used, srclen, ret, datalen, env->completed);
+		// ast_log(LOG_WARNING, "in %d ret %d/%d outlen %d complete %d\n", v->buf.used, srclen, ret, datalen, v->completed);
 	}
 	return 1;
 }
@@ -755,11 +776,12 @@
 {
 	AVPicture pict;
 	SDL_Rect rect;
+	AVCodecContext *c = env->in.context;	/* shorthand */
 
 	if (!env->initialized)
 		return;
 	if(env->screen == NULL) {
-		env->screen = SDL_SetVideoMode(env->context->width, env->context->height, 0, 0);
+		env->screen = SDL_SetVideoMode(c->width, c->height, 0, 0);
 		if(!env->screen) {
 			ast_log(LOG_ERROR, "SDL: could not set video mode - exiting\n");
 			return;
@@ -768,7 +790,7 @@
 	}
 
 	if(!env->bmp)
-		env->bmp = SDL_CreateYUVOverlay(env->context->width, env->context->height,
+		env->bmp = SDL_CreateYUVOverlay(c->width, c->height,
 			SDL_YV12_OVERLAY, env->screen);
 
 	SDL_LockYUVOverlay(env->bmp);
@@ -781,19 +803,15 @@
 
 #if 0 /* XXX img_convert is deprecated */
 	img_convert(&pict, PIX_FMT_YUV420P,
-		(AVPicture *)env->frame, env->context->pix_fmt,
-		env->context->width, env->context->height);
+		(AVPicture *)env->frame, c->pix_fmt,
+		c->width, c->height);
 #else /* XXX replacement */
 	{
 		struct SwsContext *convert_ctx;
-		AVPicture *pict_in = (AVPicture *)env->frame;
-
-		convert_ctx = sws_getContext(env->context->width,
-			env->context->height,
-			env->context->pix_fmt /* input format */,
-			env->context->width,
-			env->context->height,
-			PIX_FMT_YUV420P /* output format ? */,
+		AVPicture *pict_in = (AVPicture *)env->in.frame;
+
+		convert_ctx = sws_getContext(c->width, c->height, c->pix_fmt /* input format */,
+			c->width, c->height, PIX_FMT_YUV420P /* output format ? */,
 			SWS_BICUBIC, NULL, NULL, NULL);
 		if (convert_ctx == NULL) {
 			ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
@@ -802,7 +820,7 @@
 
 		sws_scale(convert_ctx,
 			pict_in->data, pict_in->linesize,
-			env->context->width, env->context->height,
+			c->width, c->height,
 			pict.data, pict.linesize);
 
 		sws_freeContext(convert_ctx);
@@ -814,12 +832,13 @@
 #if 0	/* more testing, overlay the received image with the local picture */
 	ast_log(LOG_WARNING, "show_frame: linesize %d %d %d\n", pict.linesize[0], pict.linesize[1], pict.linesize[2]);
 	if (env->webcam_imgbuf) {
-		bcopy(env->webcam_imgbuf, pict.data[0], env->context->width*env->context->height);
+		bcopy(env->webcam_imgbuf, pict.data[0], c->width*c->height);
 	}
 #endif
-	rect.x = 0; rect.y = 0;
-	rect.w = env->context->width;
-	rect.h = env->context->height;
+	rect.x = 0;
+	rect.y = 0;
+	rect.w = c->width;
+	rect.h = c->height;
 	SDL_DisplayYUVOverlay(env->bmp, &rect);
 }
 
@@ -853,13 +872,13 @@
 		*fp = f1;
 	}
 
-	i = ast_tvdiff_ms(now, env->ts);
+	i = ast_tvdiff_ms(now, env->in.ts);
 	if (i > 1000) {
-		ast_log(LOG_WARNING, "received %d video frames in %d ms\n", env->received, i);
-		env->received = 0;
-		env->ts = now;
-	}
-	env->received++;
+		ast_log(LOG_WARNING, "received %d video frames in %d ms\n", env->in.received, i);
+		env->in.received = 0;
+		env->in.ts = now;
+	}
+	env->in.received++;
 #if defined(DROP_PACKETS) && DROP_PACKETS > 0
 	/*
 	* Fragment of code to simulate lost/delayed packets
@@ -876,13 +895,13 @@
 	 * subclass. This is slightly annoying as it goes to overwrite
 	 * the payload type entry.
 	 */
-	if(env->discard) {
+	if(env->in.discard) {
 		if(f->subclass & 0x01) {
-			free(env->data);
-			env->data = NULL;
-			env->used = 0;
-			env->lastrxframe = f->seqno;
-			env->discard = 0;
+			free(env->in.buf.data);
+			env->in.buf.data = NULL;
+			env->in.buf.used = 0;
+			env->in.lastrxframe = f->seqno;
+			env->in.discard = 0;
 		}
 		return 0;
 	}
@@ -892,8 +911,8 @@
 	* We can handle the circular seqno with the following operation
 	* (seqno is a 16 bits number)
 	*/
-	if((env->lastrxframe+1)%0x10000 != f->seqno && env->lastrxframe != -1) {
-		env->discard = 1;
+	if((env->in.lastrxframe+1)%0x10000 != f->seqno && env->in.lastrxframe != -1) {
+		env->in.discard = 1;
 		return 0;
 	}
 
@@ -903,24 +922,24 @@
 	/* allocate buffer as we see fit. ffmpeg wants an extra FF_INPUT_BUFFER_PADDING_SIZE
 	 * and a 0 as a buffer terminator to prevent trouble.
 	 */
-	if(env->data == NULL) {
-		env->datasize = len + FF_INPUT_BUFFER_PADDING_SIZE;
-		env->used = 0;
-		env->data = malloc(len);
-	} else if (env->used + len + FF_INPUT_BUFFER_PADDING_SIZE> env->datasize) {
-		env->datasize = env->used + len + FF_INPUT_BUFFER_PADDING_SIZE;
-		env->data = realloc(env->data, env->datasize);
-	}
-	memcpy(env->data+env->used, data, len);
-	env->used += len;
-	env->data[env->used] = '\0';
+	if(env->in.buf.data == NULL) {
+		env->in.buf.size = len + FF_INPUT_BUFFER_PADDING_SIZE;
+		env->in.buf.used = 0;
+		env->in.buf.data = malloc(len);
+	} else if (env->in.buf.used + len + FF_INPUT_BUFFER_PADDING_SIZE> env->in.buf.size) {
+		env->in.buf.size = env->in.buf.used + len + FF_INPUT_BUFFER_PADDING_SIZE;
+		env->in.buf.data = realloc(env->in.buf.data, env->in.buf.size);
+	}
+	memcpy(env->in.buf.data+env->in.buf.used, data, len);
+	env->in.buf.used += len;
+	env->in.buf.data[env->in.buf.used] = '\0';
 	if(f->subclass & 0x01) // RTP Marker
-		if(decode_video(env)) {
+		if(decode_video(&env->in)) {
 			show_frame(env);
-			env->completed = 0;
-			env->used = 0;
-		}
-	env->lastrxframe = f->seqno;
+			env->in.completed = 0;
+			env->in.buf.used = 0;
+		}
+	env->in.lastrxframe = f->seqno;
 
 	return 0;
 }




More information about the asterisk-commits mailing list