[asterisk-commits] rizzo: branch rizzo/astobj2 r77153 - /team/rizzo/astobj2/channels/chan_oss.c

SVN commits to the Asterisk project asterisk-commits at lists.digium.com
Wed Jul 25 16:26:23 CDT 2007


Author: rizzo
Date: Wed Jul 25 16:26:23 2007
New Revision: 77153

URL: http://svn.digium.com/view/asterisk?view=rev&rev=77153
Log:
Save the version that can render incoming video using SDL and ffmpeg.


Modified:
    team/rizzo/astobj2/channels/chan_oss.c

Modified: team/rizzo/astobj2/channels/chan_oss.c
URL: http://svn.digium.com/view/asterisk/team/rizzo/astobj2/channels/chan_oss.c?view=diff&rev=77153&r1=77152&r2=77153
==============================================================================
--- team/rizzo/astobj2/channels/chan_oss.c (original)
+++ team/rizzo/astobj2/channels/chan_oss.c Wed Jul 25 16:26:23 2007
@@ -37,6 +37,13 @@
  ***/
 
 #include "asterisk.h"
+
+/*
+ * experimental support for decoding a video session.
+ */
+//#define DROP_PACKETS	5	// if set, simulate this percentage of lost video packets
+#define HAVE_SDL	1
+#define HAVE_FFMPEG	1
 
 ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
 
@@ -75,6 +82,14 @@
 #include "asterisk/stringfields.h"
 #include "asterisk/abstract_jb.h"
 #include "asterisk/musiconhold.h"
+#include "asterisk/app.h"
+
+#if HAVE_FFMPEG
+#include <ffmpeg/avcodec.h>
+#endif
+#if HAVE_SDL
+#include <SDL/SDL.h>
+#endif
 
 /* ringtones we use */
 #include "busy.h"
@@ -285,7 +300,385 @@
 
 static int oss_debug;
 
+#if HAVE_FFMPEG && HAVE_SDL
 /*
+ * In order to decode video you need the following patch to the
+ * main Makefile:
+
+@@ -269,6 +273,11 @@
+   SOLINK=-shared -fpic -L/usr/local/ssl/lib
+ endif
+ 
++# GCC configuration flags for SDL library
++ASTCFLAGS+=`sdl-config --cflags`
++# Add library for ffmpeg and SDL lib.
++SOLINK+=-lavcodec -lz -lm -g `sdl-config --libs`
++
+ # This is used when generating the doxygen documentation
+ ifneq ($(DOT),:)
+   HAVEDOT=yes
+
+Then you need to add to sip.conf:
+	[general](+)
+		allow=h263p
+
+and this one to main/rtp.c:
+
+@@ -1509,5 +1511,6 @@
+        [31] = {1, AST_FORMAT_H261},
+        [34] = {1, AST_FORMAT_H263},
+        [97] = {1, AST_FORMAT_ILBC},
++       [98] = {1, AST_FORMAT_H263_PLUS},
+        [99] = {1, AST_FORMAT_H264},
+        [101] = {0, AST_RTP_DTMF},
+
+ */
+
+/* Structures for ffmpeg processing */
+/*
+ * Information for decoding an incoming video stream.
+ * We need one of these for each incoming video stream.
+ */
+struct video_desc {
+	AVCodecContext          *context;
+	AVCodec                 *codec;
+	AVFrame                 *frame;
+	AVCodecParserContext    *parser;
+	int                     completed;
+	uint8_t                 *data;
+	int                     datalen;
+	SDL_Surface             *screen;
+	int                     initialized;
+	SDL_Overlay             *bmp;
+	int                     lastrxframe;
+	int                     discard;
+};
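+/*
+ * Note: one video_desc instance is embedded in each chan_oss_pvt;
+ * it is set up by ffmpeg_init() when the channel is created in oss_new()
+ * and released by ffmpeg_uninit() on hangup.
+ */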
+
+/* Helper functions to process incoming video.
+ * For each incoming video call, invoke ffmpeg_init() to initialize
+ * the decoding structures; incoming video frames are then processed
+ * by write_video(), which calls pre_process_data() to extract the
+ * bitstream and accumulates the data into a buffer within video_desc.
+ * When a frame is complete (determined by the marker bit in the RTP
+ * header), call decode_video() to decode it and, if that succeeds,
+ * show_frame() to display the frame.
+ */
+/* Initialize the decoding structure */
+static void ffmpeg_init(struct video_desc *);
+/* Uninitialize the decoding structure */
+static void ffmpeg_uninit(struct video_desc *);
+/* Clean the bitstream in the RTP payload */
+static uint8_t *pre_process_data(uint8_t *, int *);
+/* Decode video frame once completed */
+static int decode_video(struct video_desc *);
+/* Display the decoded frame */
+static void show_frame(struct video_desc *);
+
+static struct video_desc *get_video_desc(struct ast_channel *c);
+
+/* Macros used as a wrapper around the actual video format we want to use */
+#define AST_FORMAT_CUSTOM (AST_FORMAT_H263_PLUS)
+#define CODEC_ID_CUSTOM CODEC_ID_H263
+static int write_video(struct ast_channel *chan, struct ast_frame *f);
+
+/*
+ * Initialize the video_desc struct, which contains all the structures
+ * needed by the ffmpeg and SDL libraries:
+ * - register all codecs supported by ffmpeg;
+ * - search for the H.263+ decoder (the H.263 decoder can decode an H.263+ stream);
+ * - allocate and initialize the codec context;
+ * - initialize the codec parser (it should be used to reconstruct
+ *   the entire bitstream from a fragmented stream);
+ * - allocate a new frame;
+ * - initialize the SDL environment to support the video display.
+ */
+static void ffmpeg_init(struct video_desc *env)
+{
+	env->codec              = NULL;
+	env->context            = NULL;
+	env->frame              = NULL;
+	env->parser             = NULL;
+	env->data               = NULL;
+	env->completed          = 0;
+	env->datalen            = 0;
+	env->screen             = NULL;
+	env->initialized        = 0;
+	env->bmp                = NULL;
+	env->lastrxframe        = -1;
+
+	avcodec_init();
+	/*
+	 * Register all codecs supported by the ffmpeg library.
+	 */
+	avcodec_register_all();
+
+	/*
+	 * Search for the H.263+ decoder; for decoding purposes
+	 * the H.263 decoder is compatible with an H.263+ stream.
+	 */
+	env->codec = avcodec_find_decoder(CODEC_ID_H263);
+	if(!env->codec) {
+		ast_log(LOG_WARNING, "Unable to find the H.263 decoder\n");
+		return;
+	}
+
+	/*
+	* Initialize the codec context.
+	*/
+	env->context = avcodec_alloc_context();
+	if(avcodec_open(env->context, env->codec) < 0) {
+		ast_log(LOG_WARNING, "Unable to open the codec context\n");
+		return;
+	}
+
+	env->parser = av_parser_init(CODEC_ID_H263);
+	if(!env->parser) {
+		ast_log(LOG_WARNING, "Unable to initialize the H.263 codec parser\n");
+		return;
+	}
+
+	env->frame = avcodec_alloc_frame();
+	if(!env->frame) {
+		ast_log(LOG_WARNING, "Unable to allocate the video frame\n");
+		return;
+	}
+
+	// SDL specific
+	if(SDL_Init(SDL_INIT_VIDEO)) {
+		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
+		return;
+	}
+
+	env->initialized = 1;
+}
+
+/*
+ * Freeing all memory used and uninitialize
+ * the ffmpeg and SDL environments.
+ */
+static void ffmpeg_uninit(struct video_desc *env)
+{
+	if (!env) {
+		ast_log(LOG_WARNING, "ffmpeg_uninit on null\n");
+		return;
+	}
+	if(env->context) {
+		avcodec_close(env->context);
+		av_free(env->context);
+	}
+	if(env->frame)
+		av_free(env->frame);
+	if(env->data)
+		free(env->data);
+	if(env->bmp)
+		SDL_FreeYUVOverlay(env->bmp);
+	SDL_Quit();
+	bzero(env, sizeof(struct video_desc));
+	env->initialized = 0;
+}
+
+#define MAKE_MASK(bits)                ( (1<<(bits)) -1 )
+
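+/*
+ * For reference, the first two bytes of the RFC 2429 (H.263+) payload
+ * header are laid out as follows (see RFC 2429, section 4.1):
+ *
+ *    RR (5 bits) | P (1 bit) | V (1 bit) | PLEN (6 bits) | PEBIT (3 bits)
+ *
+ * The helpers below extract the P bit (start-code indicator) and PLEN
+ * (length of the extra picture header, 0 if none).
+ */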
+/*
+ * Get the P flag from the H.263+ header from the RTP payload (see RFC 2429).
+ */
+static inline unsigned int rfc2429_get_P(const uint8_t *header){
+	return (header[0]>>2) & 0x1;
+}
+
+/*
+ * Get the PLEN variable from the H.263+ header from the RTP payload (see RFC 2429).
+ */
+static inline unsigned int rfc2429_get_PLEN(const uint8_t *header){
+	unsigned short *p = (unsigned short *)header;
+	return (ntohs(p[0]) >> 3) & MAKE_MASK(6);
+}
+
+/*
+ * Skip the extra header in the RTP payload and reconstruct a valid
+ * H.263+ picture start code (see RFC 2429).
+ */
+static uint8_t *pre_process_data(uint8_t *data, int *len)
+{
+	int PLEN;
+	int P;
+
+	if(data == NULL)
+		return NULL;
+	if(*len < 2)
+		return NULL;
+
+	PLEN = rfc2429_get_PLEN(data);
+	P = rfc2429_get_P(data);
+
+	if(PLEN > 0) {
+		data += PLEN;
+		(*len) -= PLEN;
+	}
+	if(P)
+		data[0] = data[1] = 0;
+	else {
+		data += 2;
+		(*len) -= 2;
+	}
+
+	return data;
+}
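+/*
+ * For example: with P=1 and PLEN=0 the two payload header bytes are
+ * overwritten with 0x00 0x00, restoring the picture start code that
+ * RFC 2429 elides; with P=0 the two header bytes are simply skipped
+ * and the remaining bytes continue the previous fragment.
+ */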
+
+/*
+ * Decode a valid H.263 frame.
+ * av_parser_parse() should merge a randomly chopped-up stream into proper frames.
+ * After that, if we have a valid frame, we decode it until the entire frame is processed.
+ */
+static int decode_video(struct video_desc *env)
+{
+	uint8_t *aux = env->data;
+	int len = env->datalen;
+	int ret;
+	uint8_t *data;
+	int datalen;
+
+	while(len) {
+		ret = av_parser_parse(env->parser, env->context, &data, &datalen, aux, len, 0, 0);
+		if(datalen) {
+			ret = avcodec_decode_video(env->context, env->frame, &(env->completed), data, datalen);
+			if(ret < 0) {
+				ast_log(LOG_NOTICE, "Error decoding video frame\n");
+				return 0;
+			}
+			aux += ret;
+			len -= ret;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Display the decoded video frame using the SDL library.
+ * - Set the video mode to the resolution specified by the codec context
+ * - Create a YUV overlay into which the decoded frame is copied
+ * - Once the decoded frame is copied into the overlay, display it
+ * TODO: replace the call to img_convert(): it is deprecated.
+ */
+static void show_frame(struct video_desc *env)
+{
+	AVPicture pict;
+	SDL_Rect rect;
+
+	if(env->screen == NULL) {
+		env->screen = SDL_SetVideoMode(env->context->width, env->context->height, 0, 0);
+		if(!env->screen) {
+			ast_log(LOG_ERROR, "SDL: could not set video mode - exiting\n");
+			return;
+		}
+		SDL_WM_SetCaption("Asterisk console Video Output", NULL);
+	}
+
+	if(!env->bmp)
+		env->bmp = SDL_CreateYUVOverlay(env->context->width, env->context->height,
+			SDL_YV12_OVERLAY, env->screen);
+
+ast_log(LOG_WARNING, "locked sdl\n");
+	SDL_LockYUVOverlay(env->bmp);
+	pict.data[0] = env->bmp->pixels[0];
+	pict.data[1] = env->bmp->pixels[2];
+	pict.data[2] = env->bmp->pixels[1];
+	pict.linesize[0] = env->bmp->pitches[0];
+	pict.linesize[1] = env->bmp->pitches[2];
+	pict.linesize[2] = env->bmp->pitches[1];
+
+	img_convert(&pict, PIX_FMT_YUV420P,
+		(AVPicture *)env->frame, env->context->pix_fmt,
+		env->context->width, env->context->height);
+	SDL_UnlockYUVOverlay(env->bmp);
+ast_log(LOG_WARNING, "unlocked sdl\n");
+
+	rect.x = 0; rect.y = 0;
+	rect.w = env->context->width;
+	rect.h = env->context->height;
+	SDL_DisplayYUVOverlay(env->bmp, &rect);
+}
+
+/*
+ * This function is called (by Asterisk) for each video fragment that needs to be processed.
+ * We need to reconstruct the entire frame before we can decode it.
+ * After a video fragment is received we have to:
+ * - clean the bitstream with pre_process_data()
+ * - append the bitstream to a buffer
+ * - if the fragment is the last one (RTP marker set), decode the frame with decode_video()
+ * - once decoding is completed, display the decoded frame with show_frame()
+ */
+static int write_video(struct ast_channel *chan, struct ast_frame *f)
+{
+	uint8_t *data;
+	int len;
+	struct video_desc *env = get_video_desc(chan);
+
+	if(!env->initialized)
+		return -1;	/* error */
+
+#if defined(DROP_PACKETS) && DROP_PACKETS > 0
+	/*
+	* Fragment of code to simulate lost/delayed packets
+	*/
+	if((random() % 10000) <= 100*DROP_PACKETS) {
+		ast_log(LOG_NOTICE, "Packet lost [%d]\n", f->seqno);
+		return 0;
+	}
+#endif
+	/*
+	* If the discard flag is set, every packet must be discarded.
+	* When a marked packet arrives we can restart decoding.
+	*/
+	if(env->discard) {
+		if(f->subclass & 0x01) {
+			free(env->data);
+			env->data = NULL;
+			env->datalen = 0;
+			env->lastrxframe = f->seqno;
+			env->discard = 0;
+		}
+		return 0;
+	}
+
+	/*
+	* Only in-order fragments are accepted.
+	* We can handle the circular seqno with the following operation
+	* (seqno is a 16-bit number)
+	*/
+	if((env->lastrxframe+1)%0x10000 != f->seqno && env->lastrxframe != -1) {
+		env->discard = 1;
+		return 0;
+	}
+
+	len = f->datalen;
+	data = pre_process_data(f->data, &len);
+	if(env->data == NULL)
+		env->data = malloc(len);
+	else
+		env->data = realloc(env->data, env->datalen+len);
+	memcpy(env->data+env->datalen, data, len);
+	env->datalen += len;
+	if(f->subclass & 0x01) // RTP Marker
+		if(decode_video(env)) {
+			show_frame(env);
+			env->completed = 0;
+			free(env->data);
+			env->data = NULL;
+			env->datalen = 0;
+		}
+	env->lastrxframe = f->seqno;
+
+	return 0;
+}
+
+#else
+#define	AST_FORMAT_CUSTOM 0
+#endif	/* FFMPEG */
+
+/*!
  * Each sound is made of 'datalen' samples of sound, repeated as needed to
  * generate 'samplen' samples of data, then followed by 'silencelen' samples
  * of silence. The loop is repeated if 'repeat' is set.
@@ -310,8 +703,9 @@
 };
 
 
-/*
- * descriptor for one of our channels.
+/*!
+ * \brief descriptor for one of our channels.
+ *
  * There is one used for 'default' values (from the [general] entry in
  * the configuration file), and then one instance for each device
  * (the default is cloned from [general], others are only created
@@ -321,45 +715,45 @@
 	struct chan_oss_pvt *next;
 
 	char *name;
-	/*
+	/*!
 	 * cursound indicates which in struct sound we play. -1 means nothing,
 	 * any other value is a valid sound, in which case sampsent indicates
 	 * the next sample to send in [0..samplen + silencelen]
 	 * nosound is set to disable the audio data from the channel
 	 * (so we can play the tones etc.).
 	 */
-	int sndcmd[2];				/* Sound command pipe */
-	int cursound;				/* index of sound to send */
-	int sampsent;				/* # of sound samples sent  */
-	int nosound;				/* set to block audio from the PBX */
-
-	int total_blocks;			/* total blocks in the output device */
+	int sndcmd[2];				/*!< Sound command pipe */
+	int cursound;				/*!< index of sound to send */
+	int sampsent;				/*!< # of sound samples sent  */
+	int nosound;				/*!< set to block audio from the PBX */
+
+	int total_blocks;			/*!< total blocks in the output device */
 	int sounddev;
 	enum { M_UNSET, M_FULL, M_READ, M_WRITE } duplex;
 	int autoanswer;
 	int autohangup;
 	int hookstate;
-	char *mixer_cmd;			/* initial command to issue to the mixer */
-	unsigned int queuesize;		/* max fragments in queue */
-	unsigned int frags;			/* parameter for SETFRAGMENT */
-
-	int warned;					/* various flags used for warnings */
+	char *mixer_cmd;			/*!< initial command to issue to the mixer */
+	unsigned int queuesize;		/*!< max fragments in queue */
+	unsigned int frags;			/*!< parameter for SETFRAGMENT */
+
+	int warned;					/*!< various flags used for warnings */
 #define WARN_used_blocks	1
 #define WARN_speed		2
 #define WARN_frag		4
-	int w_errors;				/* overfull in the write path */
+	int w_errors;				/*!< overfull in the write path */
 	struct timeval lastopen;
 
 	int overridecontext;
 	int mute;
 
-	/* boost support. BOOST_SCALE * 10 ^(BOOST_MAX/20) must
-	 * be representable in 16 bits to avoid overflows.
+	/*! boost support. BOOST_SCALE * 10 ^(BOOST_MAX/20) must
+	 *  be representable in 16 bits to avoid overflows.
 	 */
 #define	BOOST_SCALE	(1<<9)
-#define	BOOST_MAX	40			/* slightly less than 7 bits */
-	int boost;					/* input boost, scaled by BOOST_SCALE */
-	char device[64];			/* device to open */
+#define	BOOST_MAX	40			/*!< slightly less than 7 bits */
+	int boost;					/*!< input boost, scaled by BOOST_SCALE */
+	char device[64];			/*!< device to open */
 
 	pthread_t sthread;
 
@@ -371,16 +765,28 @@
 	char cid_num[256];			/*XXX */
 	char mohinterpret[MAX_MUSICCLASS];
 
-	/* buffers used in oss_write */
+	/*! buffers used in oss_write */
 	char oss_write_buf[FRAME_SIZE * 2];
 	int oss_write_dst;
-	/* buffers used in oss_read - AST_FRIENDLY_OFFSET space for headers
-	 * plus enough room for a full frame
+	/*! buffers used in oss_read - AST_FRIENDLY_OFFSET space for headers
+	 *  plus enough room for a full frame
 	 */
 	char oss_read_buf[FRAME_SIZE * 2 + AST_FRIENDLY_OFFSET];
-	int readpos;				/* read position above */
-	struct ast_frame read_f;	/* returned by oss_read */
+	int readpos;				/*!< read position above */
+	struct ast_frame read_f;	/*!< returned by oss_read */
+
+#if HAVE_FFMPEG
+	struct video_desc env;
+#endif
 };
+
+#if HAVE_FFMPEG
+static struct video_desc *get_video_desc(struct ast_channel *c)
+{
+	struct chan_oss_pvt *o = c->tech_pvt;
+	return o ? &(o->env) : NULL;
+}
+#endif
 
 static struct chan_oss_pvt oss_default = {
 	.cursound = -1,
@@ -397,14 +803,14 @@
 	.boost = BOOST_SCALE,
 };
 
-static char *oss_active;	 /* the active device */
+static char *oss_active;	 /*!< the active device */
 
 static int setformat(struct chan_oss_pvt *o, int mode);
 
 static struct ast_channel *oss_request(const char *type, int format, void *data
 , int *cause);
 static int oss_digit_begin(struct ast_channel *c, char digit);
-static int oss_digit_end(struct ast_channel *c, char digit);
+static int oss_digit_end(struct ast_channel *c, char digit, unsigned int duration);
 static int oss_text(struct ast_channel *c, const char *text);
 static int oss_hangup(struct ast_channel *c);
 static int oss_answer(struct ast_channel *c);
@@ -418,7 +824,10 @@
 static const struct ast_channel_tech oss_tech = {
 	.type = "Console",
 	.description = tdesc,
-	.capabilities = AST_FORMAT_SLINEAR,
+	/* Formats that we need to process.
+	 * This option is overridden by the configuration file.
+	 */
+	.capabilities = AST_FORMAT_SLINEAR | AST_FORMAT_CUSTOM,
 	.requester = oss_request,
 	.send_digit_begin = oss_digit_begin,
 	.send_digit_end = oss_digit_end,
@@ -428,12 +837,14 @@
 	.read = oss_read,
 	.call = oss_call,
 	.write = oss_write,
+	/* We need this to declare the capability to process video frames */
+	.write_video = write_video,
 	.indicate = oss_indicate,
 	.fixup = oss_fixup,
 };
 
-/*
- * returns a pointer to the descriptor with the given name
+/*!
+ * \brief returns a pointer to the descriptor with the given name
  */
 static struct chan_oss_pvt *find_desc(char *dev)
 {
@@ -450,14 +861,16 @@
 	return o;
 }
 
-/*
- * split a string in extension-context, returns pointers to malloc'ed
- * strings.
+/*!
+ * \brief split a string in extension-context, returns pointers to malloc'ed
+ *        strings.
+ *
  * If we do not have 'overridecontext' then the last @ is considered as
  * a context separator, and the context is overridden.
  * This is usually not very necessary as you can play with the dialplan,
  * and it is nice not to need it because you have '@' in SIP addresses.
- * Return value is the buffer address.
+ *
+ * \return the buffer address.
  */
 static char *ast_ext_ctx(const char *src, char **ext, char **ctx)
 {
@@ -484,8 +897,8 @@
 	return *ext;
 }
 
-/*
- * Returns the number of blocks used in the audio output channel
+/*!
+ * \brief Returns the number of blocks used in the audio output channel
  */
 static int used_blocks(struct chan_oss_pvt *o)
 {
@@ -508,7 +921,7 @@
 	return o->total_blocks - info.fragments;
 }
 
-/* Write an exactly FRAME_SIZE sized frame */
+/*! Write an exactly FRAME_SIZE sized frame */
 static int soundcard_writeframe(struct chan_oss_pvt *o, short *data)
 {
 	int res;
@@ -533,8 +946,9 @@
 	return write(o->sounddev, (void *)data, FRAME_SIZE * 2);
 }
 
-/*
- * Handler for 'sound writable' events from the sound thread.
+/*!
+ * \brief Handler for 'sound writable' events from the sound thread.
+ *
  * Builds a frame from the high level description of the sounds,
  * and passes it to the audio device.
  * The actual sound is made of 1 or more sequences of sound samples
@@ -661,7 +1075,7 @@
 	return NULL;				/* Never reached */
 }
 
-/*
+/*!
  * reset and close the device if opened,
  * then open and initialize it in the desired mode,
  * trigger reads and writes so we can start using it.
@@ -771,10 +1185,11 @@
 	return 0;
 }
 
-static int oss_digit_end(struct ast_channel *c, char digit)
+static int oss_digit_end(struct ast_channel *c, char digit, unsigned int duration)
 {
 	/* no better use for received digits than print them */
-	ast_verbose(" << Console Received digit %c >> \n", digit);
+	ast_verbose(" << Console Received digit %c of duration %u ms >> \n", 
+		digit, duration);
 	return 0;
 }
 
@@ -785,23 +1200,39 @@
 	return 0;
 }
 
-/* Play ringtone 'x' on device 'o' */
+/*! \brief Play ringtone 'x' on device 'o' */
 static void ring(struct chan_oss_pvt *o, int x)
 {
 	write(o->sndcmd[1], &x, sizeof(x));
 }
 
 
-/*
- * handler for incoming calls. Either autoanswer, or start ringing
+/*!
+ * \brief handler for incoming calls. Either autoanswer, or start ringing
  */
 static int oss_call(struct ast_channel *c, char *dest, int timeout)
 {
 	struct chan_oss_pvt *o = c->tech_pvt;
 	struct ast_frame f = { 0, };
+	AST_DECLARE_APP_ARGS(args,
+		AST_APP_ARG(name);
+		AST_APP_ARG(flags);
+	);
+	char *parse = ast_strdupa(dest);
+
+	AST_NONSTANDARD_APP_ARGS(args, parse, '/');
 
 	ast_verbose(" << Call to device '%s' dnid '%s' rdnis '%s' on console from '%s' <%s> >>\n", dest, c->cid.cid_dnid, c->cid.cid_rdnis, c->cid.cid_name, c->cid.cid_num);
-	if (o->autoanswer) {
+	if (!ast_strlen_zero(args.flags) && strcasecmp(args.flags, "answer") == 0) {
+		f.frametype = AST_FRAME_CONTROL;
+		f.subclass = AST_CONTROL_ANSWER;
+		ast_queue_frame(c, &f);
+	} else if (!ast_strlen_zero(args.flags) && strcasecmp(args.flags, "noanswer") == 0) {
+		f.frametype = AST_FRAME_CONTROL;
+		f.subclass = AST_CONTROL_RINGING;
+		ast_queue_frame(c, &f);
+		ring(o, AST_CONTROL_RING);
+	} else if (o->autoanswer) {
 		ast_verbose(" << Auto-answered >> \n");
 		f.frametype = AST_FRAME_CONTROL;
 		f.subclass = AST_CONTROL_ANSWER;
@@ -816,8 +1247,8 @@
 	return 0;
 }
 
-/*
- * remote side answered the phone
+/*!
+ * \brief remote side answered the phone
  */
 static int oss_answer(struct ast_channel *c)
 {
@@ -843,6 +1274,10 @@
 	c->tech_pvt = NULL;
 	o->owner = NULL;
 	ast_verbose(" << Hangup on console >> \n");
+#if HAVE_FFMPEG
+	ffmpeg_uninit(&o->env);
+#endif
+	ast_module_unref(ast_module_info->self);
 	if (o->hookstate) {
 		if (o->autoanswer || o->autohangup) {
 			/* Assume auto-hangup too */
@@ -856,7 +1291,7 @@
 	return 0;
 }
 
-/* used for data coming from the network */
+/*! \brief used for data coming from the network */
 static int oss_write(struct ast_channel *c, struct ast_frame *f)
 {
 	int src;
@@ -991,14 +1426,14 @@
 	return 0;
 }
 
-/*
- * allocate a new channel.
+/*!
+ * \brief allocate a new channel.
  */
 static struct ast_channel *oss_new(struct chan_oss_pvt *o, char *ext, char *ctx, int state)
 {
 	struct ast_channel *c;
 
-	c = ast_channel_alloc(1, state, o->cid_num, o->cid_name, "OSS/%s", o->device + 5);
+	c = ast_channel_alloc(1, state, o->cid_num, o->cid_name, "", ext, ctx, 0, "OSS/%s", o->device + 5);
 	if (c == NULL)
 		return NULL;
 	c->tech = &oss_tech;
@@ -1006,14 +1441,15 @@
 		setformat(o, O_RDWR);
 	c->fds[0] = o->sounddev;	/* -1 if device closed, override later */
 	c->nativeformats = AST_FORMAT_SLINEAR;
+
+	/* if the console makes the call, add video */
+	if (state == 5)
+		c->nativeformats |= AST_FORMAT_CUSTOM;
+
 	c->readformat = AST_FORMAT_SLINEAR;
 	c->writeformat = AST_FORMAT_SLINEAR;
 	c->tech_pvt = o;
 
-	if (!ast_strlen_zero(ctx))
-		ast_copy_string(c->context, ctx, sizeof(c->context));
-	if (!ast_strlen_zero(ext))
-		ast_copy_string(c->exten, ext, sizeof(c->exten));
 	if (!ast_strlen_zero(o->language))
 		ast_string_field_set(c, language, o->language);
 	/* Don't use ast_set_callerid() here because it will
@@ -1025,6 +1461,7 @@
 		c->cid.cid_dnid = ast_strdup(ext);
 
 	o->owner = c;
+	ast_module_ref(ast_module_info->self);
 	ast_jb_configure(c, &global_jbconf);
 	if (state != AST_STATE_DOWN) {
 		if (ast_pbx_start(c)) {
@@ -1035,17 +1472,32 @@
 		}
 	}
 
+#if HAVE_FFMPEG
+	/* Initialize the ffmpeg decoding environment only when a new call arrives.
+	 * XXX This should be allocated once per video session.
+	 */
+	ffmpeg_init(&o->env);
+#endif
+
 	return c;
 }
 
 static struct ast_channel *oss_request(const char *type, int format, void *data, int *cause)
 {
 	struct ast_channel *c;
-	struct chan_oss_pvt *o = find_desc(data);
+	struct chan_oss_pvt *o;
+	AST_DECLARE_APP_ARGS(args,
+		AST_APP_ARG(name);
+		AST_APP_ARG(flags);
+	);
+	char *parse = ast_strdupa(data);
+
+	AST_NONSTANDARD_APP_ARGS(args, parse, '/');
+	o = find_desc(args.name);
 
 	ast_log(LOG_WARNING, "oss_request ty <%s> data 0x%p <%s>\n", type, data, (char *) data);
 	if (o == NULL) {
-		ast_log(LOG_NOTICE, "Device %s not found\n", (char *) data);
+		ast_log(LOG_NOTICE, "Device %s not found\n", args.name);
 		/* XXX we could default to 'dsp' perhaps ? */
 		return NULL;
 	}
@@ -1104,8 +1556,8 @@
 	return CLI_SUCCESS;
 }
 
-/*
- * answer command from the console
+/*!
+ * \brief answer command from the console
  */
 static char *console_answer(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
 {
@@ -1136,8 +1588,10 @@
 	return CLI_SUCCESS;
 }
 
-/*
- * concatenate all arguments into a single string. argv is NULL-terminated
+/*!
+ * \brief Console send text CLI command
+ *
+ * \note concatenate all arguments into a single string. argv is NULL-terminated
  * so we can use it right away
  */
 static char *console_sendtext(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
@@ -1189,6 +1643,7 @@
 
 	if (a->argc != e->args)
 		return CLI_SHOWUSAGE;
+	/* XXX this is similar to what is done in oss_hangup */
 	o->cursound = -1;
 	o->nosound = 0;
 	if (!o->owner && !o->hookstate) { /* XXX maybe only one ? */
@@ -1199,6 +1654,9 @@
 	if (o->owner)
 		ast_queue_hangup(o->owner);
 	setformat(o, O_CLOSE);
+#if HAVE_FFMPEG
+	ffmpeg_uninit(&o->env);
+#endif
 	return CLI_SUCCESS;
 }
 
@@ -1276,7 +1734,7 @@
 	} else
 		ast_cli(a->fd, "No such extension '%s' in context '%s'\n", mye, myc);
 	if (s)
-		free(s);
+		ast_free(s);
 	return CLI_SUCCESS;
 }
 
@@ -1332,7 +1790,7 @@
 			ast_cli(fd, "Failed to transfer :(\n");
 	}
 	if (tmp)
-		free(tmp);
+		ast_free(tmp);
 	return RESULT_SUCCESS;
 }
 
@@ -1369,8 +1827,8 @@
 	"console.  If a device is specified, the console sound device is changed to\n"
 	"the device specified.\n";
 
-/*
- * store the boost factor
+/*!
+ * \brief store the boost factor
  */
 static void store_boost(struct chan_oss_pvt *o, char *s)
 {
@@ -1424,7 +1882,7 @@
 	active_usage },
 };
 
-/*
+/*!
  * store the mixer argument from the config file, filtering possibly
  * invalid or dangerous values (the string is used as argument for
  * system("mixer %s")
@@ -1440,12 +1898,12 @@
 		}
 	}
 	if (o->mixer_cmd)
-		free(o->mixer_cmd);
+		ast_free(o->mixer_cmd);
 	o->mixer_cmd = ast_strdup(s);
 	ast_log(LOG_WARNING, "setting mixer %s\n", s);
 }
 
-/*
+/*!
  * store the callerid components
  */
 static void store_callerid(struct chan_oss_pvt *o, char *s)
@@ -1453,7 +1911,7 @@
 	ast_callerid_split(s, o->cid_name, sizeof(o->cid_name), o->cid_num, sizeof(o->cid_num));
 }
 
-/*
+/*!
  * grab fields from the config file, init the descriptor and open the device.
  */
 static struct chan_oss_pvt *store_config(struct ast_config *cfg, char *ctg)
@@ -1513,13 +1971,13 @@
 		asprintf(&cmd, "mixer %s", o->mixer_cmd);
 		ast_log(LOG_WARNING, "running [%s]\n", cmd);
 		system(cmd);
-		free(cmd);
+		ast_free(cmd);
 	}
 	if (o == &oss_default)		/* we are done with the default */
 		return NULL;
 
   openit:
-#if TRYOPEN
+#ifdef TRYOPEN
 	if (setformat(o, O_RDWR) < 0) {	/* open device */
 		if (option_verbose > 0) {
 			ast_verbose(VERBOSE_PREFIX_2 "Device %s not detected\n", ctg);
@@ -1544,7 +2002,7 @@
 
   error:
 	if (o != &oss_default)
-		free(o);
+		ast_free(o);
 	return NULL;
 }
 
@@ -1576,7 +2034,7 @@
 	}
 
 	if (ast_channel_register(&oss_tech)) {
-		ast_log(LOG_ERROR, "Unable to register channel class 'MGCP'\n");
+		ast_log(LOG_ERROR, "Unable to register channel type 'OSS'\n");
 		return AST_MODULE_LOAD_FAILURE;
 	}
 



