[asterisk-commits] rizzo: branch rizzo/astobj2 r77805 - /team/rizzo/astobj2/channels/chan_oss.c
SVN commits to the Asterisk project
asterisk-commits at lists.digium.com
Mon Jul 30 16:54:21 CDT 2007
Author: rizzo
Date: Mon Jul 30 16:54:20 2007
New Revision: 77805
URL: http://svn.digium.com/view/asterisk?view=rev&rev=77805
Log:
Comment and optimize the video decoding routines a bit:
+ do not reallocate the incoming bitstream buffer on every packet;
  just try to extend the existing one (see the sketch below).
+ document the various calls to ffmpeg and SDL.
Things are relatively simple, but given the number of parameters that
each function takes, it is not always obvious what each parameter means.
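For illustration, the buffer-growth strategy adopted here looks roughly like the
sketch below. The struct and helper names are hypothetical (the real fields live
inside struct video_desc in chan_oss.c); the key point, stated in the diff itself,
is that ffmpeg wants an extra FF_INPUT_BUFFER_PADDING_SIZE bytes plus a zero
terminator at the end of the input buffer, so the allocation always reserves that
slack and only grows when a packet would not fit.

/* Sketch of the grow-don't-reallocate idea; names are illustrative,
 * not the actual chan_oss.c identifiers. */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <ffmpeg/avcodec.h>	/* for FF_INPUT_BUFFER_PADDING_SIZE */

struct bitstream {
	uint8_t *data;		/* accumulated bitstream */
	int datasize;		/* allocated size */
	int used;		/* bytes filled so far */
};

static int append_packet(struct bitstream *b, const uint8_t *src, int len)
{
	/* grow only when the packet plus the required padding does not fit */
	if (b->used + len + FF_INPUT_BUFFER_PADDING_SIZE > b->datasize) {
		int newsize = b->used + len + FF_INPUT_BUFFER_PADDING_SIZE;
		uint8_t *p = realloc(b->data, newsize);	/* realloc(NULL, n) acts as malloc(n) */
		if (p == NULL)
			return -1;	/* keep the old buffer on failure */
		b->data = p;
		b->datasize = newsize;
	}
	memcpy(b->data + b->used, src, len);
	b->used += len;
	b->data[b->used] = '\0';	/* terminator the ffmpeg parser expects */
	return 0;
}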
Modified:
team/rizzo/astobj2/channels/chan_oss.c
Modified: team/rizzo/astobj2/channels/chan_oss.c
URL: http://svn.digium.com/view/asterisk/team/rizzo/astobj2/channels/chan_oss.c?view=diff&rev=77805&r1=77804&r2=77805
==============================================================================
--- team/rizzo/astobj2/channels/chan_oss.c (original)
+++ team/rizzo/astobj2/channels/chan_oss.c Mon Jul 30 16:54:20 2007
@@ -324,7 +324,7 @@
*/
#include <ffmpeg/avcodec.h>
-// #include <ffmpeg/swscale.h>
+#include <ffmpeg/swscale.h>
#include <SDL/SDL.h>
/* Structures for ffmpeg processing */
@@ -333,14 +333,17 @@
* We need one of these for each incoming video stream.
*/
struct video_desc {
- AVCodecContext *context;
- AVCodec *codec;
- AVFrame *frame;
+ AVCodecContext *context; /* information about the codec in the stream */
+ AVCodec *codec; /* reference to the codec */
+ AVFrame *frame; /* place to store the frame */
AVCodecParserContext *parser;
int completed;
+ /* buffer for the incoming bitstream */
uint8_t *data;
- int datalen;
+ int datasize; /* buffer size */
+ int used; /* bytes used so far */
+
SDL_Surface *screen;
int initialized;
SDL_Overlay *bmp;
@@ -535,16 +538,17 @@
{
enum CodecID codec;
- env->codec = NULL;
- env->context = NULL;
- env->frame = NULL;
- env->parser = NULL;
- env->data = NULL;
- env->completed = 0;
- env->datalen = 0;
- env->screen = NULL;
- env->initialized = 0;
- env->bmp = NULL;
+ env->codec = NULL;
+ env->context = NULL;
+ env->frame = NULL;
+ env->parser = NULL;
+ env->data = NULL; /* used = size = 0 */
+ env->used = 0;
+ env->datasize = 0;
+ env->completed = 0;
+ env->screen = NULL;
+ env->initialized = 0;
+ env->bmp = NULL;
env->lastrxframe = -1;
env->ts = ast_tvnow();
@@ -553,6 +557,7 @@
avcodec_init();
/*
* Register all codecs supported by the ffmpeg library.
+ * Doing it once is enough.
*/
avcodec_register_all();
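As an aside, the decoder setup that this initialization code belongs to follows the
usual 2007-era libavcodec sequence. The sketch below is a hedged illustration of that
sequence, not the actual chan_oss.c function; struct decoder_state merely stands in
for the relevant fields of struct video_desc.

/* Minimal sketch of the legacy libavcodec decoder setup; error handling
 * is reduced to simple checks for brevity. */
#include <ffmpeg/avcodec.h>

struct decoder_state {
	AVCodec *codec;
	AVCodecContext *context;
	AVFrame *frame;
	AVCodecParserContext *parser;
};

static int setup_decoder(struct decoder_state *d, enum CodecID id)
{
	avcodec_init();			/* global library init; doing it once is enough */
	avcodec_register_all();		/* register every codec built into the library */

	d->codec = avcodec_find_decoder(id);
	if (d->codec == NULL)
		return -1;

	d->context = avcodec_alloc_context();
	if (d->context == NULL || avcodec_open(d->context, d->codec) < 0)
		return -1;

	d->frame = avcodec_alloc_frame();	/* destination for decoded pictures */
	d->parser = av_parser_init(id);		/* splits the bitstream into decodable chunks */
	return (d->frame && d->parser) ? 0 : -1;
}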
@@ -716,25 +721,25 @@
*/
static int decode_video(struct video_desc *env)
{
- uint8_t *aux = env->data;
- int len = env->datalen;
- int ret;
- uint8_t *data;
- int datalen;
-
- if (!len)
+ uint8_t *src = env->data;
+ int srclen = env->used;
+
+ if (!srclen)
return 0;
- while(len) {
- ret = av_parser_parse(env->parser, env->context, &data, &datalen, aux, len, 0, 0);
- if(datalen) {
+ while (srclen) {
+ uint8_t *data;
+ int datalen;
+ int ret = av_parser_parse(env->parser, env->context, &data, &datalen, src, srclen, 0, 0);
+ if (datalen) {
ret = avcodec_decode_video(env->context, env->frame, &(env->completed), data, datalen);
if(ret < 0) {
ast_log(LOG_NOTICE, "Error in decoding\n");
return 0;
}
- aux += ret;
- len -= ret;
- }
+ src += ret;
+ srclen -= ret;
+ }
+ // ast_log(LOG_WARNING, "in %d ret %d/%d outlen %d complete %d\n", env->used, srclen, ret, datalen, env->completed);
}
return 1;
}
@@ -774,7 +779,7 @@
pict.linesize[1] = env->bmp->pitches[2];
pict.linesize[2] = env->bmp->pitches[1];
-#if 1 /* XXX img_convert is deprecated */
+#if 0 /* XXX img_convert is deprecated */
img_convert(&pict, PIX_FMT_YUV420P,
(AVPicture *)env->frame, env->context->pix_fmt,
env->context->width, env->context->height);
@@ -819,11 +824,11 @@
}
/*
- * This function is called (by asterisk) for each video fragment that needs to be processed.
- * We need to recontruct the entire video before we can decode it.
- * After a video fragment is received we have to:
- * - clean the bitstream with pre_process_data()
- * - append the bitstream in a buffer
+ * This function is called (by asterisk) for each video packet that needs to be processed.
+ * We need to reconstruct the entire video frame before we can decode it.
+ * After a video packet is received we have to:
+ * - extract the bitstream with pre_process_data()
+ * - append the bitstream to a buffer
* - if the fragment is the last (RTP Marker) we decode it with decode_video()
* - after the decoding is completed we display the decoded frame with show_frame()
*/
@@ -875,7 +880,7 @@
if(f->subclass & 0x01) {
free(env->data);
env->data = NULL;
- env->datalen = 0;
+ env->used = 0;
env->lastrxframe = f->seqno;
env->discard = 0;
}
@@ -894,19 +899,26 @@
len = f->datalen;
data = pre_process_data(f->data, &len);
- if(env->data == NULL)
+
+ /* allocate buffer as we see fit. ffmpeg wants an extra FF_INPUT_BUFFER_PADDING_SIZE
+ * and a 0 as a buffer terminator to prevent trouble.
+ */
+ if(env->data == NULL) {
+ env->datasize = len + FF_INPUT_BUFFER_PADDING_SIZE;
+ env->used = 0;
- env->data = malloc(len);
+ env->data = malloc(env->datasize);
- else
- env->data = realloc(env->data, env->datalen+len);
- memcpy(env->data+env->datalen, data, len);
- env->datalen += len;
+ } else if (env->used + len + FF_INPUT_BUFFER_PADDING_SIZE > env->datasize) {
+ env->datasize = env->used + len + FF_INPUT_BUFFER_PADDING_SIZE;
+ env->data = realloc(env->data, env->datasize);
+ }
+ memcpy(env->data+env->used, data, len);
+ env->used += len;
+ env->data[env->used] = '\0';
if(f->subclass & 0x01) // RTP Marker
if(decode_video(env)) {
show_frame(env);
env->completed = 0;
- free(env->data);
- env->data = NULL;
- env->datalen = 0;
+ env->used = 0;
}
env->lastrxframe = f->seqno;
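For readers not familiar with the pre-0.5 libavcodec API used above, the
parse-then-decode loop inside decode_video() follows the general pattern sketched
below. This is a minimal, self-contained illustration assuming the old
av_parser_parse()/avcodec_decode_video() signatures, not the exact chan_oss.c code.

/* The parser splits the accumulated bitstream into frame-sized chunks and
 * each chunk is fed to the decoder, which sets 'completed' once a whole
 * picture has been stored in 'frame'. */
#include <ffmpeg/avcodec.h>

static int decode_all(AVCodecParserContext *parser, AVCodecContext *ctx,
		      AVFrame *frame, uint8_t *src, int srclen, int *completed)
{
	while (srclen > 0) {
		uint8_t *data;
		int datalen;
		int ret = av_parser_parse(parser, ctx, &data, &datalen,
					  src, srclen, 0 /* pts */, 0 /* dts */);
		if (datalen > 0 &&
		    avcodec_decode_video(ctx, frame, completed, data, datalen) < 0)
			return 0;	/* decode error */
		if (ret <= 0 && datalen == 0)
			break;		/* parser needs more input than we have */
		src += ret;		/* advance by what the parser consumed */
		srclen -= ret;
	}
	return 1;
}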