X-Git-Url: http://de.git.xonotic.org/?a=blobdiff_plain;f=cl_video.c;h=4eac2aea923314e61883ff220b40a8f4dcf3d72e;hb=145be4b7f63608b1b43cdae981761cc74c34efa0;hp=c6c094c365995c03bd292ffa81f83dca56c4b5ba;hpb=ff46d6ff516fda192c5adc55a5c9b82007545bd2;p=xonotic%2Fdarkplaces.git diff --git a/cl_video.c b/cl_video.c index c6c094c3..4eac2aea 100644 --- a/cl_video.c +++ b/cl_video.c @@ -1,191 +1,326 @@ #include "quakedef.h" +#include "cl_dyntexture.h" #include "cl_video.h" #include "dpvsimpledecode.h" -mempool_t *clvideomempool; +// constants (and semi-constants) +static int cl_videormask; +static int cl_videobmask; +static int cl_videogmask; +static int cl_videobytesperpixel; -int cl_videoplaying = false; -void *cl_videostream; +static int cl_num_videos; +static clvideo_t cl_videos[ MAXCLVIDEOS ]; +static rtexturepool_t *cl_videotexturepool; -double cl_videostarttime; -int cl_videoframenum; -double cl_videoframerate; +static clvideo_t *FindUnusedVid( void ) +{ + int i; + for( i = 1 ; i < MAXCLVIDEOS ; i++ ) + if( cl_videos[ i ].state == CLVIDEO_UNUSED ) + return &cl_videos[ i ]; + return NULL; +} -int cl_videoimagewidth; -int cl_videoimageheight; -int cl_videoimagedata_rmask; -int cl_videoimagedata_gmask; -int cl_videoimagedata_bmask; -int cl_videoimagedata_bytesperpixel; -void *cl_videoimagedata; +static qboolean OpenStream( clvideo_t * video ) +{ + char *errorstring; + video->stream = dpvsimpledecode_open( video->filename, &errorstring); + if (!video->stream ) + { + Con_Printf("unable to open \"%s\", error: %s\n", video->filename, errorstring); + return false; + } + return true; +} -int cl_videosoundrate; -int cl_videosoundlength; -short *cl_videosounddata; -int cl_videosoundresamplelength; -short *cl_videosoundresampledata; +static void VideoUpdateCallback(rtexture_t *rt, void *data) { + clvideo_t *video = (clvideo_t *) data; + R_UpdateTexture( video->cpif.tex, (unsigned char *)video->imagedata, 0, 0, video->cpif.width, video->cpif.height ); +} -rtexture_t *cl_videotexture; -rtexturepool_t *cl_videotexturepool; +static void LinkVideoTexture( clvideo_t *video ) { + video->cpif.tex = R_LoadTexture2D( cl_videotexturepool, video->cpif.name, + video->cpif.width, video->cpif.height, NULL, TEXTYPE_BGRA, TEXF_ALWAYSPRECACHE | TEXF_PERSISTENT, NULL ); + R_MakeTextureDynamic( video->cpif.tex, VideoUpdateCallback, video ); + CL_LinkDynTexture( video->cpif.name, video->cpif.tex ); +} + +static void UnlinkVideoTexture( clvideo_t *video ) { + CL_UnlinkDynTexture( video->cpif.name ); + // free the texture + R_FreeTexture( video->cpif.tex ); + // free the image data + Mem_Free( video->imagedata ); +} -void CL_VideoFrame(void) +static void SuspendVideo( clvideo_t * video ) { - int frames, framenum, samples, s; - if (!cl_videoplaying) + if( video->suspended ) return; - framenum = (realtime - cl_videostarttime) * cl_videoframerate; - //Con_Printf("frame %i\n", framenum); - if (framenum < 0) - framenum = 0; - frames = 0; - while (cl_videoframenum < framenum) - { - frames++; - cl_videoframenum++; - if (dpvsimpledecode_video(cl_videostream, cl_videoimagedata, cl_videoimagedata_rmask, cl_videoimagedata_gmask, cl_videoimagedata_bmask, cl_videoimagedata_bytesperpixel, cl_videoimagewidth * cl_videoimagedata_bytesperpixel)) - { - CL_VideoStop(); - return; + video->suspended = true; + UnlinkVideoTexture( video ); + // if we are in firstframe mode, also close the stream + if( video->state == CLVIDEO_FIRSTFRAME ) + dpvsimpledecode_close( video->stream ); +} + +static qboolean WakeVideo( clvideo_t * video ) +{ + if( 
!video->suspended ) + return true; + video->suspended = false; + + if( video->state == CLVIDEO_FIRSTFRAME ) + if( !OpenStream( video ) ) { + video->state = CLVIDEO_UNUSED; + return false; } - } - if (frames) - { - R_UpdateTexture(cl_videotexture, cl_videoimagedata); - //Draw_NewPic("engine_videoframe", cl_videoimagewidth, cl_videoimageheight, false, cl_videoimagedata); - } - if (cl_videosoundrate && (samples = S_RawSamples_QueueWantsMore())) - { - Con_DPrintf("%i = S_RawSamples_QueueWantsMore()\n", samples); - // calculate how much source data we need to fill the output... - s = samples * cl_videosoundrate / S_RawSamples_SampleRate(); + video->imagedata = Mem_Alloc( cls.permanentmempool, video->cpif.width * video->cpif.height * cl_videobytesperpixel ); + LinkVideoTexture( video ); - // reallocate processing buffer if needed - if (cl_videosoundresamplelength < samples) - { - cl_videosoundresamplelength = samples + 100; - if (cl_videosoundresampledata) - Mem_Free(cl_videosoundresampledata); - cl_videosoundresampledata = Mem_Alloc(clvideomempool, cl_videosoundresamplelength * sizeof(short[2])); - } + // update starttime + video->starttime += realtime - video->lasttime; - // reallocate loading buffer if needed - if (cl_videosoundlength < s) - { - cl_videosoundlength = s + 100; - if (cl_videosounddata) - Mem_Free(cl_videosounddata); - cl_videosounddata = Mem_Alloc(clvideomempool, cl_videosoundlength * sizeof(short[2])); + return true; +} + +static clvideo_t* OpenVideo( clvideo_t *video, const char *filename, const char *name, int owner ) +{ + strlcpy( video->filename, filename, sizeof(video->filename) ); + video->ownertag = owner; + if( strncmp( name, CLVIDEOPREFIX, sizeof( CLVIDEOPREFIX ) - 1 ) ) + return NULL; + strlcpy( video->cpif.name, name, sizeof(video->cpif.name) ); + + if( !OpenStream( video ) ) + return NULL; + + video->state = CLVIDEO_FIRSTFRAME; + video->framenum = -1; + video->framerate = dpvsimpledecode_getframerate( video->stream ); + video->lasttime = realtime; + + video->cpif.width = dpvsimpledecode_getwidth( video->stream ); + video->cpif.height = dpvsimpledecode_getheight( video->stream ); + video->imagedata = Mem_Alloc( cls.permanentmempool, video->cpif.width * video->cpif.height * cl_videobytesperpixel ); + LinkVideoTexture( video ); + + return video; +} + +clvideo_t* CL_OpenVideo( const char *filename, const char *name, int owner ) +{ + clvideo_t *video; + // sanity check + if( !name || !*name || strncmp( name, CLVIDEOPREFIX, sizeof( CLVIDEOPREFIX ) - 1 ) != 0 ) { + if( developer.integer > 0 ) { + Con_Printf( "CL_OpenVideo: Bad video texture name '%s'!\n", name ); } + return NULL; + } - dpvsimpledecode_audio(cl_videostream, cl_videosounddata, s); - S_ResampleBuffer16Stereo(cl_videosounddata, s, cl_videosoundresampledata, samples); - S_RawSamples_Enqueue(cl_videosoundresampledata, samples); + video = FindUnusedVid(); + if( !video ) { + Con_Printf( "CL_OpenVideo: unable to open video \"%s\" - video limit reached\n", filename ); + return NULL; } + video = OpenVideo( video, filename, name, owner ); + // expand the active range to include the new entry + if (video) { + cl_num_videos = max(cl_num_videos, (int)(video - cl_videos) + 1); + } + return video; } -void CL_DrawVideo(void) +static clvideo_t* CL_GetVideoBySlot( int slot ) { - if (cl_videoplaying) + clvideo_t *video = &cl_videos[ slot ]; + + if( video->suspended ) { - drawqueuemesh_t mesh; - float vertex3f[12]; - float texcoord2f[8]; - float color4f[16]; - float s1, t1, s2, t2, x1, y1, x2, y2; - x1 = 0; - y1 = 0; - x2 = 
vid.conwidth; - y2 = vid.conheight; - R_FragmentLocation(cl_videotexture, NULL, NULL, &s1, &t1, &s2, &t2); - texcoord2f[0] = s1;texcoord2f[1] = t1; - texcoord2f[2] = s2;texcoord2f[3] = t1; - texcoord2f[4] = s2;texcoord2f[5] = t2; - texcoord2f[6] = s1;texcoord2f[7] = t2; - R_FillColors(color4f, 4, 1, 1, 1, 1); - vertex3f[ 0] = x1;vertex3f[ 1] = y1;vertex3f[ 2] = 0; - vertex3f[ 3] = x2;vertex3f[ 4] = y1;vertex3f[ 5] = 0; - vertex3f[ 6] = x2;vertex3f[ 7] = y2;vertex3f[ 8] = 0; - vertex3f[ 9] = x1;vertex3f[10] = y2;vertex3f[11] = 0; - mesh.texture = cl_videotexture; - mesh.num_triangles = 2; - mesh.num_vertices = 4; - mesh.data_element3i = polygonelements; - mesh.data_vertex3f = vertex3f; - mesh.data_texcoord2f = texcoord2f; - mesh.data_color4f = color4f; - DrawQ_Mesh(&mesh, 0); - //DrawQ_Pic(0, 0, "engine_videoframe", vid.conwidth, vid.conheight, 1, 1, 1, 1, 0); + if( !WakeVideo( video ) ) + return NULL; + else if( video->state == CLVIDEO_RESETONWAKEUP ) + video->framenum = -1; } + + video->lasttime = realtime; + + return video; } -void CL_VideoStart(char *filename) +clvideo_t *CL_GetVideoByName( const char *name ) { - char *errorstring; - cl_videostream = dpvsimpledecode_open(filename, &errorstring); - if (!cl_videostream) - { - Con_Printf("unable to open \"%s\", error: %s\n", filename, errorstring); + int i; + + for( i = 0 ; i < cl_num_videos ; i++ ) + if( cl_videos[ i ].state != CLVIDEO_UNUSED + && !strcmp( cl_videos[ i ].cpif.name , name ) ) + break; + if( i != cl_num_videos ) + return CL_GetVideoBySlot( i ); + else + return NULL; +} + +void CL_SetVideoState( clvideo_t *video, clvideostate_t state ) +{ + if( !video ) return; + + video->lasttime = realtime; + video->state = state; + if( state == CLVIDEO_FIRSTFRAME ) + CL_RestartVideo( video ); +} + +void CL_RestartVideo( clvideo_t *video ) +{ + if( !video ) + return; + + video->starttime = video->lasttime = realtime; + video->framenum = -1; + + dpvsimpledecode_close( video->stream ); + if( !OpenStream( video ) ) + video->state = CLVIDEO_UNUSED; +} + +void CL_CloseVideo( clvideo_t * video ) +{ + if( !video || video->state == CLVIDEO_UNUSED ) + return; + + if( !video->suspended || video->state != CLVIDEO_FIRSTFRAME ) + dpvsimpledecode_close( video->stream ); + if( !video->suspended ) { + UnlinkVideoTexture( video ); } - cl_videoplaying = true; - cl_videostarttime = realtime; - cl_videoframenum = -1; - cl_videoframerate = dpvsimpledecode_getframerate(cl_videostream); - cl_videoimagewidth = dpvsimpledecode_getwidth(cl_videostream); - cl_videoimageheight = dpvsimpledecode_getheight(cl_videostream); - - // RGBA format - cl_videoimagedata_bytesperpixel = 4; - cl_videoimagedata_rmask = BigLong(0xFF000000); - cl_videoimagedata_gmask = BigLong(0x00FF0000); - cl_videoimagedata_bmask = BigLong(0x0000FF00); - cl_videoimagedata = Mem_Alloc(clvideomempool, cl_videoimagewidth * cl_videoimageheight * cl_videoimagedata_bytesperpixel); - //memset(cl_videoimagedata, 97, cl_videoimagewidth * cl_videoimageheight * cl_videoimagedata_bytesperpixel); - - cl_videosoundrate = dpvsimpledecode_getsoundrate(cl_videostream); - cl_videosoundlength = 0; - cl_videosounddata = NULL; - cl_videosoundresamplelength = 0; - cl_videosoundresampledata = NULL; + video->state = CLVIDEO_UNUSED; +} - cl_videotexturepool = R_AllocTexturePool(); - cl_videotexture = R_LoadTexture2D(cl_videotexturepool, "videotexture", cl_videoimagewidth, cl_videoimageheight, NULL, TEXTYPE_RGBA, TEXF_FRAGMENT, NULL); +static void VideoFrame( clvideo_t *video ) +{ + int destframe; + + if( video->state == 
CLVIDEO_FIRSTFRAME ) + destframe = 0; + else + destframe = (int)((realtime - video->starttime) * video->framerate); + if( destframe < 0 ) + destframe = 0; + if( video->framenum < destframe ) { + do { + video->framenum++; + if( dpvsimpledecode_video( video->stream, video->imagedata, cl_videormask, + cl_videogmask, cl_videobmask, cl_videobytesperpixel, + cl_videobytesperpixel * video->cpif.width ) + ) { // finished? + CL_RestartVideo( video ); + if( video->state == CLVIDEO_PLAY ) + video->state = CLVIDEO_FIRSTFRAME; + return; + } + } while( video->framenum < destframe ); + R_MarkDirtyTexture( video->cpif.tex ); + } } -void CL_VideoStop(void) +void CL_Video_Frame( void ) // update all videos { - cl_videoplaying = false; + int i; + clvideo_t *video; - S_RawSamples_ClearQueue(); + if (!cl_num_videos) + return; - if (cl_videostream) - dpvsimpledecode_close(cl_videostream); - cl_videostream = NULL; + for( video = cl_videos, i = 0 ; i < cl_num_videos ; video++, i++ ) + if( video->state != CLVIDEO_UNUSED && !video->suspended ) + { + if( realtime - video->lasttime > CLTHRESHOLD ) + SuspendVideo( video ); + else if( video->state == CLVIDEO_PAUSE ) + video->starttime = realtime - video->framenum * video->framerate; + else + VideoFrame( video ); + } - if (cl_videoimagedata) - Mem_Free(cl_videoimagedata); - cl_videoimagedata = NULL; + if( cl_videos->state == CLVIDEO_FIRSTFRAME ) + CL_VideoStop(); - if (cl_videosounddata) - Mem_Free(cl_videosounddata); - cl_videosounddata = NULL; + // reduce range to exclude unnecessary entries + while (cl_num_videos > 0 && cl_videos[cl_num_videos-1].state == CLVIDEO_UNUSED) + cl_num_videos--; +} - if (cl_videosoundresampledata) - Mem_Free(cl_videosoundresampledata); - cl_videosoundresampledata = NULL; +void CL_Video_Shutdown( void ) +{ + int i; + for( i = 0 ; i < cl_num_videos ; i++ ) + CL_CloseVideo( &cl_videos[ i ] ); +} - cl_videotexture = NULL; - R_FreeTexturePool(&cl_videotexturepool); +void CL_PurgeOwner( int owner ) +{ + int i; + for( i = 0 ; i < cl_num_videos ; i++ ) + if( cl_videos[ i ].ownertag == owner ) + CL_CloseVideo( &cl_videos[ i ] ); +} + +int cl_videoplaying = false; // old, but still supported - Draw_FreePic("engine_videoframe"); +void CL_DrawVideo(void) +{ + if (cl_videoplaying) + DrawQ_Pic(0, 0, &CL_GetVideoBySlot( 0 )->cpif, vid_conwidth.integer, vid_conheight.integer, 1, 1, 1, 1, 0); +} + +void CL_VideoStart(char *filename) +{ + Host_StartVideo(); + + if( cl_videos->state != CLVIDEO_UNUSED ) + CL_CloseVideo( cl_videos ); + // already contains video/ + if( !OpenVideo( cl_videos, filename, va( CLDYNTEXTUREPREFIX "%s", filename ), 0 ) ) + return; + // expand the active range to include the new entry + cl_num_videos = max(cl_num_videos, 1); + + cl_videoplaying = true; + + CL_SetVideoState( cl_videos, CLVIDEO_PLAY ); + CL_RestartVideo( cl_videos ); +} + +void CL_Video_KeyEvent( int key, int ascii, qboolean down ) +{ + // only react to up events, to allow the user to delay the abortion point if it suddenly becomes interesting.. 
+ if( !down ) { + if( key == K_ESCAPE || key == K_ENTER || key == K_SPACE ) { + CL_VideoStop(); + } + } +} + +void CL_VideoStop(void) +{ + cl_videoplaying = false; + + CL_CloseVideo( cl_videos ); } static void CL_PlayVideo_f(void) { - char name[1024]; + char name[MAX_QPATH]; + + Host_StartVideo(); if (Cmd_Argc() != 2) { @@ -193,7 +328,7 @@ static void CL_PlayVideo_f(void) return; } - sprintf(name, "video/%s.dpv", Cmd_Argv(1)); + dpsnprintf(name, sizeof(name), "video/%s.dpv", Cmd_Argv(1)); CL_VideoStart(name); } @@ -202,10 +337,48 @@ static void CL_StopVideo_f(void) CL_VideoStop(); } -void CL_Video_Init(void) +static void cl_video_start( void ) +{ + int i; + clvideo_t *video; + + cl_videotexturepool = R_AllocTexturePool(); + + for( video = cl_videos, i = 0 ; i < cl_num_videos ; i++, video++ ) + if( video->state != CLVIDEO_UNUSED && !video->suspended ) + LinkVideoTexture( video ); +} + +static void cl_video_shutdown( void ) +{ + // TODO: unlink video textures? + R_FreeTexturePool( &cl_videotexturepool ); +} + +static void cl_video_newmap( void ) +{ +} + +void CL_Video_Init( void ) { - Cmd_AddCommand("playvideo", CL_PlayVideo_f); - Cmd_AddCommand("stopvideo", CL_StopVideo_f); + union + { + unsigned char b[4]; + unsigned int i; + } + bgra; + + cl_num_videos = 0; + cl_videobytesperpixel = 4; - clvideomempool = Mem_AllocPool("CL_Video"); + // set masks in an endian-independent way (as they really represent bytes) + bgra.i = 0;bgra.b[0] = 0xFF;cl_videobmask = bgra.i; + bgra.i = 0;bgra.b[1] = 0xFF;cl_videogmask = bgra.i; + bgra.i = 0;bgra.b[2] = 0xFF;cl_videormask = bgra.i; + + Cmd_AddCommand( "playvideo", CL_PlayVideo_f, "play a .dpv video file" ); + Cmd_AddCommand( "stopvideo", CL_StopVideo_f, "stop playing a .dpv video file" ); + + R_RegisterModule( "CL_Video", cl_video_start, cl_video_shutdown, cl_video_newmap ); } +
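
Note on the CL_Video_Init hunk above: the old code built RGBA channel masks with BigLong(0xFF000000) and friends, while the new code derives BGRA masks by writing single bytes through a byte/int union, so each mask describes a byte position in memory rather than a fixed word value. The standalone sketch below (illustrative names only, not engine code) mirrors that union trick and shows why it is endian-independent.

/* Sketch: endian-independent BGRA channel masks, as in CL_Video_Init above.
 * Writing one byte through the union yields a mask that always selects that
 * byte offset in memory, regardless of host byte order. */
#include <stdio.h>

int main(void)
{
	union { unsigned char b[4]; unsigned int i; } bgra;
	unsigned int bmask, gmask, rmask;

	bgra.i = 0; bgra.b[0] = 0xFF; bmask = bgra.i;  /* byte 0 = blue  */
	bgra.i = 0; bgra.b[1] = 0xFF; gmask = bgra.i;  /* byte 1 = green */
	bgra.i = 0; bgra.b[2] = 0xFF; rmask = bgra.i;  /* byte 2 = red   */

	/* little-endian hosts print 000000ff 0000ff00 00ff0000,
	 * big-endian hosts print    ff000000 00ff0000 0000ff00 */
	printf("%08x %08x %08x\n", bmask, gmask, rmask);
	return 0;
}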