Faceposer code which parses the info:
//-----------------------------------------------------------------------------
// Purpose: Parse the "faceposer" KeyValues section embedded in a gesture's
//          sequence data and auto-populate the event's absolute timing tags
//          (loop start/end, entry, exit), then lock the ORIGINAL tags.
// Input  : e         - gesture event to populate (scene/actor resolve a model)
//          checkonly - NOTE(review): never read in this implementation —
//                      confirm whether callers expect a dry-run mode
// Output : false when the model, its header, or the sequence can't be
//          resolved; true otherwise (including when the sequence carries no
//          faceposer data — matches original behavior)
//-----------------------------------------------------------------------------
bool CChoreoView::AutoaddGestureKeys( CChoreoEvent *e, bool checkonly )
{
	StudioModel *model = FindAssociatedModel( e->GetScene(), e->GetActor() );
	if ( !model )
		return false;

	if ( !model->GetStudioHdr() )
		return false;

	int iSequence = model->LookupSequence( e->GetParameters() );
	if ( iSequence < 0 )
		return false;

	KeyValues *seqKeyValues = new KeyValues( "" );
	if ( seqKeyValues->LoadFromBuffer( model->GetFileName(), model->GetKeyValueText( iSequence ) ) )
	{
		// Do we have a build point section?
		KeyValues *pkvAllFaceposer = seqKeyValues->FindKey( "faceposer" );
		if ( pkvAllFaceposer )
		{
			// Default tag names, used unless the sequence data overrides them
			char szStartLoop[CEventAbsoluteTag::MAX_EVENTTAG_LENGTH] = { "loop" };
			char szEndLoop[CEventAbsoluteTag::MAX_EVENTTAG_LENGTH] = { "end" };
			char szEntry[CEventAbsoluteTag::MAX_EVENTTAG_LENGTH] = { "apex" };
			char szExit[CEventAbsoluteTag::MAX_EVENTTAG_LENGTH] = { "end" };

			// Start grabbing the sounds and slotting them in
			KeyValues *pkvFaceposer;
			for ( pkvFaceposer = pkvAllFaceposer->GetFirstSubKey(); pkvFaceposer; pkvFaceposer = pkvFaceposer->GetNextKey() )
			{
				if ( !stricmp( pkvFaceposer->GetName(), "startloop" ) )
				{
					// FIX: bounded copy — KeyValues strings can exceed MAX_EVENTTAG_LENGTH
					strncpy( szStartLoop, pkvFaceposer->GetString(), sizeof( szStartLoop ) - 1 );
					szStartLoop[ sizeof( szStartLoop ) - 1 ] = '\0';
				}
				else if ( !stricmp( pkvFaceposer->GetName(), "endloop" ) )
				{
					strncpy( szEndLoop, pkvFaceposer->GetString(), sizeof( szEndLoop ) - 1 );
					szEndLoop[ sizeof( szEndLoop ) - 1 ] = '\0';
				}
				else if ( !stricmp( pkvFaceposer->GetName(), "entrytag" ) )
				{
					strncpy( szEntry, pkvFaceposer->GetString(), sizeof( szEntry ) - 1 );
					szEntry[ sizeof( szEntry ) - 1 ] = '\0';
				}
				else if ( !stricmp( pkvFaceposer->GetName(), "exittag" ) )
				{
					strncpy( szExit, pkvFaceposer->GetString(), sizeof( szExit ) - 1 );
					szExit[ sizeof( szExit ) - 1 ] = '\0';
				}
				else if ( !stricmp( pkvFaceposer->GetName(), "tags" ) )
				{
					// Frame counts don't change per tag — hoist out of the loop
					int maxFrame = model->GetNumFrames( iSequence ) - 1;

					KeyValues *pkvTags;
					for ( pkvTags = pkvFaceposer->GetFirstSubKey(); pkvTags; pkvTags = pkvTags->GetNextKey() )
					{
						if ( maxFrame > 0 )
						{
							// Tag value is a frame number; convert to a 0..1 fraction
							float percentage = (float)pkvTags->GetInt() / maxFrame;

							CEventAbsoluteTag *ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, pkvTags->GetName() );
							if ( ptag )
							{
								// reposition tag
								ptag->SetPercentage( percentage );
							}
							else
							{
								e->AddAbsoluteTag( CChoreoEvent::ORIGINAL, pkvTags->GetName(), percentage );
								e->AddAbsoluteTag( CChoreoEvent::PLAYBACK, pkvTags->GetName(), percentage );
							}

							// lock the original tags so they can't be edited
							ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, pkvTags->GetName() );
							Assert( ptag );
							// FIX: Assert compiles out in release builds — guard the
							// dereference in case the add/lookup failed
							if ( ptag )
							{
								ptag->SetLocked( true );
							}
						}
					}

					e->VerifyTagOrder();
					e->PreventTagOverlap();
				}
			}

			// FIXME: lookup linear tags in sequence data
			{
				CEventAbsoluteTag *ptag;

				// Mark the loop boundary tags as linear in both tracks
				ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, szStartLoop );
				if ( ptag )
				{
					ptag->SetLinear( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::PLAYBACK, szStartLoop );
				if ( ptag )
				{
					ptag->SetLinear( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, szEndLoop );
				if ( ptag )
				{
					ptag->SetLinear( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::PLAYBACK, szEndLoop );
				if ( ptag )
				{
					ptag->SetLinear( true );
				}

				// Flag the entry/exit tags in both tracks
				ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, szEntry );
				if ( ptag )
				{
					ptag->SetEntry( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::PLAYBACK, szEntry );
				if ( ptag )
				{
					ptag->SetEntry( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::ORIGINAL, szExit );
				if ( ptag )
				{
					ptag->SetExit( true );
				}
				ptag = e->FindAbsoluteTag( CChoreoEvent::PLAYBACK, szExit );
				if ( ptag )
				{
					ptag->SetExit( true );
				}
			}
		}
	}
	seqKeyValues->deleteThis();

	return true;
}
Text retrieval:
//-----------------------------------------------------------------------------
// Purpose: Fetch the raw key-value text stored with the given sequence of
//          this model (NULL if the header or sequence is unavailable).
//-----------------------------------------------------------------------------
const char *StudioModel::GetKeyValueText( int iSequence )
{
	// Delegate straight to the shared studio helper using our current header
	return Studio_GetKeyValueText( GetStudioHdr(), iSequence );
}
And implementation:
//-----------------------------------------------------------------------------
// Purpose: Return the key-value text block of sequence iSequence from the
//          studio header, or NULL when the header is missing, has no
//          sequences, or iSequence is out of range.
//-----------------------------------------------------------------------------
const char *Studio_GetKeyValueText( const CStudioHdr *pStudioHdr, int iSequence )
{
	// Guard clauses: bail out early on any invalid input
	if ( !pStudioHdr || !pStudioHdr->SequencesAvailable() )
		return NULL;

	if ( iSequence < 0 || iSequence >= pStudioHdr->GetNumSeq() )
		return NULL;

	return pStudioHdr->pSeqdesc( iSequence ).KeyValueText();
}
YMMV...
Yahn
-----Original Message-----
From: [EMAIL PROTECTED]
[mailto:[EMAIL PROTECTED] On Behalf Of Nate Nichols
Sent: Tuesday, February 20, 2007 8:06 PM
To: [email protected]
Subject: [hlcoders] Animation timing tag extraction
Hi,
I am trying to extract the timing tag information from Source gestures.
(Timing tags are the little "apex", "accent", etc. tags you see when you
add a gesture to a scene in Face Poser.)
It seems like most of the gestures, e.g. Alyx's "A_g_wave", have related
gestures named A_g_waveapexArms, A_g_waveloopSpine. Since the number of
frames in "A_g_wave" tells how long the gesture is, I thought that the
framecount of A_g_waveloopSpine et al. would give me the timing tag
information. Unfortunately, that doesn't seem to be the case.
(This seems to give the right timing for the first tag, but not the
later ones.) I also tried seeing if the tags were implemented as
animation events, but that doesn't seem to be the case either.
Obviously there is some way to get this information, because FacePoser
is doing it, but I don't know how. And I don't think any of the code we
have access to extracts that information. Any ideas?
Also, if anyone else is ever interested in automatically generating
scenes for Half-Life 2, let me know. We have a bunch of Python code
that generates scenes, and are building a corpus with all the existing
Source scenes parsed into structures with associated speech timing
information, closed captioning, etc.
Thanks for any help,
Nate
_______________________________________________
To unsubscribe, edit your list preferences, or view the list archives,
please visit:
http://list.valvesoftware.com/mailman/listinfo/hlcoders
_______________________________________________
To unsubscribe, edit your list preferences, or view the list archives, please
visit:
http://list.valvesoftware.com/mailman/listinfo/hlcoders