This article collects typical usage examples of the C++ CEvent::Init method. If you are wondering how CEvent::Init is used in practice and what real calls to it look like, the curated code examples here may help. You can also explore further usage examples of the containing class, CEvent.
Three code examples of CEvent::Init are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps recommend better C++ code examples.
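The examples below come from two unrelated codebases, so CEvent::Init shows up with two different signatures: a Source-SDK-style event initialized with (manual-reset, initial-state) flags (Examples 1 and 3), and a GPOS event bound to a CMutex, condition-variable style (Example 2). Since neither class's implementation appears on this page, here is a minimal Win32-backed sketch, under the assumption that Init( bManualReset, bInitialState ) and GetEventHandle() are thin wrappers over CreateEvent; the class name CEventSketch and its exact members are illustrative only.

#include <windows.h>

// Minimal sketch only -- NOT the SDK's actual CEvent, just a plausible reading of how
// Init( bManualReset, bInitialState ) and GetEventHandle() behave in Examples 1 and 3.
class CEventSketch
{
public:
    CEventSketch() : m_hEvent( NULL ) {}
    ~CEventSketch() { if ( m_hEvent ) CloseHandle( m_hEvent ); }

    bool Init( bool bManualReset, bool bInitialState )
    {
        // Init( false, false ) would mean: auto-reset event, starting unsignaled.
        m_hEvent = CreateEvent( NULL, bManualReset, bInitialState, NULL );
        return m_hEvent != NULL;
    }

    HANDLE GetEventHandle() const { return m_hEvent; }
    void   SetEvent()             { ::SetEvent( m_hEvent ); }

private:
    HANDLE m_hEvent;
};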
Example 1: RunMPIPortalFlow
//-----------------------------------------
//
// Run PortalFlow across all available processing nodes
//
void RunMPIPortalFlow()
{
    Msg( "%-20s ", "MPIPortalFlow:" );
    if ( g_bMPIMaster )
        StartPacifier("");

    // Workers wait until we get the MC socket address.
    g_PortalMCThreadUniqueID = StatsDB_GetUniqueJobID();
    if ( g_bMPIMaster )
    {
        CCycleCount cnt;
        cnt.Sample();
        CUniformRandomStream randomStream;
        randomStream.SetSeed( cnt.GetMicroseconds() );

        g_PortalMCAddr.port = randomStream.RandomInt( 22000, 25000 ); // Pulled out of something else.
        g_PortalMCAddr.ip[0] = (unsigned char)RandomInt( 225, 238 );
        g_PortalMCAddr.ip[1] = (unsigned char)RandomInt( 0, 255 );
        g_PortalMCAddr.ip[2] = (unsigned char)RandomInt( 0, 255 );
        g_PortalMCAddr.ip[3] = (unsigned char)RandomInt( 3, 255 );

        g_pPortalMCSocket = CreateIPSocket();

        // Try a handful of random ports until one binds.
        int i = 0;
        for ( ; i < 5; i++ )
        {
            if ( g_pPortalMCSocket->BindToAny( randomStream.RandomInt( 20000, 30000 ) ) )
                break;
        }
        if ( i == 5 )
        {
            Error( "RunMPIPortalFlow: can't open a socket to multicast on." );
        }

        char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_SUBPACKETID_MC_ADDR };
        VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), &g_PortalMCAddr, sizeof( g_PortalMCAddr ), VMPI_PERSISTENT );
    }
    else
    {
        VMPI_SetCurrentStage( "wait for MC address" );
        while ( !g_bGotMCAddr )
        {
            VMPI_DispatchNextMessage();
        }

        // Open our multicast receive socket.
        g_pPortalMCSocket = CreateMulticastListenSocket( g_PortalMCAddr );
        if ( !g_pPortalMCSocket )
        {
            char err[512];
            IP_GetLastErrorString( err, sizeof( err ) );
            Error( "RunMPIPortalFlow: CreateMulticastListenSocket failed. (%s).", err );
        }

        // Make a thread to listen for the data on the multicast socket.
        DWORD dwDummy = 0;
        g_MCThreadExitEvent.Init( false, false );

        // Make sure we kill the MC thread if the app exits ungracefully.
        CmdLib_AtCleanup( MCThreadCleanupFn );

        g_hMCThread = CreateThread(
            NULL,
            0,
            PortalMCThreadFn,
            NULL,
            0,
            &dwDummy );

        if ( !g_hMCThread )
        {
            Error( "RunMPIPortalFlow: CreateThread failed for multicast receive thread." );
        }
    }

    VMPI_SetCurrentStage( "RunMPIBasePortalFlow" );

    g_pDistributeWorkCallbacks = &g_VisDistributeWorkCallbacks;

    g_CPUTime.Init();
    double elapsed = DistributeWork(
        g_numportals * 2,               // # work units
        VMPI_DISTRIBUTEWORK_PACKETID,   // packet ID
        ProcessPortalFlow,              // Worker function to process work units
        ReceivePortalFlow               // Master function to receive work results
        );
    g_pDistributeWorkCallbacks = NULL;

    CheckExitedEarly();

    // Stop the multicast stuff.
    VMPI_DeletePortalMCSocket();

    if( !g_bMPIMaster )
//......... some code omitted here .........
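Example 1 only shows the event being created: g_MCThreadExitEvent.Init( false, false ) sets up what is presumably an auto-reset, initially unsignaled event, and CmdLib_AtCleanup registers MCThreadCleanupFn to tear the listener thread down. The bodies of PortalMCThreadFn and MCThreadCleanupFn are not part of this excerpt, so the following is only a hedged sketch of the usual pattern: the worker polls until the exit event is signaled, and the cleanup hook signals it and then joins the thread. SetEvent() is assumed to exist on this CEvent, and the packet-reading step is left as a placeholder.

// Sketch only; the real PortalMCThreadFn / MCThreadCleanupFn are not shown in this excerpt.
static DWORD WINAPI PortalMCThreadFn_Sketch( LPVOID )
{
    // Keep servicing the multicast socket until the exit event gets signaled.
    while ( WaitForSingleObject( g_MCThreadExitEvent.GetEventHandle(), 100 ) == WAIT_TIMEOUT )
    {
        // ... read the next packet from g_pPortalMCSocket and hand it to the vis code ...
    }
    return 0;
}

static void MCThreadCleanupFn_Sketch()
{
    g_MCThreadExitEvent.SetEvent();    // assumes CEvent exposes SetEvent()
    if ( g_hMCThread )
    {
        WaitForSingleObject( g_hMCThread, INFINITE );    // join the listener thread
        CloseHandle( g_hMCThread );
        g_hMCThread = NULL;
    }
}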
Example 2: atp
//---------------------------------------------------------------------------
//  @function:
//      CSyncHashtableTest::EresUnittest_Concurrency
//
//  @doc:
//      Spawn a number of tasks to access hash table; in order to increase
//      the chances of concurrent access, we force each task to wait until
//      all other tasks have actually started
//
//---------------------------------------------------------------------------
GPOS_RESULT
CSyncHashtableTest::EresUnittest_Concurrency()
{
    // create memory pool
    CAutoMemoryPool amp;
    IMemoryPool *pmp = amp.Pmp();

    CWorkerPoolManager *pwpm = CWorkerPoolManager::Pwpm();
    GPOS_ASSERT(GPOS_SHT_THREADS <= pwpm->UlWorkersMax() &&
                "Insufficient number of workers to run test");

    SElemHashtable sht;
    sht.Init
        (
        pmp,
        GPOS_SHT_SMALL_BUCKETS,
        GPOS_OFFSET(SElem, m_link),
        GPOS_OFFSET(SElem, m_ulKey),
        &(SElem::m_ulInvalid),
        SElem::UlHash,
        SElem::FEqualKeys
        );

    SElem *rgelem = GPOS_NEW_ARRAY(pmp, SElem, GPOS_SHT_ELEMENTS);

    // insert an initial set of elements in hash table
    for (ULONG i = 0; i < GPOS_SHT_ELEMENTS; i++)
    {
        rgelem[i] = SElem(i, i);
        if (i < GPOS_SHT_INITIAL_ELEMENTS)
        {
            sht.Insert(&rgelem[i]);
        }
    }

    // create an event for tasks synchronization
    CMutex mutex;
    CEvent event;
    event.Init(&mutex);

    // scope for tasks
    {
        CAutoTaskProxy atp(pmp, pwpm);
        CTask *rgtask[GPOS_SHT_THREADS];
        pfuncHashtableTask rgpfuncTask[] =
        {
            PvUnittest_Inserter,
            PvUnittest_Remover,
            PvUnittest_Reader,
            PvUnittest_Iterator
        };

        SElemTest elemtest(sht, rgelem, &event);
        const ULONG ulTypes = GPOS_ARRAY_SIZE(rgpfuncTask);

        // create tasks
        for (ULONG i = 0; i < GPOS_SHT_THREADS; i++)
        {
            ULONG ulTaskIndex = i % ulTypes;
            rgtask[i] = atp.PtskCreate
                (
                rgpfuncTask[ulTaskIndex],
                &elemtest
                );
            atp.Schedule(rgtask[i]);
        }

        // wait for completion
        for (ULONG i = 0; i < GPOS_SHT_THREADS; i++)
        {
            GPOS_CHECK_ABORT;
            atp.Wait(rgtask[i]);
        }
    }

    GPOS_DELETE_ARRAY(rgelem);
    return GPOS_OK;
}
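The header comment says each task is forced to wait until all other tasks have actually started, which is what event.Init(&mutex) sets up: the GPOS CEvent is a condition-variable-style event bound to the mutex callers hold while waiting on it. The PvUnittest_* task bodies are not reproduced on this page, so the snippet below is only a sketch of such a start-up barrier; the counter ulStarted and the helper name are made up for illustration, and Lock()/Unlock()/Wait()/Broadcast() are assumed members of CMutex and CEvent.

// Illustrative start-up barrier, not the actual PvUnittest_* implementation.
static ULONG ulStarted = 0;

static void WaitUntilAllTasksStarted_Sketch(CMutex *pmutex, CEvent *pevent)
{
    pmutex->Lock();
    ulStarted++;
    if (GPOS_SHT_THREADS == ulStarted)
    {
        // last task to arrive wakes up everyone already blocked on the event
        pevent->Broadcast();
    }
    while (ulStarted < GPOS_SHT_THREADS)
    {
        // Wait() releases the associated mutex while blocked and re-acquires it on wake-up
        pevent->Wait();
    }
    pmutex->Unlock();
}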
Example 3: main
int main(int argc, char* argv[])
{
    SpewOutputFunc( MySpewFunc );

    // Figure out a random port to use.
    CCycleCount cnt;
    cnt.Sample();
    CUniformRandomStream randomStream;
    randomStream.SetSeed( cnt.GetMicroseconds() );
    int iPort = randomStream.RandomInt( 20000, 30000 );

    g_ClientPacketEvent.Init( false, false );

    // Setup the "server".
    CHandlerCreator_Server serverHandler;
    CIPAddr addr( 127, 0, 0, 1, iPort );

    ITCPConnectSocket *pListener = ThreadedTCP_CreateListener(
        &serverHandler,
        (unsigned short)iPort );

    // Setup the "client".
    CHandlerCreator_Client clientCreator;
    ITCPConnectSocket *pConnector = ThreadedTCP_CreateConnector(
        CIPAddr( 127, 0, 0, 1, iPort ),
        CIPAddr(),
        &clientCreator );

    // Wait for them to connect.
    while ( !g_pClientSocket )
    {
        if ( !pConnector->Update( &g_pClientSocket ) )
        {
            Error( "Error in client connector!\n" );
        }
    }
    pConnector->Release();

    while ( !g_pServerSocket )
    {
        if ( !pListener->Update( &g_pServerSocket ) )
            Error( "Error in server connector!\n" );
    }
    pListener->Release();

    // Send some data.
    __int64 totalBytes = 0;
    CCycleCount startTime;
    int iPacket = 1;
    startTime.Sample();

    CUtlVector<char> buf;
    while ( (GetAsyncKeyState( VK_SHIFT ) & 0x8000) == 0 )
    {
        int size = randomStream.RandomInt( 1024*0, 1024*320 );
        if ( buf.Count() < size )
            buf.SetSize( size );

        if ( g_pClientSocket->Send( buf.Base(), size ) )
        {
            // Server receives the data and echoes it back. Verify that the data is good.
            WaitForSingleObject( g_ClientPacketEvent.GetEventHandle(), INFINITE );
            Assert( memcmp( g_ClientPacket.Base(), buf.Base(), size ) == 0 );

            totalBytes += size;
            CCycleCount curTime, elapsed;
            curTime.Sample();
            CCycleCount::Sub( curTime, startTime, elapsed );
            double flSeconds = elapsed.GetSeconds();
            Msg( "Packet %d, %d bytes, %dk/sec\n", iPacket++, size, (int)(((totalBytes+511)/1024) / flSeconds) );
        }
    }

    g_pClientSocket->Release();
    g_pServerSocket->Release();
    return 0;
}
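main() blocks on WaitForSingleObject( g_ClientPacketEvent.GetEventHandle(), INFINITE ) after every Send(), so somewhere in the client handler created by CHandlerCreator_Client the echoed packet has to be stored in g_ClientPacket and the event signaled. That handler is not shown on this page; the callback below is purely illustrative (its name and signature are invented), assuming g_ClientPacket is a CUtlVector<char> and that CEvent exposes SetEvent().

// Invented callback shape -- the real ThreadedTCP handler interface is not shown here.
void OnClientPacketReceived_Sketch( const void *pData, int len )
{
    // Stash the echoed payload where main() can memcmp() it against what was sent...
    g_ClientPacket.CopyArray( (const char *)pData, len );

    // ...then wake the WaitForSingleObject() call in the send loop.
    g_ClientPacketEvent.SetEvent();
}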