本文整理汇总了C++中MyServer类的典型用法代码示例。如果您正苦于以下问题:C++ MyServer类的具体用法?C++ MyServer怎么用?C++ MyServer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MyServer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main(int argc, char *argv[])
{
QCoreApplication a(argc, argv);
MyServer myServer;
qDebug() << "Begin return:" << myServer.begin();
return a.exec();
}
示例2: main
// Example 2: drive the server forever, processing work in
// one-second slices.
int main()
{
    MyServer server;
    for (;;) {
        server.process(std::chrono::seconds(1));
    }
    return 0;
}
示例3: main
int main()
{
MyServer m;
m.create("127.0.0.1", 4002, 1, 1024);
while(true)
{
m.poll(1);
}
return 0;
}
示例4: main
int main(int argc, char *argv[])
{
QCoreApplication app(argc, argv);
/*Console console;
console.run();
QObject::connect(&console, SIGNAL(quit()), &app, SLOT(quit()));*/
MyServer Server;
Server.StartServer();
return app.exec();
}
示例5: main
/*
 * Example 5: command-line driver that acts as either server or client.
 *
 * Usage:
 *   prog <port>                  -- run a MyServer on <port>
 *   prog <host> <port> <message> -- connect, send <message>, print reply
 *
 * Any other argument count falls through and only prints "Done.".
 */
int main(int argc, char **argv)
{
    TRACEON();
    if(argc == 2)
    {
        int port = atoi(argv[1]);
        TRACE("Acting as server: port=%d\n", port);
        MyServer bs;
        bs.run(port);
    }
    else if(argc == 4)
    {
        const char *host = argv[1];
        int port = atoi(argv[2]);
        TRACE("Acting as client: host=%s port=%d\n", host, port);
        BlockSocket bs(host, port);
        if(bs.isConnected())
        {
            TRACE("connected\n");
            char s[256];
            // BUGFIX: the original strcpy() copied argv[3] unbounded and
            // could overflow s[]; snprintf() truncates safely and always
            // NUL-terminates.
            snprintf(s, sizeof(s), "%s", argv[3]);
            int w = bs.write(s, strlen(s));
            if(w != -1)
            {
                TRACE("wrote %d bytes\n", w);
                // BUGFIX: read at most sizeof(s)-1 bytes so the terminator
                // written below can never land one past the end of s[]
                // (the original allowed r == sizeof(s), making s[r] = 0
                // an out-of-bounds write).
                int r = bs.read(s, sizeof(s) - 1);
                if(r != -1)
                {
                    TRACE("read %d bytes\n", r);
                    s[r] = 0;
                    TRACE("response: '%s'\n", s);
                }
                else
                    TRACE("Read error\n");
            }
            else
                TRACE("Write error\n");
        }
        else
            TRACE("Couldn't connect\n");
    }
    TRACE("Done.\n");
    return 0;
}
示例6: main
int main(int argc, char *argv[])
{
QGuiApplication app(argc, argv);
QQmlApplicationEngine engine;
//QObject::connect(engine.QObject, SIGNAL(quit()), &app, SLOT(quit()));
Receiver receiver;
MyServer server;
server.startServer();
QQmlContext* ctx = engine.rootContext();
ctx->setContextProperty("receiver", &receiver);
ctx->setContextProperty("myserver", &server);
engine.load(QUrl(QStringLiteral("qrc:/main2.qml")));
return app.exec();
}
示例7: main
// Example 7: initialize, register packet handlers, and pump the server
// in 1000 ms updates until it signals quit.
int main() {
    // Guard clause: nothing to do if global initialization fails.
    if (!init())
        return 0;

    register_packets();

    MyServer server;
    DistManager::SetOwner(server);

    if (server.start("localhost", 2000)) {
        while (!server.quit)
            server.update(1000);

        server.stop();
        // One more update after stop(), matching the original flow.
        server.update(1000);
    }
    return 0;
}
示例8: main
int main(){
MyServer* s = MyServer::createMyServer(8090);
s->start();
std::vector<MyClient*> vec;
//vec.reserve(MAX);
for (int i=0; i< MAX; i++){
vec.push_back(MyClient::createMyClient("127.0.0.1", 8090));
vec[i]->start();
}
ThreadBase::sleep(10000);
tlog("Test finished!");
for (int i=0; i<MAX; i++){
vec[i]->destroy();
delete vec[i];
}
s->destroy();
delete s;
}
示例9: main
// Example 9: run the server with a properties file; -f <path> overrides
// the default "server.properties".
int main(int argc, char **argv) {
    string serverConf = "server.properties";
    opterr = 0;

    for (int ch; (ch = getopt(argc, argv, "f:")) != -1; ) {
        if (ch == 'f')
            serverConf = optarg;
        else
            printf("other option :%c\n", ch);
    }

    MyServer server;
    if (0 != server.Run(serverConf)) {
        printf("Server::Run failed, %s\n", server.GetLastErr());
        return -1;
    }
    return 0;
}
示例10: _tmain
/*
 * Example 10: configure, start, and interactively stop a MyServer.
 * The operator types 'q' followed by Enter to shut the server down.
 */
int _tmain(int argc, _TCHAR* argv[])
{
    MyServer server;

    // Wire up services and policies before the listener goes live.
    // Ownership of the raw `new`ed objects presumably transfers to the
    // server -- confirm against the MyServer API.
    server.registerService(WebsocketOpt, new WebsocketService, "WebService");
    server.setClientFactory(new MyClientFactory);
    server.setMaxConnections(100);
    server.setLoginExpiryDuration(60);
    server.setProtocol(new MyProtocol);
    server.createListener(81);

    try
    {
        server.start(true);
    }
    catch (std::exception& e)
    {
        cout << "Failed to start server. Exception : " << e.what() << std::endl;
        // BUGFIX: the original returned 0 here, reporting success to the
        // shell even though startup failed; return a non-zero exit code.
        return 1;
    }

    // Block until the operator types 'q'.
    while (true)
    {
        char ch;
        cin >> ch;
        if (ch == 'q')
        {
            break;
        }
    }

    server.stop();
    return 0;
}
示例11: DoClientSpecificVerification
// Verify the server's balance equals the hardware concurrency minus one
// (the master thread); the thread-count argument is unused here.
void DoClientSpecificVerification( MyServer& server, int /*n_thread*/ )
{
    const int expected = int(tbb::internal::AvailableHwConcurrency()) - 1;
    ASSERT( server.current_balance() == expected, NULL );
}
示例12: FireUpJobs
// Exercise the RML adjust_job_count_estimate protocol: raise the estimate
// by n_thread while workers are held at a gate, snapshot how many jobs
// actually ran, then wait until every job has been processed at least once
// before lowering the estimate back down.
// NOTE(review): under RML_USE_WCRM the braces opened in the #if branch are
// closed in a later #if block -- the preprocessor-conditional structure is
// intentional; edit with care.
void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) {
    REMARK("client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(),n_thread);
    // Exercise independent_thread_number_changed, even for zero values.
    server.independent_thread_number_changed( n_extra );
#if _WIN32||_WIN64
    // On Windows the master thread must be registered as an execution
    // resource before changing thread counts (unregistered at the end).
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    // Experiments indicate that when oversubscribing, the main thread should wait a little
    // while for the RML worker threads to do some work.
    if( checker ) {
        // Give RML time to respond to change in number of threads.
        Harness::Sleep(1);
        // Reset per-job counters so the snapshot below counts only work
        // done after this point.
        for( int k=0; k<n_thread; ++k )
            client.job_array[k].processing_count = 0;
    }
    //close the gate to keep worker threads from returning to RML until a snapshot is taken
    client.close_the_gate();
    server.adjust_job_count_estimate( n_thread );
    int n_used = 0;
    if( checker ) {
        Harness::Sleep(100);
        // Snapshot: count how many jobs saw any processing while gated.
        for( int k=0; k<n_thread; ++k )
            if( client.job_array[k].processing_count )
                ++n_used;
    }
    // open the gate
    client.open_the_gate();
    // Logic further below presumes that jobs never starve, so undo previous call
    // to independent_thread_number_changed before waiting on those jobs.
    server.independent_thread_number_changed( -n_extra );
    REMARK("client %d: wait for each job to be processed at least once\n",client.client_id());
    // Calculate the number of jobs that are expected to get threads.
    int expected = n_thread;
    // Wait for expected number of jobs to be processed.
#if RML_USE_WCRM
    int default_concurrency = server.default_concurrency();
    if( N_TestConnections>0 ) {
        // Only busy-wait for all jobs when concurrency is plentiful
        // relative to the request; otherwise fall to the bounded wait below.
        if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3 && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) {
#endif /* RML_USE_WCRM */
            // Spin (yielding) until every job has been processed at least once.
            for(;;) {
                int n = 0;
                for( int k=0; k<n_thread; ++k )
                    if( client.job_array[k].processing_count!=0 )
                        ++n;
                if( n>=expected ) break;
                server.yield();
            }
#if RML_USE_WCRM
        } else if( n_thread>0 ) {
            // Bounded wait: poll up to 20 times, 1 ms apart, then give up.
            for( int m=0; m<20; ++m ) {
                int n = 0;
                for( int k=0; k<n_thread; ++k )
                    if( client.job_array[k].processing_count!=0 )
                        ++n;
                if( n>=expected ) break;
                Harness::Sleep(1);
            }
        }
    }
#endif /* RML_USE_WCRM */
    server.adjust_job_count_estimate(-n_thread);
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
    // Give RML some time to respond
    if( checker ) {
        Harness::Sleep(1);
        checker->check_number_of_threads_delivered( n_used, n_thread, n_extra );
    }
}
示例13: FireUpJobs
// Exercise the RML team protocol: over four iterations request teams of
// varying sizes (full, unchanged, halved, full again), verify the threads
// delivered actually ran, wait for them to rendezvous at the barrier, and
// check that no thread ran with an out-of-range index.
void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    // Windows requires registering the master thread as an execution
    // resource for the duration of the test (unregistered at the end).
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team(server,size_t(max_thread));
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        // Clear per-thread "ran" flags before each request.
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        // Pick the team size for this iteration.
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 is same code as the default, but has effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            // Negative delivery means oversubscription; skip this round.
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            // Sentinel one past the delivered slots, checked below to
            // detect get_threads() writing out of bounds.
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
            // Every delivered job must be in a sane state.
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // Protocol requires that master wait until workers have called "done_processing"
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK("client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        // Slots beyond the delivered count must never have run.
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
}
示例14: main
int main(int argc, const char *argv[])
{
MyServer s;
return s.run();
}
示例15: main
/*
 * Example 15: run MyServer on the port given as the first argument.
 *
 * BUGFIX: the original dereferenced argv[1] unconditionally, which is
 * undefined behaviour when the program is launched without arguments.
 */
int main(int argc, char *argv[]) {
    // Require the port argument; fail with a non-zero exit code otherwise.
    if (argc < 2) {
        return 1;
    }
    int port = atoi(argv[1]);
    MyServer server;
    server.run(port);
    return 0;
}