This article collects typical usage examples of the C++ Executor class. If you have been wondering what the Executor class does and how to use it, the curated examples below may help.
A total of 15 code examples of the Executor class are shown below, sorted by popularity by default.
Example 1: TEST
TEST(Process, executor)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  volatile bool event1Called = false;
  volatile bool event2Called = false;

  EventReceiver receiver;

  EXPECT_CALL(receiver, event1(42))
    .WillOnce(Assign(&event1Called, true));

  EXPECT_CALL(receiver, event2("event2"))
    .WillOnce(Assign(&event2Called, true));

  Executor executor;

  deferred<void(int)> event1 =
    executor.defer(std::tr1::bind(&EventReceiver::event1,
                                  &receiver,
                                  std::tr1::placeholders::_1));
  event1(42);

  deferred<void(const std::string&)> event2 =
    executor.defer(std::tr1::bind(&EventReceiver::event2,
                                  &receiver,
                                  std::tr1::placeholders::_1));
  event2("event2");

  // Spin until both deferred callbacks have run on the executor's thread.
  while (!event1Called);
  while (!event2Called);
}
Example 2: shooSH_run
/*!
 * \brief Function that runs the shell.
 */
void shooSH_run (void) {
    Job* job;
    Parser p;
    Executor executor;
    bool exited = false;

    while (!exited) {
        std::cout << "shooSH> ";
        job = p.parseLine ();
        history.push_back (job->getCommand());

        if (job->hasFailed ()) {
            std::cout << "Erro de sintaxe" << std::endl;  // "Syntax error"
        } else {
            if (!(job->isNop() || job->hasExited())) {
                if (!job->hasPipe()) {
                    // Try the built-ins first; fall back to the executor otherwise.
                    if (executeBuiltin (job->getProcess(0)) == -1) {
                        job->setID (++currID);
                        jobList.push_back (job);
                        executor.execute (job);
                    }
                } else {
                    job->setID (++currID);
                    jobList.push_back (job);
                    executor.execute (job);
                }
            } else {
                exited = job->hasExited();
            }
        }
        job = NULL;
    }
    shooSH_clean();
}
Example 3: Prepare
void ASTFuncExpr::Prepare(Executor& exe) const
{
    // | res   |
    // | res   | <-top
    exe.AddUnit( new ExePush() );   // save result
    // | res   |
    // | param | 1
    // | param | 2
    //   ...     ..
    // | param | n
    // | param | n <-top
    for( std::size_t i=0; i<paths.Size(); ++i ){
        paths[i]->Prepare(exe);
        exe.AddUnit( new ExePush() );   // save params
    }
    // | res   |
    // | param | 1
    // | param | 2
    //   ...     ..
    // | param | n <-top
    exe.AddUnit( new ExePop() );    // pop the last duplicated param
    // save result to old res
    exe.AddUnit( new ExeFunc() );
    // | res | <-top
    exe.AddUnit( new ExePop( paths.Size() ) );  // pop n params
}
Example 4: DO_ENTER
void ntlm::ExecuteGPU(WorkUnit& wu, Device* pDevice, CudaContext* pContext)
{
    DO_ENTER("ntlm", "ExecuteGPU");

    Module *hashmod;
    CudaKernel *hashker;
    unsigned int nTargetHashes = wu.m_hashvalues.size();

    /* Load the ptx code into memory and create the module */
    hashmod = new Module("ntlm", ReadPtx("ntlm"), *pContext);

    /* Identify the function to use */
    string func = "ntlm";
    if (wu.m_hashvalues.size() > 1)
        func = func + "BatchKernel";
    else
        func = func + "Kernel";

    /* Load the function */
    hashker = hashmod->GetKernel(func.c_str());

    executor_parameters parameters;
    parameters["hashmod"] = &hashmod;
    parameters["hashker"] = &hashker;

    Executor *exec = ExecutorFactory::Get("BasicExecutor");
    exec->Execute(this, wu, pDevice, pContext, parameters);

    delete hashker;
    delete hashmod;
}
Example 5: TEST
TEST(ProcessTest, Executor)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  std::atomic_bool event1Called(false);
  std::atomic_bool event2Called(false);

  EventReceiver receiver;

  EXPECT_CALL(receiver, event1(42))
    .WillOnce(Assign(&event1Called, true));

  EXPECT_CALL(receiver, event2("event2"))
    .WillOnce(Assign(&event2Called, true));

  Executor executor;

  Deferred<void(int)> event1 =
    executor.defer([&receiver](int i) {
      return receiver.event1(i);
    });

  event1(42);

  Deferred<void(const string&)> event2 =
    executor.defer([&receiver](const string& s) {
      return receiver.event2(s);
    });

  event2("event2");

  // Spin until both deferred callbacks have run.
  while (event1Called.load() == false);
  while (event2Called.load() == false);
}
Example 6: while
void ActionManager::Update(int64_t tick, int32_t span)
{
    Executor* cur_exe = executors_.front();
    while (cur_exe) {
        ActionBase* cur_act = cur_exe->actions().front();
        // If the executor has no more actions to run, unregister it.
        if (!cur_act) {
            Executor* tmp = cur_exe;
            cur_exe = executors_.erase(cur_exe);
            tmp->OnAllActionFinish();
            tmp->Release();
            continue;
        }

        while (cur_act) {
            if (cur_act->stopped()) {
                ActionBase* tmp = cur_act;
                cur_act = cur_exe->actions().erase(cur_act);
                tmp->Release();
            } else {
                cur_act->OnUpdate(tick, span);
                if (cur_act->stopped() || cur_act->IsDone()) {
                    ActionBase* tmp = cur_act;
                    cur_act = cur_exe->actions().erase(cur_act);
                    tmp->Release();
                } else {
                    cur_act = cur_act->list_next();
                }
            }
        }
        cur_exe = cur_exe->list_next();
    }
}
Example 7: main
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);

    Executor exe;
    exe.start();

    return a.exec();
}
Example 8: __fun_call
virtual int __fun_call(Executor& ewsl, int pm)
{
    ewsl.check_pmc(this, pm, 1, 2);
    String filename = ewsl.make_path(variant_cast<String>(ewsl.ci0.nbx[1]));
    int type = pm > 1 ? variant_cast<int>(ewsl.ci0.nbx[2]) : FILE_TYPE_BINARY;
    bool flag = ewsl.ci1.nbp[StackState1::SBASE_THIS].ref<StringBuffer<T> >().save(filename, type);
    ewsl.ci0.nbx[1].reset(flag);
    return 1;
}
Example 9: checkExecution
void checkExecution(const char* snippet[],
                    const expression::Context& context, double expected) {
  // The snippet is a null-terminated array of source lines.
  size_t n = 0;
  while (snippet[n]) ++n;

  grammar::Program program(snippet, snippet + n);
  runtime::CallbackDispatcher dispatcher;
  Executor executor;
  EXPECT_EQ(expected, executor.run(program, dispatcher, context));
}
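The helper above expects the snippet array to end with a null pointer. A hypothetical call site might look like the following sketch; the snippet text and the default-constructed Context are illustrative assumptions, not taken from the library shown here.

// Hypothetical usage of checkExecution; "1 + 2" and the default-constructed
// Context are assumptions made for illustration only.
const char* snippet[] = {
    "1 + 2",   // assumed expression syntax
    nullptr    // terminator expected by checkExecution
};
expression::Context context;
checkExecution(snippet, context, 3.0);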
Example 10: main
int main(int argc, char** argv)
{
    // Executor can execute member functions of an object.
    Executor<Foo> executeMember;
    executeMember.setAction(new Foo(), &Foo::execute);
    executeMember.setInterval("once in 10 executions");

    // Executor can execute static member functions of a class.
    Executor<> executeStaticMember;
    executeStaticMember.setAction(&Foo::staticExecute);
    executeStaticMember.setInterval("thrice in 10 executions");

    // Executor can execute free functions.
    Executor<> executeStatic;
    executeStatic.setAction(&execute);
    executeStatic.setInterval("2 times in 10 executions");

    while (true)
    {
        executeMember();
        executeStatic();
        executeStaticMember();
    }
    return 0;
}
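The Executor template used in this example is not shown on this page. A minimal sketch of the interface the calls above assume could look like the following; this is hypothetical, for illustration only, and the interval string is stored but not interpreted.

#include <functional>
#include <string>

// Hypothetical sketch of the interface assumed by the example above.
// The real Executor template (and its interval semantics) is not shown here.
template <typename T = void>
class Executor;

// Specialization used as Executor<> for free and static functions.
template <>
class Executor<void> {
public:
    void setAction(void (*fn)()) { action_ = fn; }
    void setInterval(const std::string& interval) { interval_ = interval; }
    void operator()() { if (action_) action_(); }
private:
    std::function<void()> action_;
    std::string interval_;   // stored but not interpreted in this sketch
};

// Primary template: binds a member function to an object instance.
template <typename T>
class Executor {
public:
    void setAction(T* obj, void (T::*fn)()) {
        action_ = [obj, fn]() { (obj->*fn)(); };
    }
    void setInterval(const std::string& interval) { interval_ = interval; }
    void operator()() { if (action_) action_(); }
private:
    std::function<void()> action_;
    std::string interval_;
};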
Example 11: trainWithBuiltInRNNOp
void trainWithBuiltInRNNOp(const string file, int batch_size, int max_epoch, int start_epoch) {
  Context device(DeviceType::kGPU, 0);
  BucketSentenceIter dataIter(file, batch_size, device);
  string prefix = file.substr(0, file.rfind("."));
  dataIter.saveCharIndices(prefix + ".dictionary");
  input_dim = static_cast<int>(dataIter.characterSize());
  sequence_length_max = dataIter.maxSequenceLength();

  auto RNN = LSTMWithBuiltInRNNOp(num_lstm_layer, sequence_length_max, input_dim, num_hidden,
                                  num_embed, dropout);
  map<string, NDArray> args_map;
  args_map["data"] = NDArray(Shape(batch_size, sequence_length_max), device, false);
  // To avoid SwapAxis, batch_size is the second dimension.
  args_map["LSTM_init_c"] = NDArray(Shape(num_lstm_layer, batch_size, num_hidden), device, false);
  args_map["LSTM_init_h"] = NDArray(Shape(num_lstm_layer, batch_size, num_hidden), device, false);
  args_map["softmax_label"] = NDArray(Shape(batch_size, sequence_length_max), device, false);
  vector<mx_float> zeros(batch_size * num_lstm_layer * num_hidden, 0);
  Executor* exe = RNN.SimpleBind(device, args_map);

  if (start_epoch == -1) {
    RNNXavier xavier = RNNXavier(Xavier::gaussian, Xavier::in, 2.34);
    for (auto &arg : exe->arg_dict())
      xavier(arg.first, &arg.second);
  } else {
    LoadCheckpoint(prefix + "-" + to_string(start_epoch) + ".params", exe);
  }
  start_epoch++;

  mx_float learning_rate = 0.0002;
  mx_float weight_decay = 0.000002;
  Optimizer* opt = OptimizerRegistry::Find("ccsgd");
  // opt->SetParam("momentum", 0.9)->SetParam("rescale_grad", 1.0 / batch_size)
  //    ->SetParam("clip_gradient", 10);

  for (int epoch = start_epoch; epoch < max_epoch; ++epoch) {
    dataIter.Reset();
    auto tic = chrono::system_clock::now();
    while (dataIter.Next()) {
      auto data_batch = dataIter.GetDataBatch();
      data_batch.data.CopyTo(&exe->arg_dict()["data"]);
      data_batch.label.CopyTo(&exe->arg_dict()["softmax_label"]);
      exe->arg_dict()["LSTM_init_c"].SyncCopyFromCPU(zeros);
      exe->arg_dict()["LSTM_init_h"].SyncCopyFromCPU(zeros);
      NDArray::WaitAll();

      exe->Forward(true);
      exe->Backward();
      exe->UpdateAll(opt, learning_rate, weight_decay);
      NDArray::WaitAll();
    }
    auto toc = chrono::system_clock::now();
    cout << "Epoch[" << epoch << "] Time Cost:" <<
         chrono::duration_cast<chrono::seconds>(toc - tic).count() << " seconds ";
    OutputPerplexity(&exe->arg_dict()["softmax_label"], &exe->outputs[0]);
    string filepath = prefix + "-" + to_string(epoch) + ".params";
    SaveCheckpoint(filepath, RNN, exe);
  }
}
Example 12: predictWithBuiltInRNNOp
void predictWithBuiltInRNNOp(wstring* ptext, int sequence_length, const string param_file,
                             const string dictionary_file) {
  Context device(DeviceType::kGPU, 0);
  auto results = BucketSentenceIter::loadCharIndices(dictionary_file);
  auto dictionary = get<0>(results);
  auto charIndices = get<1>(results);
  input_dim = static_cast<int>(charIndices.size());
  auto RNN = LSTMWithBuiltInRNNOp(num_lstm_layer, 1, input_dim, num_hidden, num_embed, 0);

  map<string, NDArray> args_map;
  args_map["data"] = NDArray(Shape(1, 1), device, false);
  args_map["softmax_label"] = NDArray(Shape(1, 1), device, false);
  vector<mx_float> zeros(1 * num_lstm_layer * num_hidden, 0);
  // To avoid SwapAxis, batch_size = 1 is the second dimension.
  args_map["LSTM_init_c"] = NDArray(Shape(num_lstm_layer, 1, num_hidden), device, false);
  args_map["LSTM_init_h"] = NDArray(Shape(num_lstm_layer, 1, num_hidden), device, false);
  args_map["LSTM_init_c"].SyncCopyFromCPU(zeros);
  args_map["LSTM_init_h"].SyncCopyFromCPU(zeros);

  Executor* exe = RNN.SimpleBind(device, args_map);
  LoadCheckpoint(param_file, exe);

  mx_float index;
  wchar_t next = 0;
  vector<mx_float> softmax;
  softmax.resize(input_dim);

  // Feed the seed text through the network, carrying the LSTM state forward.
  for (auto c : *ptext) {
    exe->arg_dict()["data"].SyncCopyFromCPU(&dictionary[c], 1);
    exe->Forward(false);

    exe->outputs[0].SyncCopyToCPU(softmax.data(), input_dim);
    exe->outputs[1].CopyTo(&args_map["LSTM_init_h"]);
    exe->outputs[2].CopyTo(&args_map["LSTM_init_c"]);

    size_t n = max_element(softmax.begin(), softmax.end()) - softmax.begin();
    index = (mx_float) n;
    next = charIndices[n];
  }
  ptext->push_back(next);

  // Generate sequence_length further characters, feeding each prediction back in.
  for (int i = 0; i < sequence_length; i++) {
    exe->arg_dict()["data"].SyncCopyFromCPU(&index, 1);
    exe->Forward(false);

    exe->outputs[0].SyncCopyToCPU(softmax.data(), input_dim);
    exe->outputs[1].CopyTo(&args_map["LSTM_init_h"]);
    exe->outputs[2].CopyTo(&args_map["LSTM_init_c"]);

    size_t n = max_element(softmax.begin(), softmax.end()) - softmax.begin();
    index = (mx_float) n;
    next = charIndices[n];
    ptext->push_back(next);
  }
}
Example 13: in
void DTest::run(const char* path) {
  std::ifstream in(path, std::ios_base::binary);
  SCOPE_ASSERT(in);

  Executor Exec;
  while (in.peek() != -1) {
    TestCase tcase;
    SCOPE_ASSERT(readTestData(in, tcase.patterns, tcase.text, tcase.expected));
    Exec.submit(tcase);
  }
}
Example 14: main
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);

#ifdef RESEARCH
    ResearchWindow rw;
    rw.show();
#else
    Executor ex;
    ex.start();
#endif

    return a.exec();
}
Example 15: multi_agent_then_execute_returning_void
__AGENCY_ANNOTATION
typename executor_traits<Executor>::template future<void>
  multi_agent_then_execute_returning_void(use_multi_agent_then_execute_returning_void_member_function,
                                          Executor& ex, Function f,
                                          typename executor_traits<Executor>::shape_type shape,
                                          Future& fut)
{
  return ex.then_execute(f, shape, fut);
} // end multi_agent_then_execute_returning_void()