本文整理汇总了C++中TaskScheduler类的典型用法代码示例。如果您正苦于以下问题:C++ TaskScheduler类的具体用法?C++ TaskScheduler怎么用?C++ TaskScheduler使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TaskScheduler类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main()
{
    TaskScheduler scheduler;

    // Queue ten standard-priority tasks; each one schedules a follow-up IO task.
    for (size_t i = 0; i < 10; i++) {
        auto standardBody = [&scheduler, i]() {
            std::cout << "[standard] hello from standard task " << i << "!\nscheduling new task\n";
            auto ioBody = [i] {
                std::cout << "[io] hello from io task " << i << "!\n[io] sleeping...\n";
                std::this_thread::sleep_for(7s);
            };
            scheduler.schedule({ TaskCategory::IO, TaskPriority::Standard, ioBody });
            std::cout << "[standard] sleeping...\n";
            std::this_thread::sleep_for(5s);
            std::cout << "[standard] Done sleeping!\n";
        };
        scheduler.schedule({ TaskCategory::Standard, TaskPriority::Standard, standardBody });
    }

    // One long-running computation task alongside the standard ones.
    auto longBody = [] {
        std::cout << "[long_comp] hello from long_comp task!\n[long_comp] sleeping...\n";
        std::this_thread::sleep_for(3s);
        std::cout << "[long_comp] Done sleeping!\n";
    };
    scheduler.schedule({ TaskCategory::LongComputation, TaskPriority::Standard, longBody });

    // Keep the process alive until the user presses Enter.
    std::cin.get();
    return 0;
}
示例2: CoInitializeEx
void TaskScheduler::TaskingThreadFunction( const ThreadArgs& args_ )
{
CoInitializeEx(NULL, COINIT_MULTITHREADED);
uint32_t threadNum = args_.threadNum;
TaskScheduler* pTS = args_.pTaskScheduler;
gtl_threadNum = threadNum;
gtl_pCurrTS = pTS;
pTS->m_NumThreadsRunning.fetch_add(1, std::memory_order_relaxed );
uint32_t spinCount = 0;
uint32_t hintPipeToCheck_io = threadNum + 1; // does not need to be clamped.
while( pTS->m_bRunning.load( std::memory_order_relaxed ) )
{
if( !pTS->TryRunTask( threadNum, hintPipeToCheck_io ) )
{
// no tasks, will spin then wait
++spinCount;
if( spinCount > SPIN_COUNT )
{
pTS->WaitForTasks<false>( threadNum );
}
}
else
{
spinCount = 0;
}
}
pTS->m_NumThreadsRunning.fetch_sub( 1, std::memory_order_relaxed );
gtl_threadNum = NO_THREAD_NUM;
gtl_pCurrTS = NULL;
return;
}
示例3: test10_testThread
OSTHREAD_FUNC test10_testThread(void *parm){
    // Create the PROGTest task on this thread's scheduler and enter its run loop.
    TaskScheduler *scheduler = tgetTaskScheduler();
    TaskInfo *taskInfo = scheduler->createTask(test10_PROGTest, 0);
    assert(taskInfo);
    scheduler->run();
    return (void*)-1;
}
示例4: ThreadMain
DWORD WINAPI TaskScheduler::ThreadMain(VOID* thread_instance)
{
    // The opaque thread argument is the TaskScheduler that spawned this worker;
    // delegate straight into its task-execution loop.
    auto *scheduler = reinterpret_cast<TaskScheduler*>(thread_instance);
    scheduler->ExecuteTasks();
    return 0;
}
示例5: lock
void RTSPManager::createRTSPServer(unsigned int id , unsigned int port , volatile char * watcher)
{
std::unique_lock<std::mutex> lock(_lock);
TaskScheduler* taskSchedular = BasicTaskScheduler::createNew();
BasicUsageEnvironment* usageEnvironment = BasicUsageEnvironment::createNew(*taskSchedular);
RTSPServer* rtspServer = RTSPServer::createNew(*usageEnvironment, port, NULL);
if(rtspServer == NULL)
{
logger::log(usageEnvironment->getResultMsg() , logger::logType::FAILURE);
*watcher = -1;
this->_done = true;
this->_condition.notify_all();
return;
}
H264LiveServerMediaSession *liveSubSession = H264LiveServerMediaSession::createNew(*usageEnvironment, true , id);
std::string streamName = "camera_" + std::to_string(id);
ServerMediaSession* sms = ServerMediaSession::createNew(*usageEnvironment, streamName.c_str(), streamName.c_str(), "Live H264 Stream");
sms->addSubsession(liveSubSession);
rtspServer->addServerMediaSession(sms);
char* url = rtspServer->rtspURL(sms);
logger::log(INFO_RTSP_URL(url) , logger::logType::PRIORITY);
delete[] url;
this->_done = true;
this->_condition.notify_all();
lock.unlock();
taskSchedular->doEventLoop(watcher);
return;
}
示例6: assert
void TaskScheduler::TaskFunc(Task *task)
{
    // Only tasks that were scheduled (or made runnable) may execute.
    assert(task->m_State == Task::State::Scheduled || task->m_State == Task::State::Runnable);

    TaskScheduler *owner = task->m_Scheduler;
    owner->m_Current = task;       // mark as the currently running task
    task->m_Func(*task);           // invoke the user-supplied callback
    owner->SetState(*task, Task::State::Finished);
}
示例7: signalNewFrameData
// The following code would be called to signal that a new frame of data has become available.
// This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread.
// (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads.
// Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need
// to make "eventTriggerId" a non-static member variable of "DeviceSource".)
void signalNewFrameData() {
TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%%
DeviceSource* ourDevice = NULL; //%%% TO BE WRITTEN %%%
if (ourScheduler != NULL) { // sanity check
ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice);
}
}
示例8:
void * TaskScheduler::thread_func(void * arg)
{
boost::shared_ptr<ThreadParameters> tp = *(boost::shared_ptr<ThreadParameters>*)arg;
TaskScheduler *that = tp->that;
pthread_cleanup_push(cleanitup,&tp);
that->runTask(tp);
pthread_cleanup_pop(1);
return NULL;
}
示例9: signalNewVideoFrameData
static void
signalNewVideoFrameData(int channelId) {
TaskScheduler* ourScheduler = (TaskScheduler*) liveserver_taskscheduler(); //%%% TO BE WRITTEN %%%
GAVideoLiveSource* ourDevice = vLiveSource[channelId]; //%%% TO BE WRITTEN %%%
if (ourScheduler != NULL) { // sanity check
ourScheduler->triggerEvent(eventTriggerId[channelId], ourDevice);
}
}
示例10: test10_incThread
OSTHREAD_FUNC test10_incThread(void *parm){
    TaskInfo *ti;
    TaskScheduler *ts = tgetTaskScheduler();
    // Fix: check createTask's result, matching test10_testThread; a null
    // TaskInfo would otherwise be handed to assignFixedTask.
    ti = ts->createTask(test10_PROGIncrement, 0); assert(ti);
    // Pin the increment task to worker 1, signal the test driver, then run.
    ts->assignFixedTask(1, ti);
    test10_event.set();
    ts->run();
    return (void*)-1;
}
示例11: RepetitiveTask
void RepetitiveTask(Task::TaskData & parTask)
{
    // Unpack the repetitive-task payload: owning scheduler, the wrapped task's
    // id, and the repeat period.
    TaskScheduler * owner = parTask.RepetetiveTaskData.Scheduler;
    TaskId targetId = parTask.RepetetiveTaskData.RepetiveTask;
    std::chrono::milliseconds period = parTask.RepetetiveTaskData.RepeatTimer;

    // Run the wrapped task once, then re-arm it for the next period.
    Task * target = owner->GetTask(targetId);
    assert(target != nullptr);
    target->Run();
    owner->ScheduleEvery(period, targetId, false);
}
示例12: FIBER_START_FUNCTION_CLASS_IMPL
// Worker-fiber main loop: runs until m_quit is set. Each iteration first
// resumes at most one waiting fiber whose dependency counter has reached its
// awaited value, then executes (or yields while waiting for) one queued task.
FIBER_START_FUNCTION_CLASS_IMPL(TaskScheduler, FiberStart) {
GlobalArgs *globalArgs = reinterpret_cast<GlobalArgs *>(arg);
TaskScheduler *taskScheduler = &globalArgs->g_taskScheduler;
while (!taskScheduler->m_quit.load()) {
// Check if any of the waiting tasks are ready
WaitingTask waitingTask;
bool waitingTaskReady = false;
// Scan m_waitingTasks under the lock; stop at the first ready entry.
taskScheduler->m_waitingTaskLock.lock();
auto iter = taskScheduler->m_waitingTasks.begin();
for (; iter != taskScheduler->m_waitingTasks.end(); ++iter) {
// Ready when the dependency counter has counted down/up to the awaited value.
if (iter->Counter->load() == iter->Value) {
waitingTaskReady = true;
break;
}
}
if (waitingTaskReady) {
// Copy out the entry before erasing it from the list.
waitingTask = *iter;
// Optimization for removing an item from a vector as suggested by ryeguy on reddit
// Explained here: http://stackoverflow.com/questions/4442477/remove-ith-item-from-c-stdvector/4442529#4442529
// Essentially, rather than forcing a memcpy to shift all the remaining elements down after the erase,
// we move the last element into the place where the erased element was. Then we pop off the last element
// Check that we're not already the last item
// Move assignment to self is not defined
if (iter != (--taskScheduler->m_waitingTasks.end())) {
*iter = std::move(taskScheduler->m_waitingTasks.back());
}
taskScheduler->m_waitingTasks.pop_back();
}
// Release the lock BEFORE switching fibers, so the resumed fiber (and other
// workers) can touch the waiting list.
taskScheduler->m_waitingTaskLock.unlock();
if (waitingTaskReady) {
taskScheduler->SwitchFibers(waitingTask.Fiber);
}
// No ready fiber (or we just returned from one): pull the next queued task.
TaskBundle nextTask;
if (!taskScheduler->GetNextTask(&nextTask)) {
// Queue empty — yield the thread rather than burning CPU.
std::this_thread::yield();
} else {
nextTask.TaskToExecute.Function(&globalArgs->g_taskScheduler, &globalArgs->g_heap, &globalArgs->g_allocator, nextTask.TaskToExecute.ArgData);
// Signal completion by decrementing the task's counter.
nextTask.Counter->fetch_sub(1);
}
}
// Shutdown: leave fiber mode, unregister this worker, and end the thread.
FTLConvertFiberToThread(FTLGetCurrentFiber());
globalArgs->g_taskScheduler.m_numActiveWorkerThreads.fetch_sub(1);
FTLEndCurrentThread();
}
示例13: setOutput
void UIWindow::setOutput()
{
TaskScheduler tsd;
QString qstr = textEdit->toPlainText();
tsd.input(1,qstr.toStdString());
string str="";
tsd.execute();
tsd.output(str);
QString qst = "";
qst=qst+str.c_str();
output->setText(qst);
outputLabel->setText("Output");
repaint();
}
示例14: parfor
void parfor(std::size_t idx_start,
            std::size_t idx_end,
            Lambda &&loopBody,
            TaskScheduler &scheduler,
            std::size_t blockSize = 32)
{
    // Parallel for over [idx_start, idx_end): the range is split into blocks
    // of blockSize iterations, each submitted to the scheduler as one task.
    static_assert(std::is_same<void, typename std::result_of<Lambda(std::size_t)>::type>::value,
                  "Loop body must return void");

    const auto total = idx_end - idx_start;

    // Short loops are not worth the scheduling overhead — run them serially.
    if (total < 10 * blockSize) {
        for (std::size_t i = idx_start; i < idx_end; ++i) {
            loopBody(i);
        }
        return;
    }

    const auto fullBlocks = total / blockSize;
    const auto tailStart = fullBlocks * blockSize + idx_start;
    const auto numBlocks = fullBlocks + ((tailStart < idx_end) ? 1 : 0);

    std::vector<std::future<void>> futures;
    futures.reserve(numBlocks);

    for (std::size_t block = 0; block < numBlocks; ++block) {
        const std::size_t lo = idx_start + block * blockSize;
        std::size_t hi = lo + blockSize;
        if (hi > idx_end) {
            hi = idx_end; // last block may be partial
        }
        auto [task, fut] = scheduler.createTask([&loopBody, lo, hi]() {
            for (auto i = lo; i < hi; ++i) {
                loopBody(i);
            }
        });
        scheduler.enqueue(task);
        futures.push_back(std::move(fut));
    }

    // Block until every submitted block has finished.
    wait_all(futures);
    //return futs;
}
示例15: MainFiberStart
void TaskScheduler::MainFiberStart(intptr_t arg) {
    // Entry point of the "main" fiber: runs the user's main task, then tells
    // all workers to quit and switches back to this thread's original fiber.
    MainFiberStartArgs *mainFiberArgs = reinterpret_cast<MainFiberStartArgs *>(arg);
    TaskScheduler *taskScheduler = mainFiberArgs->taskScheduler;

    // Call the main task procedure
    mainFiberArgs->MainTask(taskScheduler, mainFiberArgs->Arg);

    // Request that all the threads quit
    taskScheduler->m_quit.store(true, std::memory_order_release);

    // Switch to the thread fibers
    ThreadLocalStorage &tls = taskScheduler->m_tls[taskScheduler->GetCurrentThreadIndex()];
    taskScheduler->m_fibers[tls.CurrentFiberIndex].SwitchToFiber(&tls.ThreadFiber);

    // We should never get here.
    // Fix: send the diagnostic to stderr and terminate it with a newline so it
    // is not lost in an unflushed stdout buffer if the process dies here.
    fprintf(stderr, "Error: FiberStart should never return\n");
}