This article collects typical usage examples of the C++ TaskQueue::end method. If you have been wondering what exactly TaskQueue::end does in C++, how to call it, or what real code that uses it looks like, the hand-picked examples here may help. You can also explore further usage examples of TaskQueue, the class the method belongs to.
A total of four code examples of TaskQueue::end are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
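Examples 1, 2, and 4 appear to come from the same work-stealing scheduler and treat TaskQueue as a sequence container of TaskQueue_Item pointers, while Example 3 comes from a different codebase whose elements are Task pointers. The following is a minimal sketch of the type the first group of examples seems to assume; it is a reconstruction from how the code uses the container, not part of the original source.

#include <deque>
#include <string>

// Hypothetical definitions, inferred from the usage in Examples 1, 2 and 4.
struct TaskQueue_Item {
    std::string task_id;   // unique task identifier
    int num_moves;         // how many times the task has been migrated
};

// begin()/end()/erase()/assign()/front()/operator[] as used below
typedef std::deque<TaskQueue_Item*> TaskQueue;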
Example 1: check_wait_queue
// Worker thread: periodically scan the wait queue and promote tasks whose
// dependencies are satisfied to the ready queue.
void* check_wait_queue(void* args) {
    Worker *worker = (Worker*) args;
    TaskQueue_Item *qi;
    while (ON) {
        while (wqueue.size() > 0) {
            int size = wqueue.size();
            for (int i = 0; i < size; i++) {
                qi = wqueue[i];
                if (qi == NULL)
                    continue;
                // status == 0 means the task has no outstanding dependencies
                int status = worker->check_if_task_is_ready(qi->task_id);
                if (status == 0) {
                    worker->move_task_to_ready_queue(&qi);
                    pthread_mutex_lock(&w_lock);
                    wqueue[i] = NULL;   // mark this slot for removal below
                    pthread_mutex_unlock(&w_lock);
                }
            }
            // Compact the wait queue: drop the slots NULLed out above
            // using remove_if over wqueue.begin()..wqueue.end().
            pthread_mutex_lock(&w_lock);
            TaskQueue::iterator last = remove_if(wqueue.begin(), wqueue.end(), check);
            wqueue.erase(last, wqueue.end());
            pthread_mutex_unlock(&w_lock);
            sleep(1);
        }
    }
    return NULL;   // not reached; satisfies the pthread start-routine signature
}
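Example 1 relies on the erase-remove idiom: remove_if shifts the surviving elements toward the front and returns an iterator last, and wqueue.erase(last, wqueue.end()) then trims the leftover tail. The check predicate is not included in the excerpt; a plausible sketch, assuming it simply flags the slots the loop above set to NULL:

// Assumed predicate (not shown in the original excerpt): an element is
// removable once the loop above has replaced it with NULL.
static bool check(TaskQueue_Item* qi) {
    return qi == NULL;
}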
Example 2: str
// Build, for each destination server, a list of packed strings containing the
// task ids currently in mqueue. Each task id is terminated with "'" and each
// packed string ends with '"'; a new string is started once the current one
// would exceed STRING_THRESHOLD characters.
map<uint32_t, NodeList> Worker::get_map(TaskQueue &mqueue) {
    map<uint32_t, NodeList> update_map;
    uint32_t num_nodes = svrclient.memberList.size();
    for (TaskQueue::iterator it = mqueue.begin(); it != mqueue.end(); ++it) {
        uint32_t serverid = myhash(((*it)->task_id).c_str(), num_nodes);
        string str((*it)->task_id);
        str.append("\'");
        if (update_map.find(serverid) == update_map.end()) {
            // First task for this server: start a new list with one packed string.
            str.append("\"");
            NodeList new_list;
            new_list.push_back(str);
            update_map.insert(make_pair(serverid, new_list));
        } else {
            NodeList &exist_list = update_map[serverid];
            string last_str(exist_list.back());
            if ((last_str.size() + str.size()) > STRING_THRESHOLD) {
                // Current packed string is full; start another one.
                str.append("\"");
                exist_list.push_back(str);
            } else {
                // Prepend this task id to the existing packed string.
                exist_list.pop_back();
                str.append(last_str);
                exist_list.push_back(str);
            }
        }
    }
    return update_map;
}
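In the strings produced by get_map, task ids are separated by single quotes and each complete package ends with a double quote. For illustration only, here is a small helper (not part of the original project) that splits such a packed string back into individual task ids, assuming that delimiter scheme:

#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: split a packed string such as "task3'task2'task1'\""
// back into its task ids, assuming "'" separates ids and '"' closes the package.
std::vector<std::string> unpack_task_ids(const std::string& packed) {
    std::vector<std::string> ids;
    std::string token;
    std::istringstream in(packed);
    while (std::getline(in, token, '\'')) {
        if (!token.empty() && token != "\"")   // skip the closing '"' token
            ids.push_back(token);
    }
    return ids;
}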
Example 3: updateFramesClass
// Propagate a class update to the execution context of every queued task.
void TaskScheduler::updateFramesClass(TaskQueue& queue, Class* klass)
{
    for (TaskQueue::iterator it = queue.begin(); it != queue.end(); ++it)
    {
        Task* task = *it;
        task->getContext()->updateFramesClass(klass);
    }
}
Example 4: migrateTasks
// Pack the jobs into multiple packages - 2000 jobs per package - and insert
// them into the ready queue of the server that requested to steal tasks.
//int Worker::migrateTasks(int num_tasks, ZHTClient &clientRet, int index){
void* migrateTasks(void *args) {
    Worker *worker = (Worker*) args;
    int index;
    while (ON) {
        while (migratev.any()) {
            // Pop the index of the server that asked to steal tasks.
            pthread_mutex_lock(&mq_lock);
            if (migratev.any()) {
                index = migratev.pop();
            } else {
                pthread_mutex_unlock(&mq_lock);
                continue;
            }
            if (index < 0 || index >= worker->num_nodes) {
                // Invalid destination index; drop the request.
                pthread_mutex_unlock(&mq_lock);
                continue;
            }
            pthread_mutex_unlock(&mq_lock);

            // Move half of the ready queue into the migrate queue.
            pthread_mutex_lock(&m_lock);
            pthread_mutex_lock(&lock);
            int32_t num_tasks = rqueue.size() / 2;
            if (num_tasks < 1) {
                pthread_mutex_unlock(&lock);
                pthread_mutex_unlock(&m_lock);
                continue;
            }
            try {
                mqueue.assign(rqueue.end() - num_tasks, rqueue.end());
                rqueue.erase(rqueue.end() - num_tasks, rqueue.end());
            } catch (...) {
                cout << "migrateTasks: cannot allocate memory while copying tasks to migrate queue" << endl;
                pthread_exit(NULL);
            }
            pthread_mutex_unlock(&lock);

            // Record the tasks' new location in ZHT before sending them.
            map<uint32_t, NodeList> update_map = worker->get_map(mqueue);
            int update_ret = worker->zht_update(update_map, "nodehistory", index);

            // Pack the migrated tasks into packages and push them to the
            // destination server.
            int num_packages = 0;
            long total_submitted = 0;
            num_tasks = mqueue.size();
            while (total_submitted != num_tasks) {
                Package package;
                string alltasks;
                package.set_virtualpath(worker->ip);
                package.set_operation(22);
                num_packages++;
                int num_tasks_this_package = max_tasks_per_package;
                int num_tasks_left = num_tasks - total_submitted;
                if (num_tasks_left < max_tasks_per_package) {
                    num_tasks_this_package = num_tasks_left;
                }
                for (int j = 0; j < num_tasks_this_package; j++) {
                    if (mqueue.size() < 1) {
                        // Migrate queue drained early: flush whatever has been
                        // packed so far and stop.
                        if (j > 0) {
                            total_submitted = total_submitted + j;
                            package.set_realfullpath(alltasks);
                            string str = package.SerializeAsString();
                            pthread_mutex_lock(&msg_lock);
                            int32_t ret = worker->svrclient.svrtosvr(str, str.length(), index);
                            pthread_mutex_unlock(&msg_lock);
                        }
                        total_submitted = num_tasks;
                        break;
                    }
                    try {
                        alltasks.append(mqueue.front()->task_id);
                        alltasks.append("\'\""); // task id delimiter
                        if (LOGGING) {
                            migrate_fp << " taskid = " << mqueue.front()->task_id;
                        }
//......... the rest of this example is omitted .........