This article collects typical usage examples of the C++ function BLI_mutex_unlock. If you are wondering what BLI_mutex_unlock does, how to use it, or what calling it looks like in practice, the hand-picked examples below should help.
The 15 code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
Example 1: COM_execute
void COM_execute(RenderData *rd, Scene *scene, bNodeTree *editingtree, int rendering,
                 const ColorManagedViewSettings *viewSettings,
                 const ColorManagedDisplaySettings *displaySettings,
                 const char *viewName)
{
    /* Initialize mutex. TODO: this mutex init is actually not thread safe and
     * should be done somewhere as part of Blender startup; all the other
     * initializations can be done lazily. */
    if (is_compositorMutex_init == false) {
        BLI_mutex_init(&s_compositorMutex);
        is_compositorMutex_init = true;
    }

    BLI_mutex_lock(&s_compositorMutex);

    if (editingtree->test_break(editingtree->tbh)) {
        /* During editing, multiple calls to this method can be triggered;
         * make sure only the last one does the work. */
        BLI_mutex_unlock(&s_compositorMutex);
        return;
    }

    /* Make sure the node tree has previews.
     * Don't create previews in advance, this is done when adding preview operations.
     * Reserved preview size is determined by render output for now. */
    float aspect = rd->xsch > 0 ? (float)rd->ysch / (float)rd->xsch : 1.0f;
    BKE_node_preview_init_tree(editingtree, COM_PREVIEW_SIZE, (int)(COM_PREVIEW_SIZE * aspect), false);

    /* Initialize the work scheduler; it checks whether this was already done.
     * TODO: deinitialize somewhere. */
    bool use_opencl = (editingtree->flag & NTREE_COM_OPENCL) != 0;
    WorkScheduler::initialize(use_opencl, BKE_render_num_threads(rd));

    /* Set the progress bar to 0% and the status to "Compositing". */
    editingtree->progress(editingtree->prh, 0.0);
    editingtree->stats_draw(editingtree->sdh, IFACE_("Compositing"));

    bool twopass = (editingtree->flag & NTREE_TWO_PASS) > 0 && !rendering;

    /* Initialize the execution system. */
    if (twopass) {
        ExecutionSystem *system = new ExecutionSystem(rd, scene, editingtree, rendering, twopass,
                                                      viewSettings, displaySettings, viewName);
        system->execute();
        delete system;

        if (editingtree->test_break(editingtree->tbh)) {
            /* During editing, multiple calls to this method can be triggered;
             * make sure only the last one does the work. */
            BLI_mutex_unlock(&s_compositorMutex);
            return;
        }
    }

    ExecutionSystem *system = new ExecutionSystem(rd, scene, editingtree, rendering, false,
                                                  viewSettings, displaySettings, viewName);
    system->execute();
    delete system;

    BLI_mutex_unlock(&s_compositorMutex);
}
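Note: the TODO in Example 1 is worth taking seriously: if two threads entered COM_execute before the flag is set, both could run BLI_mutex_init on the same mutex. A minimal sketch of the startup-time initialization the comment asks for follows; the hook name COM_init_threading and the ThreadMutex type spelling are assumptions, not shown in the example above.

/* Sketch only: run once from Blender's single-threaded startup path,
 * before any compositor job can be scheduled. COM_init_threading is a
 * hypothetical name; the example above only shows the flag and mutex. */
static ThreadMutex s_compositorMutex;        /* assumed underlying type */
static bool is_compositorMutex_init = false;

void COM_init_threading(void)
{
    BLI_mutex_init(&s_compositorMutex);
    is_compositorMutex_init = true;  /* no race: nothing else runs yet */
}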
Example 2: BLI_task_pool_work_and_wait
void BLI_task_pool_work_and_wait(TaskPool *pool)
{
    TaskScheduler *scheduler = pool->scheduler;

    BLI_mutex_lock(&pool->num_mutex);

    while (pool->num != 0) {
        Task *task, *work_task = NULL;
        bool found_task = false;

        BLI_mutex_unlock(&pool->num_mutex);

        BLI_mutex_lock(&scheduler->queue_mutex);

        /* find task from this pool. if we get a task from another pool,
         * we can get into deadlock */
        if (pool->num_threads == 0 ||
            pool->currently_running_tasks < pool->num_threads)
        {
            for (task = scheduler->queue.first; task; task = task->next) {
                if (task->pool == pool) {
                    work_task = task;
                    found_task = true;
                    BLI_remlink(&scheduler->queue, task);
                    break;
                }
            }
        }

        BLI_mutex_unlock(&scheduler->queue_mutex);

        /* if found task, do it, otherwise wait until other tasks are done */
        if (found_task) {
            /* run task */
            atomic_add_z(&pool->currently_running_tasks, 1);
            work_task->run(pool, work_task->taskdata, 0);

            /* delete task */
            if (work_task->free_taskdata)
                MEM_freeN(work_task->taskdata);
            MEM_freeN(work_task);

            /* notify pool task was done */
            task_pool_num_decrease(pool, 1);
        }

        BLI_mutex_lock(&pool->num_mutex);
        if (pool->num == 0)
            break;

        if (!found_task)
            BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
    }

    BLI_mutex_unlock(&pool->num_mutex);
}
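A typical caller creates a pool, pushes its tasks, and then calls BLI_task_pool_work_and_wait so the calling thread helps drain its own queue instead of sleeping. The sketch below assumes the 2.7x-era BLI_task API (BLI_task_pool_create, BLI_task_pool_push with a TASK_PRIORITY_LOW argument, BLI_task_pool_free); check the headers of your Blender version before copying it.

/* Hedged usage sketch; signatures assumed from the same era as Example 2. */
static void my_task(TaskPool *pool, void *taskdata, int threadid)
{
    (void)pool; (void)threadid;
    /* ... process the one work item pointed to by taskdata ... */
}

static void run_all(TaskScheduler *scheduler, void **items, int count)
{
    TaskPool *pool = BLI_task_pool_create(scheduler, NULL);
    for (int i = 0; i < count; i++) {
        /* free_taskdata = false: items[] stays owned by the caller. */
        BLI_task_pool_push(pool, my_task, items[i], false, TASK_PRIORITY_LOW);
    }
    BLI_task_pool_work_and_wait(pool);  /* returns once pool->num hits zero */
    BLI_task_pool_free(pool);
}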
Example 3: task_scheduler_thread_wait_pop
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
    bool found_task = false;
    BLI_mutex_lock(&scheduler->queue_mutex);

    while (!scheduler->queue.first && !scheduler->do_exit)
        BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

    do {
        Task *current_task;

        /* Assuming we can only have an empty queue in the 'exit' case here seems
         * logical (we should only be here after our worker thread has been woken
         * up from a condition_wait(), which only happens after a new task was
         * added to the queue), but it is wrong.
         * Waiting on a condition may wake up the thread even if the condition was
         * not signaled (spurious wake-ups), and some race condition may also empty
         * the queue **after** the condition has been signaled, but **before** the
         * awoken thread reaches this point...
         * See http://stackoverflow.com/questions/8594591
         *
         * So we only abort here if do_exit is set.
         */
        if (scheduler->do_exit) {
            BLI_mutex_unlock(&scheduler->queue_mutex);
            return false;
        }

        for (current_task = scheduler->queue.first;
             current_task != NULL;
             current_task = current_task->next)
        {
            TaskPool *pool = current_task->pool;

            if (scheduler->background_thread_only && !pool->run_in_background) {
                continue;
            }

            if (atomic_add_and_fetch_z(&pool->currently_running_tasks, 1) <= pool->num_threads ||
                pool->num_threads == 0)
            {
                *task = current_task;
                found_task = true;
                BLI_remlink(&scheduler->queue, *task);
                break;
            }
            else {
                atomic_sub_and_fetch_z(&pool->currently_running_tasks, 1);
            }
        }

        if (!found_task)
            BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
    } while (!found_task);

    BLI_mutex_unlock(&scheduler->queue_mutex);
    return true;
}
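The long comment in Example 3 is the classic argument for re-checking a condition variable's predicate after every wake-up. Stripped of the scheduler details, the safe pattern it describes looks like this (queue_has_work, mutex and cond are placeholders, not names from the example):

/* Predicate-loop sketch using the same BLI primitives as the examples. */
BLI_mutex_lock(&mutex);
while (!queue_has_work() && !do_exit) {
    /* The wait may return spuriously, or another thread may have drained
     * the queue between the signal and this thread waking up, so the
     * predicate is re-tested on every iteration. */
    BLI_condition_wait(&cond, &mutex);
}
/* Here the mutex is held and either work exists or do_exit is set. */
BLI_mutex_unlock(&mutex);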
Example 4: preview_startjob
/* Only this part runs inside the thread. */
static void preview_startjob(void *data, short *stop, short *do_update, float *progress)
{
    PreviewJob *pj = data;
    PreviewJobAudio *previewjb;

    BLI_mutex_lock(pj->mutex);
    previewjb = pj->previews.first;
    BLI_mutex_unlock(pj->mutex);

    while (previewjb) {
        PreviewJobAudio *preview_next;
        bSound *sound = previewjb->sound;

        BKE_sound_read_waveform(sound, stop);

        if (*stop || G.is_break) {
            BLI_mutex_lock(pj->mutex);
            previewjb = previewjb->next;
            BLI_mutex_unlock(pj->mutex);

            while (previewjb) {
                sound = previewjb->sound;

                /* Make sure we clean up the loading flag! */
                BLI_spin_lock(sound->spinlock);
                sound->tags &= ~SOUND_TAGS_WAVEFORM_LOADING;
                BLI_spin_unlock(sound->spinlock);

                BLI_mutex_lock(pj->mutex);
                previewjb = previewjb->next;
                BLI_mutex_unlock(pj->mutex);
            }

            BLI_mutex_lock(pj->mutex);
            BLI_freelistN(&pj->previews);
            pj->total = 0;
            pj->processed = 0;
            BLI_mutex_unlock(pj->mutex);
            break;
        }

        BLI_mutex_lock(pj->mutex);
        preview_next = previewjb->next;
        BLI_freelinkN(&pj->previews, previewjb);
        previewjb = preview_next;
        pj->processed++;
        *progress = (pj->total > 0) ? (float)pj->processed / (float)pj->total : 1.0f;
        *do_update = true;
        BLI_mutex_unlock(pj->mutex);
    }
}
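Example 4 shows a useful discipline: pj->mutex is held only across list reads and updates, and released around the expensive BKE_sound_read_waveform() call so other threads can keep appending previews. Reduced to a skeleton (do_expensive_work and the local names are placeholders):

/* Sketch: lock only around shared-list access, never around the work. */
BLI_mutex_lock(pj->mutex);
item = pj->previews.first;           /* read shared state under lock */
BLI_mutex_unlock(pj->mutex);

while (item) {
    do_expensive_work(item);         /* long-running call, runs unlocked */

    BLI_mutex_lock(pj->mutex);
    next = item->next;               /* unlink under lock, then advance */
    BLI_freelinkN(&pj->previews, item);
    BLI_mutex_unlock(pj->mutex);
    item = next;
}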
Example 5: task_scheduler_clear
static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
{
    Task *task, *nexttask;
    size_t done = 0;

    BLI_mutex_lock(&scheduler->queue_mutex);

    /* free all tasks from this pool from the queue */
    for (task = scheduler->queue.first; task; task = nexttask) {
        nexttask = task->next;

        if (task->pool == pool) {
            if (task->free_taskdata)
                MEM_freeN(task->taskdata);
            BLI_freelinkN(&scheduler->queue, task);

            done++;
        }
    }

    BLI_mutex_unlock(&scheduler->queue_mutex);

    /* notify done */
    task_pool_num_decrease(pool, done);
}
Example 6: BLI_mutex_lock
void KX_BlenderSceneConverter::MergeAsyncLoads()
{
    vector<KX_Scene *> *merge_scenes;
    vector<KX_LibLoadStatus *>::iterator mit;
    vector<KX_Scene *>::iterator sit;

    BLI_mutex_lock(&m_threadinfo->m_mutex);

    for (mit = m_mergequeue.begin(); mit != m_mergequeue.end(); ++mit) {
        merge_scenes = (vector<KX_Scene *> *)(*mit)->GetData();

        for (sit = merge_scenes->begin(); sit != merge_scenes->end(); ++sit) {
            (*mit)->GetMergeScene()->MergeScene(*sit);
            delete (*sit);
        }

        delete merge_scenes;
        (*mit)->SetData(NULL);

        (*mit)->Finish();
    }

    m_mergequeue.clear();

    BLI_mutex_unlock(&m_threadinfo->m_mutex);
}
Example 7: UNUSED_FUNCTION
static void UNUSED_FUNCTION(COM_freeCaches)()
{
    if (is_compositorMutex_init) {
        BLI_mutex_lock(&s_compositorMutex);
        intern_freeCompositorCaches();
        BLI_mutex_unlock(&s_compositorMutex);
    }
}
Example 8: task_pool_num_increase
static void task_pool_num_increase(TaskPool *pool)
{
    BLI_mutex_lock(&pool->num_mutex);

    pool->num++;
    BLI_condition_notify_all(&pool->num_cond);

    BLI_mutex_unlock(&pool->num_mutex);
}
Example 9: COM_deinitialize
void COM_deinitialize()
{
    if (is_compositorMutex_init) {
        BLI_mutex_lock(&s_compositorMutex);
        WorkScheduler::deinitialize();
        is_compositorMutex_init = false;
        BLI_mutex_unlock(&s_compositorMutex);
        BLI_mutex_end(&s_compositorMutex);
    }
}
Example 10: COM_deinitialize
void COM_deinitialize()
{
    if (is_compositorMutex_init) {
        BLI_mutex_lock(&s_compositorMutex);
        intern_freeCompositorCaches();
        WorkScheduler::deinitialize();
        is_compositorMutex_init = FALSE;
        BLI_mutex_unlock(&s_compositorMutex);
        BLI_mutex_end(&s_compositorMutex);
    }
}
Example 11: task_scheduler_thread_wait_pop
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
    BLI_mutex_lock(&scheduler->queue_mutex);

    while (!scheduler->queue.first && !scheduler->do_exit)
        BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

    if (!scheduler->queue.first) {
        BLI_mutex_unlock(&scheduler->queue_mutex);
        BLI_assert(scheduler->do_exit);
        return false;
    }

    *task = scheduler->queue.first;
    BLI_remlink(&scheduler->queue, *task);

    BLI_mutex_unlock(&scheduler->queue_mutex);

    return true;
}
Example 12: eevee_lightbake_delete_resources
static void eevee_lightbake_delete_resources(EEVEE_LightBake *lbake)
{
    if (!lbake->resource_only) {
        BLI_mutex_lock(lbake->mutex);
    }

    if (lbake->gl_context) {
        DRW_opengl_render_context_enable(lbake->gl_context);
        DRW_gawain_render_context_enable(lbake->gpu_context);
    }
    else if (!lbake->resource_only) {
        DRW_opengl_context_enable();
    }

    /* XXX Free the resources contained in the viewlayer data
     * to be able to free the context before deleting the depsgraph. */
    if (lbake->sldata) {
        EEVEE_view_layer_data_free(lbake->sldata);
    }

    DRW_TEXTURE_FREE_SAFE(lbake->rt_depth);
    DRW_TEXTURE_FREE_SAFE(lbake->rt_color);
    DRW_TEXTURE_FREE_SAFE(lbake->grid_prev);
    GPU_FRAMEBUFFER_FREE_SAFE(lbake->store_fb);
    for (int i = 0; i < 6; ++i) {
        GPU_FRAMEBUFFER_FREE_SAFE(lbake->rt_fb[i]);
    }

    if (lbake->gpu_context) {
        DRW_gawain_render_context_disable(lbake->gpu_context);
        DRW_gawain_render_context_enable(lbake->gpu_context);
        GPU_context_discard(lbake->gpu_context);
    }

    if (lbake->gl_context && lbake->own_resources) {
        /* Delete the baking context. */
        DRW_opengl_render_context_disable(lbake->gl_context);
        WM_opengl_context_dispose(lbake->gl_context);
        lbake->gpu_context = NULL;
        lbake->gl_context = NULL;
    }
    else if (lbake->gl_context) {
        DRW_opengl_render_context_disable(lbake->gl_context);
    }
    else if (!lbake->resource_only) {
        DRW_opengl_context_disable();
    }

    if (!lbake->resource_only) {
        BLI_mutex_unlock(lbake->mutex);
    }
}
Example 13: task_pool_num_decrease
static void task_pool_num_decrease(TaskPool *pool, size_t done)
{
    BLI_mutex_lock(&pool->num_mutex);

    BLI_assert(pool->num >= done);

    pool->num -= done;
    if (pool->num == 0)
        BLI_condition_notify_all(&pool->num_cond);

    BLI_mutex_unlock(&pool->num_mutex);
}
Example 14: task_scheduler_thread_wait_pop
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
    bool found_task = false;
    BLI_mutex_lock(&scheduler->queue_mutex);

    while (!scheduler->queue.first && !scheduler->do_exit)
        BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

    do {
        Task *current_task;

        if (!scheduler->queue.first) {
            BLI_mutex_unlock(&scheduler->queue_mutex);
            BLI_assert(scheduler->do_exit);
            return false;
        }

        for (current_task = scheduler->queue.first;
             current_task != NULL;
             current_task = current_task->next)
        {
            TaskPool *pool = current_task->pool;

            if (pool->num_threads == 0 ||
                pool->currently_running_tasks < pool->num_threads)
            {
                *task = current_task;
                found_task = true;
                atomic_add_z(&pool->currently_running_tasks, 1);
                BLI_remlink(&scheduler->queue, *task);
                break;
            }
        }

        if (!found_task)
            BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
    } while (!found_task);

    BLI_mutex_unlock(&scheduler->queue_mutex);

    return true;
}
Example 15: BLI_task_pool_cancel
void BLI_task_pool_cancel(TaskPool *pool)
{
    pool->do_cancel = true;

    task_scheduler_clear(pool->scheduler, pool);

    /* wait until all entries are cleared */
    BLI_mutex_lock(&pool->num_mutex);
    while (pool->num)
        BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
    BLI_mutex_unlock(&pool->num_mutex);

    pool->do_cancel = false;
}