本文整理汇总了C++中PPUThread类的典型用法代码示例。如果您正苦于以下问题:C++ PPUThread类的具体用法?C++ PPUThread怎么用?C++ PPUThread使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PPUThread类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: _sys_ppu_thread_exit
// Exit the calling PPU thread (HLE body of lv2 syscall 41).
// errorcode: thread exit status (only traced here; storage handled elsewhere).
void _sys_ppu_thread_exit(PPUThread& ppu, u64 errorcode)
{
sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
LV2_LOCK;
// get all sys_mutex objects
for (auto& mutex : idm::get_all<lv2_mutex_t>())
{
// unlock mutex if locked by this thread (lv2 force-releases mutexes held by an exiting thread)
if (mutex->owner.get() == &ppu)
{
mutex->unlock(lv2_lock);
}
}
if (!ppu.is_joinable)
{
// detached thread: destroy the thread object immediately
idm::remove<PPUThread>(ppu.get_id());
}
else
{
// joinable thread: keep the object alive so a joiner can collect the exit status
ppu.exit();
}
// Throw if this syscall was not called directly by the SC instruction
// NOTE(review): this assumes hle_code holds the bitwise complement of the
// syscall number (so ~hle_code == 41 only for a direct SC entry) — confirm
// against the HLE dispatcher that sets hle_code.
if (~ppu.hle_code != 41)
{
throw CPUThreadExit{};
}
}
示例2: sys_rwlock_trywlock
// Attempt to take the rwlock for writing without blocking.
// Returns CELL_ESRCH for an unknown id, CELL_EDEADLK on recursive locking,
// CELL_EBUSY when the lock is held or contended, CELL_OK on success.
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

	LV2_LOCK;

	const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	const u32 self_id = CPU.GetId();

	// Re-acquiring the write lock from its current owner is a deadlock by definition
	if (rwlock->writer == self_id)
	{
		return CELL_EDEADLK;
	}

	// The write lock is only granted when the lock is completely idle:
	// no readers, no writer, and no writers already queued
	const bool contended = rwlock->readers || rwlock->writer || rwlock->wwaiters;

	if (contended)
	{
		return CELL_EBUSY;
	}

	rwlock->writer = self_id;

	return CELL_OK;
}
示例3: sys_interrupt_thread_eoi
// Signal "end of interrupt" for an interrupt-handler PPU thread.
void sys_interrupt_thread_eoi(PPUThread& CPU)
{
	sys_interrupt.Log("sys_interrupt_thread_eoi()");

	// TODO: maybe it should actually unwind the stack (ensure that all the automatic objects are finalized)?

	// Reset r1 (the stack pointer) to just below the aligned stack top —
	// a supercrutch that merely hides error messages instead of unwinding.
	const auto stack_top = align(CPU.GetStackAddr() + CPU.GetStackSize(), 0x200);
	CPU.GPR[1] = stack_top - 0x200;

	CPU.FastStop();
}
示例4: sys_ppu_thread_get_stack_information
// Write the calling thread's stack information (two 32-bit fields: base
// address, then size) into guest memory at info_addr. Always succeeds.
s32 sys_ppu_thread_get_stack_information(PPUThread& CPU, u32 info_addr)
{
	sys_ppu_thread.Log("sys_ppu_thread_get_stack_information(info_addr=0x%x)", info_addr);

	const u32 stack_addr = (u32)CPU.GetStackAddr();

	vm::write32(info_addr, stack_addr);
	vm::write32(info_addr + 4, CPU.GetStackSize());

	return CELL_OK;
}
示例5: ppu_thread_exit
// Internal PPU thread exit helper: reports leaked mutex ownership, records
// the exit status, then stops the thread.
void ppu_thread_exit(PPUThread& CPU, u64 errorcode)
{
	// Exiting while still owning mutexes indicates a guest-side bug:
	// report it and reset the counter so state stays consistent.
	if (CPU.owned_mutexes != 0)
	{
		sys_ppu_thread.Error("Owned mutexes found (%d)", CPU.owned_mutexes);
		CPU.owned_mutexes = 0;
	}

	CPU.SetExitStatus(errorcode);
	CPU.Stop();
}
示例6: sys_lwmutex_destroy
// Destroy a user-space lightweight mutex. The mutex is first acquired so no
// other thread can hold it while it is being torn down; on any failure the
// state is rolled back and the error is propagated.
s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
	sysPrxForUser.Log("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);

	// Destroying a mutex we already own would recursively lock in the trylock below
	if (lwmutex->owner.read_relaxed() == CPU.GetId())
	{
		return CELL_EBUSY;
	}

	// Take the mutex; any failure code is propagated unchanged
	const s32 lock_res = sys_lwmutex_trylock(CPU, lwmutex);

	if (lock_res != 0)
	{
		return lock_res;
	}

	// Destroy the kernel-side sleep queue
	const s32 destroy_res = _sys_lwmutex_destroy(lwmutex->sleep_queue);

	if (destroy_res != 0)
	{
		// Roll back: release the mutex we just acquired
		sys_lwmutex_unlock(CPU, lwmutex);
		return destroy_res;
	}

	// Success: poison the owner field so further use is detectable
	lwmutex->owner.exchange(lwmutex::dead);

	return CELL_OK;
}
示例7: sys_ppu_thread_get_id
// Store the calling PPU thread's id into guest memory at *thread_id.
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<be_t<u64>> thread_id)
{
	sys_ppu_thread.Log("sys_ppu_thread_get_id(thread_id_addr=0x%x)", thread_id.addr());

	const u64 id = CPU.GetId();
	*thread_id = id;

	return CELL_OK;
}
示例8: sys_rwlock_wunlock
// Release a held write lock and wake the next waiter(s).
// Returns CELL_ESRCH for an unknown id, CELL_EPERM when the caller is not
// the current writer, CELL_OK on success.
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

	LV2_LOCK;

	const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	const u32 self_id = CPU.GetId();

	// Only the thread that owns the write lock may release it
	if (rwlock->writer != self_id)
	{
		return CELL_EPERM;
	}

	rwlock->writer = 0;

	// Writers have priority: wake a single pending writer if any,
	// otherwise release every pending reader at once.
	if (rwlock->wwaiters)
	{
		rwlock->wcv.notify_one();
	}
	else if (rwlock->rwaiters)
	{
		rwlock->rcv.notify_all();
	}

	return CELL_OK;
}
示例9: sys_rwlock_trywlock
// Attempt to take the rwlock for writing without blocking (idm-based variant).
// Returns CELL_ESRCH for an unknown id, CELL_EDEADLK on recursive locking,
// CELL_EBUSY when the lock is held or contended, CELL_OK on success.
s32 sys_rwlock_trywlock(PPUThread& ppu, u32 rw_lock_id)
{
	sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

	LV2_LOCK;

	const auto rwlock = idm::get<lv2_rwlock_t>(rw_lock_id);

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	// Re-acquiring the write lock from its current owner would deadlock
	if (rwlock->writer.get() == &ppu)
	{
		return CELL_EDEADLK;
	}

	// The write lock is only granted when the lock is completely idle:
	// no readers, no writer, and an empty writer sleep queue
	const bool contended = rwlock->readers || rwlock->writer || rwlock->wsq.size();

	if (contended)
	{
		return CELL_EBUSY;
	}

	rwlock->writer = std::static_pointer_cast<CPUThread>(ppu.shared_from_this());

	return CELL_OK;
}
示例10: _sys_lwcond_queue_wait
// Block the calling thread on a lightweight condition variable, completing
// the release of the paired lightweight mutex first (kernel half of
// sys_lwcond_wait).
//
// ppu        : calling thread
// lwcond_id  : kernel id of the lv2_lwcond_t object
// lwmutex_id : kernel id of the lv2_lwmutex_t the caller was holding
// timeout    : wait limit in microseconds; 0 means wait forever
//
// Returns CELL_ESRCH for a bad id, CELL_ETIMEDOUT when the timeout expires,
// CELL_EDEADLK when the timed-out thread re-acquires the signaled mutex
// (the caller interprets this as "mutex reowned"), and otherwise a cause
// code derived from GPR[3] set by the signaler (CELL_EBUSY or CELL_OK).
s32 _sys_lwcond_queue_wait(PPUThread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	sys_lwcond.Log("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

	const u64 start_time = get_system_time();

	LV2_LOCK;

	const auto cond = idm::get<lv2_lwcond_t>(lwcond_id);
	const auto mutex = idm::get<lv2_lwmutex_t>(lwmutex_id);

	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}

	// finalize unlocking the mutex
	mutex->unlock(lv2_lock);

	// add waiter; protocol is ignored in current implementation
	sleep_queue_entry_t waiter(ppu, cond->sq);

	// potential mutex waiter (not added immediately)
	// FIX: the deferred waiter must be registered on the MUTEX's sleep queue,
	// not the condition variable's — a signaler handing the mutex over looks
	// for this thread among the mutex waiters, never in the cond queue.
	sleep_queue_entry_t mutex_waiter(ppu, mutex->sq, defer_sleep);

	while (!ppu.unsignal())
	{
		CHECK_EMU_STATUS;

		if (timeout && waiter)
		{
			const u64 passed = get_system_time() - start_time;

			if (passed >= timeout)
			{
				// try to reown the mutex if timed out
				if (mutex->signaled)
				{
					mutex->signaled--;

					return CELL_EDEADLK;
				}
				else
				{
					return CELL_ETIMEDOUT;
				}
			}

			// wait for the remaining time only
			ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
		}
		else
		{
			// infinite wait (or already removed from the cond queue)
			ppu.cv.wait(lv2_lock);
		}
	}

	// return cause (stored in GPR[3] by the signaler)
	return ppu.GPR[3] ? CELL_EBUSY : CELL_OK;
}
示例11: sys_ppu_thread_get_id
// Store the calling PPU thread's id into guest memory at *thread_id.
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<u64> thread_id)
{
	sysPrxForUser.Log("sys_ppu_thread_get_id(thread_id=*0x%x)", thread_id);

	const u64 id = CPU.GetId();
	*thread_id = id;

	return CELL_OK;
}
示例12: sys_lwcond_signal_to
// Wake the specific thread ppu_thread_id waiting on the lightweight condition
// variable (user-space half; the real wakeup happens in the _sys_lwcond_signal
// syscall). Which path is taken depends on whether the caller currently owns
// the paired lwmutex. NOTE(review): the last argument of _sys_lwcond_signal
// (1/2/3) selects a signaling mode whose semantics live in the syscall
// implementation, not visible here — confirm before changing.
s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
{
sysPrxForUser.Log("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=0x%x)", lwcond, ppu_thread_id);
const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;
// The SYS_SYNC_RETRY protocol would require a different signaling mode; unimplemented
if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
{
// TODO (protocol ignored)
//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
}
if (lwmutex->owner.read_relaxed() == CPU.GetId())
{
// if owns the mutex
// pre-account the wakeup; rolled back below if the syscall fails
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
{
lwmutex->all_info--;
return res;
}
return CELL_OK;
}
if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
{
// if locking failed
if (res != CELL_EBUSY)
{
// any failure other than "busy" means the objects are gone
return CELL_ESRCH;
}
// call the syscall (lock-less variant)
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
}
// if locking succeeded
// pre-account the wakeup; rolled back below if the syscall fails
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
{
lwmutex->all_info--;
// unlock the lightweight mutex
sys_lwmutex_unlock(CPU, lwmutex);
return res;
}
return CELL_OK;
}
示例13: sys_lwcond_signal
// Wake one thread waiting on the lightweight condition variable (user-space
// half; the wakeup itself happens in the _sys_lwcond_signal syscall, with -1
// meaning "any waiting thread"). The path taken depends on whether the caller
// currently owns the paired lwmutex. NOTE(review): the final argument (1/2/3)
// selects a signaling mode defined by the syscall implementation — confirm
// before changing.
s32 sys_lwcond_signal(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
{
sysPrxForUser.trace("sys_lwcond_signal(lwcond=*0x%x)", lwcond);
const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;
// The SYS_SYNC_RETRY protocol would require a different signaling mode; unimplemented
if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
{
// TODO (protocol ignored)
//return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
}
if (lwmutex->vars.owner.load() == ppu.get_id())
{
// if owns the mutex
// pre-account the wakeup; rolled back below if the syscall fails
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
{
lwmutex->all_info--;
// CELL_EPERM here is treated as success (presumably: no waiter to wake) — TODO confirm
return res == CELL_EPERM ? CELL_OK : res;
}
return CELL_OK;
}
if (s32 res = sys_lwmutex_trylock(ppu, lwmutex))
{
// if locking failed
if (res != CELL_EBUSY)
{
// any failure other than "busy" means the objects are gone
return CELL_ESRCH;
}
// call the syscall (lock-less variant)
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
}
// if locking succeeded
// pre-account the wakeup; rolled back below if the syscall fails
lwmutex->all_info++;
// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
{
lwmutex->all_info--;
// unlock the lightweight mutex
sys_lwmutex_unlock(ppu, lwmutex);
// CELL_ENOENT here is treated as success (presumably: waiter already gone) — TODO confirm
return res == CELL_ENOENT ? CELL_OK : res;
}
return CELL_OK;
}
示例14: sys_lwcond_signal_all
// Wake every thread waiting on the lightweight condition variable (user-space
// half; the wakeups happen in the _sys_lwcond_signal_all syscall, which
// returns the number of threads woken or a negative error).
s32 sys_lwcond_signal_all(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond)
{
	sysPrxForUser.trace("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

	const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

	if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
	{
		// TODO (protocol ignored)
		//return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
	}

	const bool is_owner = lwmutex->vars.owner.load() == ppu.get_id();

	if (is_owner)
	{
		// Caller already owns the mutex: signal directly
		const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

		if (res > 0)
		{
			// account for every woken thread
			lwmutex->all_info += res;
			return CELL_OK;
		}

		// error code or CELL_OK (nothing woken)
		return res;
	}

	const s32 lock_res = sys_lwmutex_trylock(ppu, lwmutex);

	if (lock_res == CELL_EBUSY)
	{
		// Could not take the mutex: use the lock-less signaling variant
		return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
	}

	if (lock_res != 0)
	{
		// Any other locking failure maps to ESRCH
		return CELL_ESRCH;
	}

	// Locked successfully: signal, then release the mutex before returning
	s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

	if (res > 0)
	{
		lwmutex->all_info += res;
		res = CELL_OK;
	}

	sys_lwmutex_unlock(ppu, lwmutex);

	return res;
}
示例15: sys_lwcond_signal_all
// Wake every thread waiting on the lightweight condition variable (older API
// variant using read_relaxed/se32). The _sys_lwcond_signal_all syscall
// returns the number of threads woken, or a negative error.
s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
{
sysPrxForUser.Log("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);
const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;
// The SYS_SYNC_RETRY protocol would require a different signaling mode; unimplemented
if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
{
// TODO (protocol ignored)
//return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
}
if (lwmutex->owner.read_relaxed() == CPU.GetId())
{
// if owns the mutex, call the syscall
const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
if (res <= 0)
{
// return error or CELL_OK
return res;
}
// account for every woken thread
lwmutex->all_info += res;
return CELL_OK;
}
if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
{
// if locking failed
if (res != CELL_EBUSY)
{
// any failure other than "busy" means the objects are gone
return CELL_ESRCH;
}
// call the syscall (lock-less variant)
return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
}
// if locking succeeded, call the syscall
s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);
if (res > 0)
{
// account for every woken thread
lwmutex->all_info += res;
res = CELL_OK;
}
// unlock mutex (its result is deliberately ignored; res already holds the outcome)
sys_lwmutex_unlock(CPU, lwmutex);
return res;
}