本文整理汇总了C/C++中KeRaiseIrql函数的典型用法代码示例。如果您正苦于以下问题:C/C++ KeRaiseIrql函数的具体用法?C/C++ KeRaiseIrql怎么用?C/C++ KeRaiseIrql使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了KeRaiseIrql函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: InbvAcquireLock
/*
 * Acquires the boot-video driver spinlock, raising to DISPATCH_LEVEL first
 * when running below it.  The pre-raise IRQL is stashed in the global
 * InbvOldIrql so InbvReleaseLock can restore it later.
 */
VOID
NTAPI
InbvAcquireLock(VOID)
{
    KIRQL PreviousIrql = KeGetCurrentIrql();

    /* At or below DISPATCH_LEVEL we can be preempted, so spin until the
       lock looks free, then pin ourselves at DISPATCH_LEVEL. */
    if (PreviousIrql <= DISPATCH_LEVEL)
    {
        /* Busy-wait for the lock to appear available */
        while (!KeTestSpinLock(&BootDriverLock))
        {
            /* keep spinning */
        }

        KeRaiseIrql(DISPATCH_LEVEL, &PreviousIrql);
    }

    /* Take the lock for real and remember the IRQL to restore on release */
    KiAcquireSpinLock(&BootDriverLock);
    InbvOldIrql = PreviousIrql;
}
示例2: KdEnableDebugger
/*
* @implemented
*/
/*
 * @implemented
 *
 * Turns the kernel debugger on, flipping both the kernel-private flag and
 * the user-visible copy in SharedUserData.  The update is done at
 * DISPATCH_LEVEL to keep the two flags changing without preemption.
 *
 * Always succeeds.
 */
NTSTATUS
NTAPI
KdEnableDebugger(VOID)
{
    KIRQL PreviousIrql;

    /* Pin to DISPATCH_LEVEL while flipping the flags */
    KeRaiseIrql(DISPATCH_LEVEL, &PreviousIrql);

    /* TODO: Re-enable any breakpoints */

    /* Mark the debugger enabled, kernel-side and user-visible */
    KdDebuggerEnabled = TRUE;
    SharedUserData->KdDebuggerEnabled = TRUE;

    /* Back to the caller's IRQL */
    KeLowerIrql(PreviousIrql);

    return STATUS_SUCCESS;
}
示例3: RTDECL
/**
 * Pokes the given CPU: preferably via a direct IPI, otherwise by queueing a
 * high-importance DPC targeted at that CPU.
 *
 * Returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND / VERR_CPU_OFFLINE for
 * invalid CPUs, or VERR_ACCESS_DENIED when the poke DPC is already queued.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    /* Reject CPUs that are not currently online. */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
            ? VERR_CPU_NOT_FOUND
            : VERR_CPU_OFFLINE;

    /* Preferred path: send an IPI through the resolved worker function. */
    int rc = g_pfnrtSendIpi(idCpu);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Fallback. */
    if (!fPokeDPCsInitialized)
    {
        /* NOTE(review): this lazy initialization is not guarded by any lock;
           two first-time callers could race here — confirm callers serialize
           the first invocation. */
        for (unsigned i = 0; i < RT_ELEMENTS(aPokeDpcs); i++)
        {
            KeInitializeDpc(&aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&aPokeDpcs[i], (int)i);
        }
        fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /* NOTE(review): aPokeDpcs is indexed by idCpu with no range check against
       RT_ELEMENTS(aPokeDpcs) — verify idCpu is always a valid array index. */
    KeSetImportanceDpc(&aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(oldIrql);

    /* KeInsertQueueDpc returns FALSE when the DPC is already in the queue. */
    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
示例4: CreateTrampoline
/*
 * Builds (or reuses) the keyboard-ISR trampoline stub and, on first creation,
 * installs it as the i8042 keyboard ISR hook.
 *
 * The trampoline is a three-byte x86 sequence: PUSH imm32 <IsrHookRoutine>
 * followed by RET, which transfers control to the hook routine.
 *
 * BUGFIX vs. original: the ExAllocatePool result was dereferenced without a
 * NULL check; an allocation failure would have caused a bugcheck at
 * HIGH_LEVEL.  Also, the final KdPrint format string did not consume its
 * TrampolineIsr argument.
 */
VOID CreateTrampoline()
{
    PSHARED_DISP_DATA disp = GetSharedData();

    /* Refuse to touch a shared block whose signature is damaged */
    if (disp->Signature != SHARED_SIGNATURE)
    {
        KdPrint (("ngvid:" __FUNCTION__ ": Damaged shared block %X signature %X should be %X\n",
            disp, disp->Signature, SHARED_SIGNATURE));
        return;
    }

    if (disp->Trampoline)
    {
        /* A previous instance already published a trampoline: reuse it */
        KdPrint(("Trampoline already exists at %X\n", disp->Trampoline));
        TrampolineIsr = (PTRAMPOLINE) disp->Trampoline;
    }
    else
    {
        TrampolineIsr = (PTRAMPOLINE) ExAllocatePool (NonPagedPool, sizeof(TRAMPOLINE));
        KdPrint(("Trampoline allocated at %X\n", TrampolineIsr));

        /* Pool allocation can fail — bail out instead of dereferencing NULL */
        if (TrampolineIsr == NULL)
        {
            KdPrint(("ngvid:" __FUNCTION__ ": Trampoline allocation failed\n"));
            return;
        }
    }

    /* Patch the stub at HIGH_LEVEL so no interrupt can execute a
       half-written trampoline */
    KIRQL Irql;
    KeRaiseIrql (HIGH_LEVEL, &Irql);
    TrampolineIsr->e1.PushOpcode = 0x68;          /* x86 PUSH imm32 */
    TrampolineIsr->e1.Address = IsrHookRoutine;   /* hook target */
    TrampolineIsr->e1.RetOpcode = 0xC3;           /* x86 RET */
    KeLowerIrql (Irql);

    KdPrint(("Trampoline created at %X\n", TrampolineIsr));

    if (disp->Trampoline == NULL)
    {
        /* First creation: hook the keyboard ISR and publish the trampoline */
        I8042HookKeyboard ((PI8042_KEYBOARD_ISR) TrampolineIsr);
        disp->Trampoline = TrampolineIsr;
    }
}
示例5: _irqlevel_changed_
/*
 * XP-only IRQL shim: with bLower == LOWER it records the current IRQL in
 * *irqlevel and drops to PASSIVE_LEVEL when above it; otherwise it raises
 * to DISPATCH_LEVEL (saving the old level in *irqlevel) but only when
 * currently at PASSIVE_LEVEL.  A no-op on other platforms.
 */
void _irqlevel_changed_(_irqL *irqlevel, u8 bLower)
{
#ifdef PLATFORM_OS_XP
    if (bLower == LOWER) {
        /* Remember where we were, then drop to PASSIVE_LEVEL if needed */
        *irqlevel = KeGetCurrentIrql();
        if (*irqlevel > PASSIVE_LEVEL)
            KeLowerIrql(PASSIVE_LEVEL);
    } else if (KeGetCurrentIrql() == PASSIVE_LEVEL) {
        /* Only raise from PASSIVE_LEVEL; old level is saved for the caller */
        KeRaiseIrql(DISPATCH_LEVEL, irqlevel);
    }
#endif
}
示例6: VBoxDrvNtDeviceControl
/**
* Device I/O Control entry point.
*
* @param pDevObj Device object.
* @param pIrp Request packet.
*/
/**
 * Device I/O Control entry point.
 *
 * Fast-path dispatch: the three high-speed IOCTLs are handled inline for
 * unrestricted sessions; everything else goes to the slow-path handler.
 *
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
NTSTATUS _stdcall VBoxDrvNtDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PSUPDRVDEVEXT pDevExt = SUPDRVNT_GET_DEVEXT(pDevObj);
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    /* Per-session context stashed in the file object when it was opened. */
    PSUPDRVSESSION pSession = (PSUPDRVSESSION)pStack->FileObject->FsContext;

    /*
     * Deal with the two high-speed IOCtls that take their arguments from
     * the session and iCmd, and only return a VBox status code.
     *
     * Note: The previous method of returning the rc prior to IOC version
     * 7.4 has been abandoned; we're no longer compatible with that
     * interface.
     */
    ULONG ulCmd = pStack->Parameters.DeviceIoControl.IoControlCode;
    if (   (   ulCmd == SUP_IOCTL_FAST_DO_RAW_RUN
            || ulCmd == SUP_IOCTL_FAST_DO_HM_RUN
            || ulCmd == SUP_IOCTL_FAST_DO_NOP)
        && pSession->fUnrestricted == true)
    {
        /* UserBuffer is repurposed to carry the VMCPU id, not a real buffer. */
        int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);
#if 0   /* When preemption was not used i.e. !VBOX_WITH_VMMR0_DISABLE_PREEMPTION. That's no longer required. */
        /* Raise the IRQL to DISPATCH_LEVEL to prevent Windows from rescheduling us to another CPU/core. */
        Assert(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        KIRQL oldIrql;
        KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
        int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);
        KeLowerIrql(oldIrql);
#endif
        /* Complete the I/O request; the VBox rc is mapped onto an NT status. */
        NTSTATUS rcNt = pIrp->IoStatus.Status = RT_SUCCESS(rc) ? STATUS_SUCCESS : STATUS_INVALID_PARAMETER;
        IoCompleteRequest(pIrp, IO_NO_INCREMENT);
        return rcNt;
    }

    /* Everything else takes the full-featured slow path. */
    return VBoxDrvNtDeviceControlSlow(pDevExt, pSession, pIrp, pStack);
}
示例7: KeDeregisterBugCheckCallback
/*
* @implemented
*/
/*
 * @implemented
 *
 * Removes a previously registered bugcheck callback record from the global
 * callback list.  Runs at HIGH_LEVEL so not even the bugcheck path can walk
 * the list while it is being unlinked.
 *
 * Returns TRUE when the record was linked and has been removed, FALSE when
 * it was not in the inserted state.
 */
BOOLEAN
NTAPI
KeDeregisterBugCheckCallback(IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord)
{
    BOOLEAN Removed = FALSE;
    KIRQL PreviousIrql;

    /* Shut out everything, including other processors */
    KeRaiseIrql(HIGH_LEVEL, &PreviousIrql);

    /* Only records currently linked into the list may be unlinked */
    if (CallbackRecord->State == BufferInserted)
    {
        CallbackRecord->State = BufferEmpty;
        RemoveEntryList(&CallbackRecord->Entry);
        Removed = TRUE;
    }

    /* Restore the caller's IRQL and report the outcome */
    KeLowerIrql(PreviousIrql);
    return Removed;
}
示例8: stopTracing
/* Stops and cleans any tracing if needed */
void stopTracing()
{
KIRQL old_irql = 0;
PAGED_CODE();
/* Raise the IRQL otherwise new thread could be created while cleaning */
old_irql = KeGetCurrentIrql();
if (old_irql < APC_LEVEL) {
KeRaiseIrql (APC_LEVEL, &old_irql);
}
KdPrint( ("Oregano: stopTracing: Got a stop trace command\r\n") );
if (TRUE == is_new_thread_handler_installed) {
PsRemoveCreateThreadNotifyRoutine(newThreadHandler);
is_new_thread_handler_installed = FALSE;
} else {
KdPrint(( "Oregano: stopTracing: Not new thread notifier\r\n" ));
}
if (0 != targetProcessId) {
unsetTrapFlagForAllThreads(targetProcessId);
targetProcessId = 0;
}
if (NULL != targetEProcess) {
ObDereferenceObject( targetEProcess );
targetEProcess = NULL;
}
target_process = NULL;
RtlZeroMemory( loggingRanges, sizeof(loggingRanges) );
/* Set back the Irql */
if (old_irql < APC_LEVEL) {
KeLowerIrql( old_irql );
}
return;
}
示例9: ExiTryToAcquireFastMutex
/*
 * Non-blocking acquire of a fast mutex.
 *
 * Raises to APC_LEVEL (fast mutexes are held with APCs disabled) and then
 * attempts one atomic Count transition from 1 (free) to 0 (owned).  On
 * success the owning thread and the IRQL to restore are recorded in the
 * mutex; on failure the IRQL raise is undone.
 *
 * Returns TRUE when the mutex was acquired, FALSE otherwise.
 */
BOOLEAN
FASTCALL
ExiTryToAcquireFastMutex(PFAST_MUTEX FastMutex)
{
    KIRQL PreviousIrql;

    /* Fast mutexes are always owned at APC_LEVEL */
    KeRaiseIrql(APC_LEVEL, &PreviousIrql);

    /* Single atomic attempt: flip Count 1 -> 0 */
    if (InterlockedCompareExchange(&FastMutex->Count, 0, 1) != 1)
    {
        /* Contended: undo the raise and report failure */
        KeLowerIrql(PreviousIrql);
        return FALSE;
    }

    /* Acquired: record ownership and the IRQL to restore on release */
    FastMutex->Owner = KeGetCurrentThread();
    FastMutex->OldIrql = PreviousIrql;
    return TRUE;
}
示例10: rtMpPokeCpuUsingDpc
/*
 * Pokes the given CPU by queueing a high-importance DPC targeted at it
 * (the DPC/APC fallback when no direct IPI mechanism is available).
 *
 * Returns VINF_SUCCESS when the DPC was queued, VERR_ACCESS_DENIED when a
 * poke DPC for that CPU is already pending.
 */
int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
{
    /*
     * APC fallback: one lazily initialized DPC object per processor.
     */
    static KDPC s_aPokeDpcs[MAXIMUM_PROCESSORS] = {0};
    static bool s_fPokeDPCsInitialized = false;

    if (!s_fPokeDPCsInitialized)
    {
        unsigned iCpu;
        for (iCpu = 0; iCpu < RT_ELEMENTS(s_aPokeDpcs); iCpu++)
        {
            KeInitializeDpc(&s_aPokeDpcs[iCpu], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&s_aPokeDpcs[iCpu], HighImportance);
            KeSetTargetProcessorDpc(&s_aPokeDpcs[iCpu], (int)iCpu);
        }
        s_fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL uSavedIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &uSavedIrql);

    KeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN fQueued = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(uSavedIrql);

    /* FALSE from KeInsertQueueDpc means the DPC was already queued. */
    if (fQueued == TRUE)
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED /* already queued */;
}
示例11: rtMpCallUsingDpcs
/**
* Internal worker for the RTMpOn* APIs.
*
* @returns IPRT status code.
* @param pfnWorker The callback.
* @param pvUser1 User argument 1.
* @param pvUser2 User argument 2.
* @param enmCpuid What to do / is idCpu valid.
* @param idCpu Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
* RT_NT_CPUID_PAIR, otherwise ignored.
* @param idCpu2 Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
* @param pcHits Where to return the number of this. Optional.
*/
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
PRTMPARGS pArgs;
KDPC *paExecCpuDpcs;
#if 0
/* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
* driver verifier doesn't complain...
*/
AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif
#ifdef IPRT_TARGET_NT4
KAFFINITY Mask;
/* g_pfnrtNt* are not present on NT anyway. */
return VERR_NOT_SUPPORTED;
#else
KAFFINITY Mask = KeQueryActiveProcessors();
#endif
/* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
if (!g_pfnrtNtKeFlushQueuedDpcs)
return VERR_NOT_SUPPORTED;
pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
if (!pArgs)
return VERR_NO_MEMORY;
pArgs->pfnWorker = pfnWorker;
pArgs->pvUser1 = pvUser1;
pArgs->pvUser2 = pvUser2;
pArgs->idCpu = NIL_RTCPUID;
pArgs->idCpu2 = NIL_RTCPUID;
pArgs->cHits = 0;
pArgs->cRefs = 1;
paExecCpuDpcs = (KDPC *)(pArgs + 1);
if (enmCpuid == RT_NT_CPUID_SPECIFIC)
{
KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
pArgs->idCpu = idCpu;
}
else if (enmCpuid == RT_NT_CPUID_SPECIFIC)
{
KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
pArgs->idCpu = idCpu;
KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
pArgs->idCpu2 = idCpu2;
}
else
{
for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
{
KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
}
}
/* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
* KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
*/
KIRQL oldIrql;
KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
/*
* We cannot do other than assume a 1:1 relationship between the
* affinity mask and the process despite the warnings in the docs.
* If someone knows a better way to get this done, please let bird know.
*/
ASMCompilerBarrier(); /* paranoia */
if (enmCpuid == RT_NT_CPUID_SPECIFIC)
{
ASMAtomicIncS32(&pArgs->cRefs);
BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
Assert(ret);
}
else if (enmCpuid == RT_NT_CPUID_PAIR)
//.........这里部分代码省略.........
示例12: TransferPktComplete
//.........这里部分代码省略.........
pkt->OriginalIrp->IoStatus.Status = Irp->IoStatus.Status;
/*
* If the original I/O originated in user space (i.e. it is thread-queued),
* and the error is user-correctable (e.g. media is missing, for removable media),
* alert the user.
* Since this is only one of possibly several packets completing for the original IRP,
* we may do this more than once for a single request. That's ok; this allows
* us to test each returned status with IoIsErrorUserInduced().
*/
if (IoIsErrorUserInduced(Irp->IoStatus.Status) &&
pkt->CompleteOriginalIrpWhenLastPacketCompletes &&
pkt->OriginalIrp->Tail.Overlay.Thread){
IoSetHardErrorOrVerifyDevice(pkt->OriginalIrp, pkt->Fdo);
}
}
/*
* We use a field in the original IRP to count
* down the transfer pieces as they complete.
*/
numPacketsRemaining = InterlockedDecrement(
(PLONG)&pkt->OriginalIrp->Tail.Overlay.DriverContext[0]);
if (numPacketsRemaining > 0){
/*
* More transfer pieces remain for the original request.
* Wait for them to complete before completing the original irp.
*/
}
else {
/*
* All the transfer pieces are done.
* Complete the original irp if appropriate.
*/
ASSERT(numPacketsRemaining == 0);
if (pkt->CompleteOriginalIrpWhenLastPacketCompletes){
if (NT_SUCCESS(pkt->OriginalIrp->IoStatus.Status)){
ASSERT((ULONG)pkt->OriginalIrp->IoStatus.Information == origCurrentSp->Parameters.Read.Length);
ClasspPerfIncrementSuccessfulIo(fdoExt);
}
ClassReleaseRemoveLock(pkt->Fdo, pkt->OriginalIrp);
ClassCompleteRequest(pkt->Fdo, pkt->OriginalIrp, IO_DISK_INCREMENT);
/*
* We may have been called by one of the class drivers (e.g. cdrom)
* via the legacy API ClassSplitRequest.
* This is the only case for which the packet engine is called for an FDO
* with a StartIo routine; in that case, we have to call IoStartNextPacket
* now that the original irp has been completed.
*/
if (fdoExt->CommonExtension.DriverExtension->InitData.ClassStartIo) {
if (TEST_FLAG(pkt->Srb.SrbFlags, SRB_FLAGS_DONT_START_NEXT_PACKET)){
DBGTRAP(("SRB_FLAGS_DONT_START_NEXT_PACKET should never be set here (?)"));
}
else {
KIRQL oldIrql;
KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
IoStartNextPacket(pkt->Fdo, FALSE);
KeLowerIrql(oldIrql);
}
}
}
}
/*
* If the packet was synchronous, write the final
* result back to the issuer's status buffer and
* signal his event.
*/
if (pkt->SyncEventPtr){
KeSetEvent(pkt->SyncEventPtr, 0, FALSE);
pkt->SyncEventPtr = NULL;
}
/*
* Free the completed packet.
*/
pkt->OriginalIrp = NULL;
pkt->InLowMemRetry = FALSE;
EnqueueFreeTransferPacket(pkt->Fdo, pkt);
/*
* Now that we have freed some resources,
* try again to send one of the previously deferred irps.
*/
deferredIrp = DequeueDeferredClientIrp(fdoData);
if (deferredIrp){
DBGWARN(("... retrying deferred irp %xh.", deferredIrp));
ServiceTransferRequest(pkt->Fdo, deferredIrp);
}
ClassReleaseRemoveLock(Fdo, (PIRP)&uniqueAddr);
}
return STATUS_MORE_PROCESSING_REQUIRED;
}
示例13: PM_setMaxThreadPriority
/****************************************************************************
REMARKS:
Increase the thread priority to maximum, if possible.
****************************************************************************/
ulong PMAPI PM_setMaxThreadPriority(void)
{
KIRQL oldIrql;
KeRaiseIrql(DISPATCH_LEVEL+1,&oldIrql);
return oldIrql;
}
示例14: HalEnableSystemInterrupt
BOOLEAN
HalEnableSystemInterrupt (
IN ULONG Vector,
IN KIRQL Irql,
IN KINTERRUPT_MODE InterruptMode
)
/*++
Routine Description:
This routine enables the specified system interrupt.
Arguments:
Vector - Supplies the vector of the system interrupt that is enabled.
Irql - Supplies the IRQL of the interrupting source.
InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
Latched.
Return Value:
TRUE if the system interrupt was enabled
--*/
{
BOOLEAN Enabled = FALSE;
KIRQL OldIrql;
//
// Raise IRQL to the highest level.
//
KeRaiseIrql(HIGH_LEVEL, &OldIrql);
//
// If the vector number is within the range of the EISA interrupts, then
// enable the EISA interrrupt and set the Level/Edge register.
//
if (Vector >= EISA_VECTORS &&
Vector < MAXIMUM_EISA_VECTOR &&
Irql == DEVICE_HIGH_LEVEL) {
HalpEnableEisaInterrupt( Vector, InterruptMode );
Enabled = TRUE;
}
//
// If the vector number is within the range of the PCI interrupts, then
// enable the PCI interrrupt.
//
if (Vector >= PCI_VECTORS &&
Vector < MAXIMUM_PCI_VECTOR &&
Irql == DEVICE_HIGH_LEVEL) {
HalpEnablePciInterrupt( Vector, InterruptMode );
Enabled = TRUE;
}
//
// If the vector is a performance counter vector we will ignore
// the enable - the performance counters are enabled directly by
// the wrperfmon callpal. Wrperfmon must be controlled directly
// by the driver.
//
switch (Vector) {
case PC0_VECTOR:
case PC1_VECTOR:
case PC2_VECTOR:
Enabled = TRUE;
break;
case CORRECTABLE_VECTOR:
//
// Enable the correctable error interrupt.
//
{
CIA_ERR_MASK CiaErrMask;
CiaErrMask.all = READ_CIA_REGISTER(
&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask);
CiaErrMask.CorErr = 0x1;
WRITE_CIA_REGISTER(&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask,
CiaErrMask.all
);
HalpSetMachineCheckEnables( FALSE, FALSE, FALSE );
}
//.........这里部分代码省略.........
示例15: HalDisableSystemInterrupt
VOID
HalDisableSystemInterrupt (
IN ULONG Vector,
IN KIRQL Irql
)
/*++
Routine Description:
This routine disables the specified system interrupt.
Arguments:
Vector - Supplies the vector of the system interrupt that is disabled.
Irql - Supplies the IRQL of the interrupting source.
Return Value:
None.
--*/
{
KIRQL OldIrql;
//
// Raise IRQL to the highest level.
//
KeRaiseIrql(HIGH_LEVEL, &OldIrql);
//
// If the vector number is within the range of the EISA interrupts, then
// disable the EISA interrrupt.
//
if (Vector >= EISA_VECTORS &&
Vector < MAXIMUM_EISA_VECTOR &&
Irql == DEVICE_HIGH_LEVEL) {
HalpDisableEisaInterrupt(Vector);
}
//
// If the vector number is within the range of the PCI interrupts, then
// disable the PCI interrrupt.
//
if (Vector >= PCI_VECTORS &&
Vector < MAXIMUM_PCI_VECTOR &&
Irql == DEVICE_HIGH_LEVEL) {
HalpDisablePciInterrupt(Vector);
}
//
// If the vector is a performance counter vector we will ignore
// the enable - the performance counters are enabled directly by
// the wrperfmon callpal. Wrperfmon must be controlled directly
// by the driver.
//
switch (Vector) {
case PC0_VECTOR:
case PC1_VECTOR:
case PC2_VECTOR:
break;
case CORRECTABLE_VECTOR:
//
// Disable the correctable error interrupt.
//
{
CIA_ERR_MASK CiaErrMask;
CiaErrMask.all = READ_CIA_REGISTER(
&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask);
CiaErrMask.CorErr = 0x0;
WRITE_CIA_REGISTER(&((PCIA_ERROR_CSRS)(CIA_ERROR_CSRS_QVA))->ErrMask,
CiaErrMask.all
);
HalpSetMachineCheckEnables( FALSE, TRUE, TRUE );
}
break;
} //end switch Vector
//
// Lower IRQL to the previous level.
//
//.........这里部分代码省略.........