This page collects typical usage examples of the AllocatePages function in C++. If you are wondering how AllocatePages is used in practice, what it does, or what a real call site looks like, the hand-picked code samples below may help.
A total of 15 AllocatePages code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
Example 1: InitHostContextCommon
/**
This function initializes the host common context.
**/
VOID
InitHostContextCommon (
VOID
)
{
UINT32 Index;
INTERRUPT_GATE_DESCRIPTOR *IdtGate;
//
// PageTable
//
mHostContextCommon.PageTable = CreatePageTable ();
mHostContextCommon.Gdtr.Limit = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr.Limit;
mHostContextCommon.Gdtr.Base = (UINTN)AllocatePages (FRM_SIZE_TO_PAGES (mHostContextCommon.Gdtr.Limit + 1));
CopyMem ((VOID *)mHostContextCommon.Gdtr.Base, (VOID *)mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr.Base, mHostContextCommon.Gdtr.Limit + 1);
AsmWriteGdtr (&mHostContextCommon.Gdtr);
mHostContextCommon.Idtr.Limit = 0x100 * sizeof (INTERRUPT_GATE_DESCRIPTOR) - 1;
mHostContextCommon.Idtr.Base = (UINTN)AllocatePages (FRM_SIZE_TO_PAGES (mHostContextCommon.Idtr.Limit + 1));
CopyMem ((VOID *)mHostContextCommon.Idtr.Base, (VOID *)mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr.Base, mHostContextCommon.Idtr.Limit + 1);
IdtGate = (INTERRUPT_GATE_DESCRIPTOR *)mHostContextCommon.Idtr.Base;
for (Index = 0; Index < 0x100; Index++) {
IdtGate[Index].Offset15To0 = (UINT16)((UINTN)AsmExceptionHandlers + Index * mExceptionHandlerLength);
IdtGate[Index].Offset31To16 = (UINT16)(((UINTN)AsmExceptionHandlers + Index * mExceptionHandlerLength) >> 16);
#ifdef MDE_CPU_X64
IdtGate[Index].Offset63To32 = (UINT32)(((UINTN)AsmExceptionHandlers + Index * mExceptionHandlerLength) >> 32);
#endif
}
//
// Special for NMI
//
IdtGate[2].Offset15To0 = (UINT16)((UINTN)AsmNmiExceptionHandler);
IdtGate[2].Offset31To16 = (UINT16)(((UINTN)AsmNmiExceptionHandler) >> 16);
#ifdef MDE_CPU_X64
IdtGate[2].Offset63To32 = (UINT32)(((UINTN)AsmNmiExceptionHandler) >> 32);
#endif
//
// VmExitHandler
//
InitFrmHandler ();
mTeardownFinished = AllocatePages (FRM_SIZE_TO_PAGES (mHostContextCommon.CpuNum));
//
// Init HostContextPerCpu
//
mApicIdList = (UINTN)AllocatePages (FRM_SIZE_TO_PAGES (sizeof(UINT32) * mHostContextCommon.CpuNum));
GetApicIdListFromAcpi ((UINT32 *)mApicIdList);
for (Index = 0; Index < mHostContextCommon.CpuNum; Index++) {
mHostContextCommon.HostContextPerCpu[Index].Index = Index;
mHostContextCommon.HostContextPerCpu[Index].ApicId = *((UINT32 *)mApicIdList + Index);
mHostContextCommon.HostContextPerCpu[Index].Stack = (UINTN)AllocatePages (32);
mHostContextCommon.HostContextPerCpu[Index].Stack += FRM_PAGES_TO_SIZE (32);
InitHostVmcs (Index);
}
}
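The FRM_SIZE_TO_PAGES and FRM_PAGES_TO_SIZE helpers used throughout these examples are not shown in the snippets. A minimal sketch of what they are assumed to expand to follows; the assumption is that they mirror the standard EDK2 EFI_SIZE_TO_PAGES / EFI_PAGES_TO_SIZE macros (4 KB pages, sizes rounded up).
//
// Assumed definitions, mirroring EDK2's EFI_SIZE_TO_PAGES / EFI_PAGES_TO_SIZE:
// convert a byte count to a 4 KB page count (rounded up) and back.
//
#define FRM_PAGE_SHIFT            12
#define FRM_PAGE_MASK             0xFFF
#define FRM_SIZE_TO_PAGES(Size)   (((Size) >> FRM_PAGE_SHIFT) + (((Size) & FRM_PAGE_MASK) ? 1 : 0))
#define FRM_PAGES_TO_SIZE(Pages)  ((Pages) << FRM_PAGE_SHIFT)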
Example 2: InitBasicContext
/**
This function initializes the basic context for FRM.
**/
VOID
InitBasicContext (
VOID
)
{
UINT32 RegEax;
mHostContextCommon.CpuNum = GetCpuNumFromAcpi ();
GetPciExpressInfoFromAcpi (&mHostContextCommon.PciExpressBaseAddress, &mHostContextCommon.PciExpressLength);
PcdSet64 (PcdPciExpressBaseAddress, mHostContextCommon.PciExpressBaseAddress);
if (mHostContextCommon.PciExpressBaseAddress == 0) {
CpuDeadLoop ();
}
mHostContextCommon.AcpiTimerIoPortBaseAddress = GetAcpiTimerPort (&mHostContextCommon.AcpiTimerWidth);
PcdSet16 (PcdAcpiTimerIoPortBaseAddress, mHostContextCommon.AcpiTimerIoPortBaseAddress);
PcdSet8 (PcdAcpiTimerWidth, mHostContextCommon.AcpiTimerWidth);
if (mHostContextCommon.AcpiTimerIoPortBaseAddress == 0) {
CpuDeadLoop ();
}
mHostContextCommon.ResetIoPortBaseAddress = GetAcpiResetPort ();
mHostContextCommon.AcpiPmControlIoPortBaseAddress = GetAcpiPmControlPort ();
if (mHostContextCommon.AcpiPmControlIoPortBaseAddress == 0) {
CpuDeadLoop ();
}
mHostContextCommon.HostContextPerCpu = AllocatePages (FRM_SIZE_TO_PAGES(sizeof(FRM_HOST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mGuestContextCommon.GuestContextPerCpu = AllocatePages (FRM_SIZE_TO_PAGES(sizeof(FRM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mHostContextCommon.LowMemoryBase = mCommunicationData.LowMemoryBase;
mHostContextCommon.LowMemorySize = mCommunicationData.LowMemorySize;
mHostContextCommon.LowMemoryBackupBase = (UINT64)(UINTN)AllocatePages (FRM_SIZE_TO_PAGES ((UINTN)mCommunicationData.LowMemorySize));
//
// Save current context
//
mBspIndex = ApicToIndex (ReadLocalApicId ());
mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0 = AsmReadCr0 ();
mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3 = AsmReadCr3 ();
mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4 = AsmReadCr4 ();
AsmReadGdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr);
AsmReadIdtr (&mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr);
AsmCpuid (CPUID_EXTENDED_INFORMATION, &RegEax, NULL, NULL, NULL);
if (RegEax >= CPUID_EXTENDED_ADDRESS_SIZE) {
AsmCpuid (CPUID_EXTENDED_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
mHostContextCommon.PhysicalAddressBits = (UINT8)RegEax;
} else {
mHostContextCommon.PhysicalAddressBits = 36;
}
}
Example 3: GetImageReadFunction
/**
Support routine to get the image read function.
@param ImageContext The context of the image being loaded.
@retval EFI_SUCCESS The image read function location was found.
**/
EFI_STATUS
GetImageReadFunction (
IN PE_COFF_LOADER_IMAGE_CONTEXT *ImageContext
)
{
PEI_CORE_INSTANCE *Private;
VOID* MemoryBuffer;
Private = PEI_CORE_INSTANCE_FROM_PS_THIS (GetPeiServicesTablePointer ());
if (Private->PeiMemoryInstalled && ((Private->HobList.HandoffInformationTable->BootMode != BOOT_ON_S3_RESUME) || PcdGetBool (PcdShadowPeimOnS3Boot)) &&
(EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64) || EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_IA32))) {
//
// Shadow algorithm makes lots of non ANSI C assumptions and only works for IA32 and X64
// compilers that have been tested
//
if (Private->ShadowedImageRead == NULL) {
MemoryBuffer = AllocatePages (0x400 / EFI_PAGE_SIZE + 1);
ASSERT (MemoryBuffer != NULL);
CopyMem (MemoryBuffer, (CONST VOID *) (UINTN) PeiImageReadForShadow, 0x400);
Private->ShadowedImageRead = (PE_COFF_LOADER_READ_FILE) (UINTN) MemoryBuffer;
}
ImageContext->ImageRead = Private->ShadowedImageRead;
} else {
ImageContext->ImageRead = PeiImageRead;
}
return EFI_SUCCESS;
}
Example 4: DispatchSystemFmpImages
/**
Dispatch system FMP images.
Caution: This function may receive untrusted input.
@param[in] Image The EDKII system FMP capsule image.
@param[in] ImageSize The size of the EDKII system FMP capsule image in bytes.
@param[out] LastAttemptVersion The last attempt version, which will be recorded in ESRT and FMP EFI_FIRMWARE_IMAGE_DESCRIPTOR.
@param[out] LastAttemptStatus The last attempt status, which will be recorded in ESRT and FMP EFI_FIRMWARE_IMAGE_DESCRIPTOR.
@retval EFI_SUCCESS The capsule image was processed successfully.
@retval EFI_UNSUPPORTED Capsule image is not supported by the firmware.
@retval EFI_VOLUME_CORRUPTED FV volume in the capsule is corrupted.
@retval EFI_OUT_OF_RESOURCES Not enough memory.
**/
EFI_STATUS
DispatchSystemFmpImages (
IN VOID *Image,
IN UINTN ImageSize,
OUT UINT32 *LastAttemptVersion,
OUT UINT32 *LastAttemptStatus
)
{
EFI_STATUS Status;
VOID *AuthenticatedImage;
UINTN AuthenticatedImageSize;
VOID *DispatchFvImage;
UINTN DispatchFvImageSize;
EFI_HANDLE FvProtocolHandle;
EFI_FIRMWARE_VOLUME_HEADER *FvImage;
BOOLEAN Result;
AuthenticatedImage = NULL;
AuthenticatedImageSize = 0;
DEBUG((DEBUG_INFO, "DispatchSystemFmpImages\n"));
//
// Verify
//
Status = CapsuleAuthenticateSystemFirmware(Image, ImageSize, FALSE, LastAttemptVersion, LastAttemptStatus, &AuthenticatedImage, &AuthenticatedImageSize);
if (EFI_ERROR(Status)) {
DEBUG((DEBUG_INFO, "SystemFirmwareAuthenticateImage - %r\n", Status));
return Status;
}
//
// Get FV
//
Result = ExtractDriverFvImage(AuthenticatedImage, AuthenticatedImageSize, &DispatchFvImage, &DispatchFvImageSize);
if (Result) {
DEBUG((DEBUG_INFO, "ExtractDriverFvImage\n"));
//
// Dispatch
//
if (((EFI_FIRMWARE_VOLUME_HEADER *)DispatchFvImage)->FvLength == DispatchFvImageSize) {
FvImage = AllocatePages(EFI_SIZE_TO_PAGES(DispatchFvImageSize));
if (FvImage != NULL) {
CopyMem(FvImage, DispatchFvImage, DispatchFvImageSize);
Status = gDS->ProcessFirmwareVolume(
(VOID *)FvImage,
(UINTN)FvImage->FvLength,
&FvProtocolHandle
);
DEBUG((DEBUG_INFO, "ProcessFirmwareVolume - %r\n", Status));
if (!EFI_ERROR(Status)) {
gDS->Dispatch();
DEBUG((DEBUG_INFO, "Dispatch Done\n"));
}
}
}
}
return EFI_SUCCESS;
}
Example 5: InitHostVmcs
/**
This function initializes the host VMCS for the given CPU.
@param Index CPU index.
**/
VOID
InitHostVmcs (
UINTN Index
)
{
UINT64 Data64;
UINTN Size;
//
// VMCS size
//
Data64 = AsmReadMsr64 (IA32_VMX_BASIC_MSR_INDEX);
Size = (UINTN)(RShiftU64 (Data64, 32) & 0xFFFF);
//
// Allocate
//
mHostContextCommon.HostContextPerCpu[Index].Vmcs = (UINT64)(UINTN)AllocatePages (FRM_SIZE_TO_PAGES(Size));
//
// Set RevisionIdentifier
//
*(UINT32 *)(UINTN)mHostContextCommon.HostContextPerCpu[Index].Vmcs = (UINT32)Data64;
return ;
}
Example 6: DmaAllocateBuffer
/**
Allocates pages that are suitable for a DmaMap() of type MapOperationBusMasterCommonBuffer mapping.
@param MemoryType The type of memory to allocate, EfiBootServicesData or
EfiRuntimeServicesData.
@param Pages The number of pages to allocate.
@param HostAddress A pointer to store the base system memory address of the
allocated range.
@retval EFI_SUCCESS The requested memory pages were allocated.
@retval EFI_UNSUPPORTED Attributes is unsupported. The only legal attribute bits are
MEMORY_WRITE_COMBINE and MEMORY_CACHED.
@retval EFI_INVALID_PARAMETER One or more parameters are invalid.
@retval EFI_OUT_OF_RESOURCES The memory pages could not be allocated.
**/
EFI_STATUS
EFIAPI
DmaAllocateBuffer (
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN Pages,
OUT VOID **HostAddress
)
{
if (HostAddress == NULL) {
return EFI_INVALID_PARAMETER;
}
//
// The only valid memory types are EfiBootServicesData and EfiRuntimeServicesData.
// Uncached memory is used to keep coherency with the bus master.
//
if (MemoryType == EfiBootServicesData) {
*HostAddress = AllocatePages (Pages);
} else if (MemoryType == EfiRuntimeServicesData) {
*HostAddress = AllocateRuntimePages (Pages);
} else {
return EFI_INVALID_PARAMETER;
}
return EFI_SUCCESS;
}
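For context, a hypothetical caller of DmaAllocateBuffer might look like the sketch below. DmaFreeBuffer is assumed to be the releasing counterpart in the same DmaLib interface; the page count and the placeholder comment are illustrative only.
EFI_STATUS
ExampleCommonBufferUse (
  VOID
  )
{
  EFI_STATUS  Status;
  VOID        *DmaBuffer;

  //
  // Allocate one page of coherent common-buffer memory for a bus master.
  //
  Status = DmaAllocateBuffer (EfiBootServicesData, 1, &DmaBuffer);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // ... program the bus master with the buffer address here ...
  //
  DmaFreeBuffer (1, DmaBuffer);  // assumed DmaLib counterpart
  return EFI_SUCCESS;
}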
Example 7: ArchVectorConfig
RETURN_STATUS ArchVectorConfig(
IN UINTN VectorBaseAddress
)
{
UINTN HcrReg;
UINT8 *Stack;
Stack = AllocatePages (EL0_STACK_PAGES);
if (Stack == NULL) {
return RETURN_OUT_OF_RESOURCES;
}
RegisterEl0Stack ((UINT8 *)Stack + EFI_PAGES_TO_SIZE (EL0_STACK_PAGES));
if (ArmReadCurrentEL() == AARCH64_EL2) {
HcrReg = ArmReadHcr();
// Trap General Exceptions. All exceptions that would be routed to EL1 are routed to EL2
HcrReg |= ARM_HCR_TGE;
ArmWriteHcr(HcrReg);
}
return RETURN_SUCCESS;
}
Example 8: AllocateAlignedPages
VOID *
EFIAPI
AllocateAlignedPages (
IN UINTN Pages,
IN UINTN Alignment
)
{
VOID *Memory;
UINTN AlignmentMask;
//
// Alignment must be a power of two or zero.
//
ASSERT ((Alignment & (Alignment - 1)) == 0);
if (Pages == 0) {
return NULL;
}
//
// Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
//
ASSERT (Pages <= (MAX_ADDRESS - EFI_SIZE_TO_PAGES (Alignment)));
//
// We would rather waste some memory to save PEI code size.
//
Memory = (VOID *)(UINTN)AllocatePages (Pages + EFI_SIZE_TO_PAGES (Alignment));
if (Alignment == 0) {
AlignmentMask = Alignment;
} else {
AlignmentMask = Alignment - 1;
}
return (VOID *) (UINTN) (((UINTN) Memory + AlignmentMask) & ~AlignmentMask);
}
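An illustrative call of this PEI-phase AllocateAlignedPages follows. Because the implementation over-allocates instead of tracking the unaligned remainder, the extra pages cannot be returned; the page count and 64 KB alignment used here are example values only.
VOID  *Buffer;

//
// Request four pages aligned on a 64 KB boundary (illustrative values).
//
Buffer = AllocateAlignedPages (4, SIZE_64KB);
ASSERT (Buffer != NULL);
ASSERT (((UINTN)Buffer & (SIZE_64KB - 1)) == 0);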
Example 9: Split2MPageTo4K
/**
Split 2M page to 4K.
@param[in] PhysicalAddress Start physical address the 2M page covered.
@param[in, out] PageEntry2M Pointer to 2M page entry.
@param[in] StackBase Stack base address.
@param[in] StackSize Stack size.
**/
VOID
Split2MPageTo4K (
IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
IN OUT UINT64 *PageEntry2M,
IN EFI_PHYSICAL_ADDRESS StackBase,
IN UINTN StackSize
)
{
EFI_PHYSICAL_ADDRESS PhysicalAddress4K;
UINTN IndexOfPageTableEntries;
PAGE_TABLE_4K_ENTRY *PageTableEntry;
PageTableEntry = AllocatePages (1);
ASSERT (PageTableEntry != NULL);
//
// Fill in 2M page entry.
//
*PageEntry2M = (UINT64) (UINTN) PageTableEntry | IA32_PG_P | IA32_PG_RW;
PhysicalAddress4K = PhysicalAddress;
for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
//
// Fill in the Page Table entries
//
PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;
PageTableEntry->Bits.ReadWrite = 1;
PageTableEntry->Bits.Present = 1;
if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {
//
// Set Nx bit for stack.
//
PageTableEntry->Bits.Nx = 1;
}
}
}
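A hypothetical caller is sketched below: it locates the 2 MB page-directory entry that maps the stack and splits it so the stack range can be marked non-executable. The PageDirectoryEntry parameter is an assumed pointer to the 512-entry PDE table covering StackBase; IA32_PG_PS and SIZE_2MB are the usual EDK2 definitions.
VOID
ExampleSplitStackRange (
  IN UINT64                *PageDirectoryEntry,  // assumed: PDE table covering StackBase
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize
  )
{
  UINTN  PdeIndex;

  PdeIndex = (UINTN)RShiftU64 (StackBase, 21) & 0x1FF;
  if ((PageDirectoryEntry[PdeIndex] & IA32_PG_PS) != 0) {
    //
    // The entry is still a 2 MB leaf: break it into 4 KB pages so the
    // stack range gets the NX bit set by Split2MPageTo4K above.
    //
    Split2MPageTo4K (
      StackBase & ~((EFI_PHYSICAL_ADDRESS)(SIZE_2MB - 1)),
      &PageDirectoryEntry[PdeIndex],
      StackBase,
      StackSize
      );
  }
}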
Example 10: SetCacheability
/**
Set memory cacheability.
@param PageTable Page table base address.
@param Address Memory address whose cacheability is to be changed.
@param Cacheability Cacheability attribute (PAT/PCD/PWT bits) to set.
**/
VOID
SetCacheability (
IN UINT64 *PageTable,
IN UINTN Address,
IN UINT8 Cacheability
)
{
UINTN PTIndex;
VOID *NewPageTableAddress;
UINT64 *NewPageTable;
UINTN Index;
ASSERT ((Address & EFI_PAGE_MASK) == 0);
if (sizeof (UINTN) == sizeof (UINT64)) {
PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
ASSERT (PageTable[PTIndex] & IA32_PG_P);
PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
}
PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
ASSERT (PageTable[PTIndex] & IA32_PG_P);
PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
//
// A perfect implementation should check the original cacheability with the
// one being set, and break a 2M page entry into pieces only when they
// disagreed.
//
PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
//
// Allocate a page from SMRAM
//
NewPageTableAddress = AllocatePages (1);
ASSERT (NewPageTableAddress != NULL);
NewPageTable = (UINT64 *)NewPageTableAddress;
for (Index = 0; Index < 0x200; Index++) {
NewPageTable[Index] = PageTable[PTIndex];
if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
}
NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
}
PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P;
}
ASSERT (PageTable[PTIndex] & IA32_PG_P);
PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
ASSERT (PageTable[PTIndex] & IA32_PG_P);
PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
PageTable[PTIndex] |= (UINT64)Cacheability;
}
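An illustrative call is sketched below: it marks a single 4 KB page as uncacheable by setting the PCD bit. The wrapper function, its parameter, and the example MMIO address are placeholders; in the original SMM context the SMM page-table base would be passed in.
VOID
ExampleMarkPageUncacheable (
  IN UINT64  *PageTable   // assumed: page-table root (PML4, or PDPT on IA32 PAE) in use
  )
{
  //
  // Illustrative only: make one 4 KB MMIO page uncacheable by setting PCD.
  // 0xFED00000 (HPET base) is just an example address.
  //
  SetCacheability (PageTable, (UINTN)0xFED00000, IA32_PG_CD);
}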
Example 11: ShutdownEfi
VOID
ShutdownEfi (
VOID
)
{
EFI_STATUS Status;
UINTN MemoryMapSize;
EFI_MEMORY_DESCRIPTOR *MemoryMap;
UINTN MapKey;
UINTN DescriptorSize;
UINT32 DescriptorVersion;
UINTN Pages;
MemoryMap = NULL;
MemoryMapSize = 0;
do {
Status = gBS->GetMemoryMap (
&MemoryMapSize,
MemoryMap,
&MapKey,
&DescriptorSize,
&DescriptorVersion
);
if (Status == EFI_BUFFER_TOO_SMALL) {
Pages = EFI_SIZE_TO_PAGES (MemoryMapSize) + 1;
MemoryMap = AllocatePages (Pages);
//
// Get System MemoryMap
//
Status = gBS->GetMemoryMap (
&MemoryMapSize,
MemoryMap,
&MapKey,
&DescriptorSize,
&DescriptorVersion
);
// Don't do anything between the GetMemoryMap() and ExitBootServices()
if (!EFI_ERROR (Status)) {
Status = gBS->ExitBootServices (gImageHandle, MapKey);
if (EFI_ERROR (Status)) {
FreePages (MemoryMap, Pages);
MemoryMap = NULL;
MemoryMapSize = 0;
}
}
}
} while (EFI_ERROR (Status));
//
// Clean and invalidate caches.
//
WriteBackInvalidateDataCache();
InvalidateInstructionCache();
//
// Turn off caches and MMU.
//
ArmDisableDataCache ();
ArmDisableInstructionCache ();
ArmDisableMmu ();
}
Example 12: InitGuestContextCommon
/**
This function initializes the guest common context.
**/
VOID
InitGuestContextCommon (
VOID
)
{
UINT32 Index;
//
// CompatiblePageTable for IA32 flat mode only
//
mGuestContextCommon.CompatiblePageTable = CreateCompatiblePageTable ();
mGuestContextCommon.CompatiblePageTablePae = CreateCompatiblePageTablePae ();
mGuestContextCommon.MsrBitmap = (UINT64)(UINTN)AllocatePages (1);
EptInit ();
IoInit ();
VmxTimerInit ();
//
// Init GuestContextPerCpu
//
for (Index = 0; Index < mHostContextCommon.CpuNum; Index++) {
mGuestContextCommon.GuestContextPerCpu[Index].Stack = (UINTN)AllocatePages (1);
mGuestContextCommon.GuestContextPerCpu[Index].Stack += FRM_PAGES_TO_SIZE (1);
mGuestContextCommon.GuestContextPerCpu[Index].Cr0 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr0;
mGuestContextCommon.GuestContextPerCpu[Index].Cr3 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr3;
mGuestContextCommon.GuestContextPerCpu[Index].Cr4 = mGuestContextCommon.GuestContextPerCpu[mBspIndex].Cr4;
CopyMem (&mGuestContextCommon.GuestContextPerCpu[Index].Gdtr, &mGuestContextCommon.GuestContextPerCpu[mBspIndex].Gdtr, sizeof(IA32_DESCRIPTOR));
CopyMem (&mGuestContextCommon.GuestContextPerCpu[Index].Idtr, &mGuestContextCommon.GuestContextPerCpu[mBspIndex].Idtr, sizeof(IA32_DESCRIPTOR));
mGuestContextCommon.GuestContextPerCpu[Index].VmExitMsrStore = (UINT64)(UINTN)AllocatePages (1);
mGuestContextCommon.GuestContextPerCpu[Index].VmExitMsrLoad = (UINT64)(UINTN)AllocatePages (1);
mGuestContextCommon.GuestContextPerCpu[Index].VmEnterMsrLoad = (UINT64)(UINTN)AllocatePages (1);
//
// Allocate GuestVmcs
//
InitGuestVmcs (Index);
}
}
Example 13: HandOffToDxeCore
/**
Transfers control to DxeCore.
This function performs CPU architecture specific operations to execute
the entry point of DxeCore with the parameters of HobList.
It also installs EFI_END_OF_PEI_PPI to signal the end of PEI phase.
@param DxeCoreEntryPoint The entry point of DxeCore.
@param HobList The start of HobList passed to DxeCore.
**/
VOID
HandOffToDxeCore (
IN EFI_PHYSICAL_ADDRESS DxeCoreEntryPoint,
IN EFI_PEI_HOB_POINTERS HobList
)
{
VOID *BaseOfStack;
VOID *TopOfStack;
EFI_STATUS Status;
UINTN PageTables;
//
// Allocate 128KB for the Stack
//
BaseOfStack = AllocatePages (EFI_SIZE_TO_PAGES (STACK_SIZE));
ASSERT (BaseOfStack != NULL);
//
// Compute the top of the stack we were allocated. Pre-allocate a UINTN
// for safety.
//
TopOfStack = (VOID *) ((UINTN) BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT);
TopOfStack = ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);
PageTables = 0;
if (FeaturePcdGet (PcdDxeIplBuildPageTables)) {
//
// Create page table and save PageMapLevel4 to CR3
//
PageTables = CreateIdentityMappingPageTables ();
}
//
// End of PEI phase signal
//
Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
ASSERT_EFI_ERROR (Status);
if (FeaturePcdGet (PcdDxeIplBuildPageTables)) {
AsmWriteCr3 (PageTables);
}
//
// Update the contents of BSP stack HOB to reflect the real stack info passed to DxeCore.
//
UpdateStackHob ((EFI_PHYSICAL_ADDRESS)(UINTN) BaseOfStack, STACK_SIZE);
//
// Transfer the control to the entry point of DxeCore.
//
SwitchStack (
(SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
HobList.Raw,
NULL,
TopOfStack
);
}
Example 14: LoadPeCoffImage
EFI_STATUS
EFIAPI
LoadPeCoffImage (
IN VOID *PeCoffImage,
OUT EFI_PHYSICAL_ADDRESS *ImageAddress,
OUT UINT64 *ImageSize,
OUT EFI_PHYSICAL_ADDRESS *EntryPoint
)
{
RETURN_STATUS Status;
PE_COFF_LOADER_IMAGE_CONTEXT ImageContext;
VOID *Buffer;
ZeroMem (&ImageContext, sizeof (ImageContext));
ImageContext.Handle = PeCoffImage;
ImageContext.ImageRead = PeCoffLoaderImageReadFromMemory;
Status = PeCoffLoaderGetImageInfo (&ImageContext);
ASSERT_EFI_ERROR (Status);
//
// Allocate Memory for the image
//
Buffer = AllocatePages (EFI_SIZE_TO_PAGES((UINT32)ImageContext.ImageSize));
ASSERT (Buffer != NULL);
ImageContext.ImageAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)Buffer;
//
// Load the image to our new buffer
//
Status = PeCoffLoaderLoadImage (&ImageContext);
ASSERT_EFI_ERROR (Status);
//
// Relocate the image in our new buffer
//
Status = PeCoffLoaderRelocateImage (&ImageContext);
ASSERT_EFI_ERROR (Status);
*ImageAddress = ImageContext.ImageAddress;
*ImageSize = ImageContext.ImageSize;
*EntryPoint = ImageContext.EntryPoint;
//
// Flush not needed for all architectures. We could have a processor specific
// function in this library that does the no-op if needed.
//
InvalidateInstructionCacheRange ((VOID *)(UINTN)*ImageAddress, (UINTN)*ImageSize);
return Status;
}
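A hypothetical usage of LoadPeCoffImage follows: load an image that already sits in memory, then transfer control to its entry point. The wrapper function, the entry-point prototype, and the argument convention are illustrative assumptions.
typedef VOID (EFIAPI *EXAMPLE_IMAGE_ENTRY_POINT) (VOID *Context);

VOID
ExampleRunImage (
  IN VOID  *PeCoffImage   // raw PE/COFF image already in memory
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  ImageAddress;
  UINT64                ImageSize;
  EFI_PHYSICAL_ADDRESS  EntryPoint;

  Status = LoadPeCoffImage (PeCoffImage, &ImageAddress, &ImageSize, &EntryPoint);
  if (!EFI_ERROR (Status)) {
    //
    // Jump to the relocated image; the argument passed here is illustrative.
    //
    ((EXAMPLE_IMAGE_ENTRY_POINT)(UINTN)EntryPoint) (NULL);
  }
}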
Example 15: ShutdownUefiBootServices
EFI_STATUS
ShutdownUefiBootServices (
VOID
)
{
EFI_STATUS Status;
UINTN MemoryMapSize;
EFI_MEMORY_DESCRIPTOR *MemoryMap;
UINTN MapKey;
UINTN DescriptorSize;
UINT32 DescriptorVersion;
UINTN Pages;
MemoryMap = NULL;
MemoryMapSize = 0;
Pages = 0;
do {
Status = gBS->GetMemoryMap (
&MemoryMapSize,
MemoryMap,
&MapKey,
&DescriptorSize,
&DescriptorVersion
);
if (Status == EFI_BUFFER_TOO_SMALL) {
Pages = EFI_SIZE_TO_PAGES (MemoryMapSize) + 1;
MemoryMap = AllocatePages (Pages);
//
// Get System MemoryMap
//
Status = gBS->GetMemoryMap (
&MemoryMapSize,
MemoryMap,
&MapKey,
&DescriptorSize,
&DescriptorVersion
);
}
// Don't do anything between the GetMemoryMap() and ExitBootServices()
if (!EFI_ERROR(Status)) {
Status = gBS->ExitBootServices (gImageHandle, MapKey);
if (EFI_ERROR(Status)) {
FreePages (MemoryMap, Pages);
MemoryMap = NULL;
MemoryMapSize = 0;
}
}
} while (EFI_ERROR(Status));
return Status;
}
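The do/while loop above exists because allocating the memory-map buffer can itself change the memory map, which invalidates MapKey and makes ExitBootServices() fail; the code then frees the stale buffer and retries with a fresh map. A hypothetical caller, for example an OS loader about to take direct control of the hardware, might use it as sketched below; the wrapper name is illustrative.
EFI_STATUS
ExampleTakeOverPlatform (
  VOID
  )
{
  EFI_STATUS  Status;

  Status = ShutdownUefiBootServices ();
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "ShutdownUefiBootServices failed: %r\n", Status));
    return Status;
  }
  //
  // From this point on, no boot-services calls (including AllocatePages())
  // are legal; only runtime services remain available.
  //
  return EFI_SUCCESS;
}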