This page collects typical usage examples of the C++ method MEM_NB_BLOCK::CpuMemTyping. If you have been wondering what MEM_NB_BLOCK::CpuMemTyping does and how to call it, the curated examples below may help. You can also browse further usage examples of the containing class, MEM_NB_BLOCK.
One code example of MEM_NB_BLOCK::CpuMemTyping is shown below. Examples are sorted by popularity by default; you can vote up the ones you find useful, and your ratings help the system surface better C++ code examples.
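Before the full listing, here is a minimal, self-contained sketch of the calling pattern that CpuMemTyping relies on. The types and helpers below (MEM_NB_BLOCK_SKETCH, SketchNbBlockCtor, and so on) are simplified stand-ins invented for illustration, not the real AGESA definitions; only the convention of invoking the member function pointer as NB.CpuMemTyping (&NB), after a constructor routine has filled in the block, is taken from Example 1.

#include <cstdio>

// Simplified stand-ins for the AGESA structures, invented for illustration only.
// The real MEM_NB_BLOCK carries many more members; what matters here is the
// pattern: the block holds function pointers that an NB-specific constructor
// fills in, and every call passes the block itself as the first argument.
struct MEM_NB_BLOCK_SKETCH {
  int Node;
  void (*CpuMemTyping) (MEM_NB_BLOCK_SKETCH *NBPtr);  // set up memory typing on this core
  void (*TrainingFlow) (MEM_NB_BLOCK_SKETCH *NBPtr);  // run the DRAM training sequence
};

// Hypothetical family-specific implementations.
static void SketchCpuMemTyping (MEM_NB_BLOCK_SKETCH *NBPtr) {
  std::printf ("Node %d: program CPU memory-typing registers\n", NBPtr->Node);
}

static void SketchTrainingFlow (MEM_NB_BLOCK_SKETCH *NBPtr) {
  std::printf ("Node %d: run memory training flow\n", NBPtr->Node);
}

// Plays the role of EnvPtr->NBBlockCtor in Example 1: it wires up the
// function pointers for one north-bridge family.
static void SketchNbBlockCtor (MEM_NB_BLOCK_SKETCH *NBPtr, int Node) {
  NBPtr->Node = Node;
  NBPtr->CpuMemTyping = SketchCpuMemTyping;
  NBPtr->TrainingFlow = SketchTrainingFlow;
}

int main () {
  MEM_NB_BLOCK_SKETCH NB;
  SketchNbBlockCtor (&NB, 0);
  NB.CpuMemTyping (&NB);  // same calling convention as in Example 1
  NB.TrainingFlow (&NB);
  return 0;
}

In Example 1 the same pattern appears with the real types: EnvPtr->NBBlockCtor populates a local MEM_NB_BLOCK named NB, after which the code calls NB.CpuMemTyping (&NB) and NB.TrainingFlow (&NB).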
Example 1: ASSERT
//......... part of the code is omitted here .........
      //
      // Copy Parameters from environment
      //
      ParameterList.HoleBase = EnvPtr->HoleBase;
      ParameterList.BottomIo = EnvPtr->BottomIo;
      ParameterList.UmaSize = EnvPtr->UmaSize;
      ParameterList.SysLimit = EnvPtr->SysLimit;
      ParameterList.TableBasedAlterations = EnvPtr->TableBasedAlterations;
      ParameterList.PlatformMemoryConfiguration = EnvPtr->PlatformMemoryConfiguration;
      MemPtr->ParameterListPtr = &ParameterList;
      for (p = 0; p < MAX_PLATFORM_TYPES; p++) {
        MemPtr->GetPlatformCfg[p] = EnvPtr->GetPlatformCfg[p];
      }
      MemPtr->ErrorHandling = EnvPtr->ErrorHandling;
      //
      // Create Local NBBlock and Tech Block
      //
      EnvPtr->NBBlockCtor (&NB, MCTPtr, EnvPtr->FeatPtr);
      NB.RefPtr = &ParameterList;
      NB.MemPtr = MemPtr;
      i = 0;
      while (memTechInstalled[i] != NULL) {
        if (memTechInstalled[i] (&TB, &NB)) {
          break;
        }
        i++;
      }
      NB.TechPtr = &TB;
      NB.TechBlockSwitch (&NB);
      //
      // Setup CPU Mem Type MSRs on the AP
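      // CpuMemTyping is the routine installed by the NB block constructor above; on AMD
      // parts it typically programs TOP_MEM/TOM2 and the MTRRs so that DRAM is cacheable
      // while this AP runs training.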
      //
      NB.CpuMemTyping (&NB);
      IDS_HDT_CONSOLE (MEM_STATUS, "Node %d\n", NB.Node);
      //
      // Call Technology Specific Training routine
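      // TrainingFlow is likewise installed by the constructor; it runs the training
      // sequence (receiver enable, DQS positioning, and so on) appropriate for the
      // memory technology selected via TechBlockSwitch above.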
      //
      NB.TrainingFlow (&NB);
      //
      // Copy training data to ReturnData buffer
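      // The length mirrors the per-channel layout: delay tables, failure-mask tables,
      // and per-DIMM lane data, repeated for every DCT/channel pair.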
      //
      LibAmdMemCopy ( BufferPtr,
                      MCTPtr->DctData[0].ChData[0].RcvEnDlys,
                      ((DctCount * ChannelCount) * (
                         (RowCount * ColumnCount * NUMBER_OF_DELAY_TABLES) +
                         (MAX_BYTELANES_PER_CHANNEL * MAX_CS_PER_CHANNEL * NUMBER_OF_FAILURE_MASK_TABLES) +
                         (MAX_DIMMS_PER_CHANNEL * MAX_NUMBER_LANES)
                       )
                      ),
                      StdHeader);
      HeapDeallocateBuffer (AMD_MEM_DATA_HANDLE, StdHeader);
      //
      // Restore pointers
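      // The ChData delay pointers were presumably re-targeted at local training storage
      // in the setup code omitted above; point them back at the environment's originals
      // before the results are sent to the BSP.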
      //
      for (Dct = 0; Dct < MCTPtr->DctCount; Dct++) {
        for (Channel = 0; Channel < MCTPtr->DctData[Dct].ChannelCount; Channel++) {
          MCTPtr->DctData[Dct].ChData[Channel].MCTPtr = &EnvPtr->DieStruct;
          MCTPtr->DctData[Dct].ChData[Channel].DCTPtr = &EnvPtr->DieStruct.DctData[Dct];
          MCTPtr->DctData[Dct].ChData[Channel].RcvEnDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RcvEnDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqs2dDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqs2dDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].RdDqsMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].RdDqsMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMinDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMinDlys;
          MCTPtr->DctData[Dct].ChData[Channel].WrDatMaxDlys = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].WrDatMaxDlys;
          MCTPtr->DctData[Dct].ChData[Channel].FailingBitMask = EnvPtr->DieStruct.DctData[Dct].ChData[Channel].FailingBitMask;
        }
        MCTPtr->DctData[Dct].ChData = EnvPtr->DieStruct.DctData[Dct].ChData;
      }
      MCTPtr->DctData = EnvPtr->DieStruct.DctData;
    }
    //
    // Signal to BSP that training is complete and Send Results
    //
    ASSERT (ReturnData.DataPtr != NULL);
    ApUtilTransmitBuffer (EnvPtr->BspSocket, EnvPtr->BspCore, &ReturnData, StdHeader);
    //
    // Clean up and exit.
    //
    HeapDeallocateBuffer (GENERATE_MEM_HANDLE (ALLOC_PAR_TRN_HANDLE, 0, 0, 0), StdHeader);
  } else {
    MCTPtr = &EnvPtr->DieStruct;
    PutEventLog (AGESA_FATAL, MEM_ERROR_HEAP_ALLOCATE_FOR_TRAINING_DATA, MCTPtr->NodeId, 0, 0, 0, StdHeader);
    SetMemError (AGESA_FATAL, MCTPtr);
    ASSERT (FALSE); // Could not allocate heap for buffer for parallel training data
  }
  return TRUE;
}