This article collects typical usage examples of the C++ method SC_StringArray::SetStringLen. If you are wondering exactly how SC_StringArray::SetStringLen is used, the selected code examples below may help; you can also explore further usage examples of the containing class, SC_StringArray.
Seven code examples of SC_StringArray::SetStringLen are shown below, sorted by popularity by default.
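All seven examples follow one recurring pattern: call SetStringLen to fix the per-string storage length before allocating, then Alloc (or AllocAndSetSize) to reserve slots, then operator+= to append. Here is a minimal sketch of that pattern, assuming only the SC_StringArray behavior visible in the examples below; CollectIDs, itemCount, and itemIDs are hypothetical names used purely for illustration:
bool CollectIDs(SC_StringArray& ids)
{
    ids.SetStringLen(40);               // fix the maximum length of each stored string first
    ids.Alloc(itemCount);               // then reserve room for itemCount strings
    for (int i = 0; i < itemCount; i++)
        ids += itemIDs[i];              // append one fixed-length string per item
    return !ids.IsEmpty();              // report whether anything was collected
}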
Example 1: GetGeoLayerIDs
bool LayerStaticSupport::GetGeoLayerIDs(SC_StringArray& ids)
{
    // fix the ID string length, then reserve one slot per geology layer
    ids.SetStringLen(IntervalBase::intervalIDLen);
    ids.Alloc(geologyLayers.Size());
    for (int i = 0; i < geologyLayers.Size(); i++)
        ids += geologyLayers[i].intervalID;
    return !ids.IsEmpty();
}
Example 2: GetWellBoreZoneIDs
bool LayerStaticSupport::GetWellBoreZoneIDs(SC_StringArray& ids)
{
    // same pattern as Example 1, applied to well-bore zones
    ids.SetStringLen(IntervalBase::intervalIDLen);
    ids.Alloc(wellBoreZones.Size());
    for (int i = 0; i < wellBoreZones.Size(); i++)
        ids += wellBoreZones[i].intervalID;
    return !ids.IsEmpty();
}
Example 3: GetCaseIDs
void ProfileSimRunResults::GetCaseIDs(SC_StringArray& caseIDs) const
{
    // release any existing storage before setting the new string length
    caseIDs.DeAlloc();
    caseIDs.SetStringLen(ProfileSimCaseResults::caseIDLen);
    caseIDs.Alloc(Size());
    for (int i = 0; i < Size(); i++)
        caseIDs += (*this)[i].caseID;
}
Example 4: ReadStringArray
void ReadStringArray(SC_StringArray& strings)
{
    strings.DeAlloc();
    // the serialized layout is a line count, a string length, then the strings
    int nLines = ReadInt();
    int strLen = ReadInt();
    strings.SetStringLen(strLen);
    strings.AllocAndSetSize(nLines);
    for (int i = 0; i < nLines; i++) {
        ReadText(strings[i], strLen);
    }
}
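For context, ReadStringArray implies a simple serialized layout: a line count, a fixed string length, then the strings themselves. A mirror-image writer under that assumption might look like the sketch below; WriteInt, WriteText, and GetStringLen are hypothetical counterparts of the calls shown above, not confirmed API:
void WriteStringArray(const SC_StringArray& strings)
{
    int nLines = strings.Size();
    int strLen = strings.GetStringLen();  // assumed accessor mirroring SetStringLen
    WriteInt(nLines);                     // assumed counterpart of ReadInt
    WriteInt(strLen);
    for (int i = 0; i < nLines; i++)
        WriteText(strings[i], strLen);    // assumed counterpart of ReadText
}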
Example 5: GetWellIDs
bool DataCaptureStaticSupport::GetWellIDs(SC_StringArray& wellIDs)
{
    wellIDs.SetStringLen(40);
    wellIDs.Alloc(dataCaptureData.Size());
    for (int i = 0; i < dataCaptureData.Size(); i++)
    {
        DataCaptureSpecGlob& currSpec = *(dataCaptureData[i]);
        // collect only pressure-capture specs attached to wells
        if (currSpec.IsWell() && currSpec.IsPressure())
            wellIDs += currSpec.dataDesig;
    }
    return wellIDs.IsNotEmpty();
}
Example 6: ResetAllControls
void DlgVaryPriority::ResetAllControls()
{
    vary.SetForFlags();

    // build the list of long IDs for the currently varied parameters
    SC_StringArray newVaryPars;
    newVaryPars.SetStringLen(60);
    newVaryPars.Alloc(orderedVary.Size());
    for (int i = 0; i < orderedVary.Size(); i++)
        newVaryPars += orderedVary[i]->GetLongID();

    // repopulate the three priority controls and reset their selections
    for (int j = 0; j < 3; j++)
    {
        m_varyPriority[j].ClearEntries();
        if (j < orderedVary.Size())
            m_varyPriority[j].AddBasicEntry(newVaryPars);
        m_varyPriority[j].ResetStrings();
        varyPriority[j] = j;
        m_varyPriority[j].ResetSelection();
    }

    CheckRestrictions();
}
Example 7: MPIMasterRun
static void MPIMasterRun(const char* MPIappID, int maxMPIRank)
{
    // all msgs for master ...
    NodeFile::mpiDebugRun = true;

    // record the master's processor name, then collect one name per slave rank
    SC_StringArray machineNames;
    machineNames.SetStringLen(maxNameLen);
    machineNames.Alloc(maxMPIRank);
    machineNames.SetString(processorName, 0);

    // get slave processor name
    MPI_Status status;
    for (int i = 1; i < maxMPIRank; i++)
    {
        CheckMPI(MPI_Recv(processorName, maxNameLen, MPI_CHAR, i, mpiTag_ProcName, MPI_COMM_WORLD, &status), "Master get slave names");
        int currRank = status.MPI_SOURCE;
        if (currRank != i)
            GenAppInternalError("Unexpected slave rank on slave processor name");
        machineNames.SetString(processorName, i);
    }

    time_t stTime;
    bool stTimeOK = false;
    char timeLab[80];
    const char* timeFormat = "%x %X";
    if (time(&stTime) != -1)
    {
#ifdef MSCVS2005
        tm tmOut;
        localtime_s(&tmOut, &stTime);
        strftime(timeLab, 80, timeFormat, &tmOut);
#else
        strftime(timeLab, 80, timeFormat, localtime(&stTime));
#endif
        GenAppInfoMsg("Master start time", timeLab);
        stTimeOK = true;
    }

    bool isOptRun = MPIOptimizationRun();
    int nRuns = 0;  // initialized so the slave-count clamp below never reads an indeterminate value
    if (isOptRun)
    {
        GenAppInfoMsg("MPI Run", "Optimization only");
    }
    else
    {
        nRuns = MPIGetNCases();
        char nrunstr[10];
        IntToString(nRuns, nrunstr, 10);
        GenAppInfoMsg("Number of runs", nrunstr);
    }

    // never start more slaves than there are sampling runs
    int nSlaves = maxMPIRank - 1;
    if ((!isOptRun) && (nRuns < nSlaves))
        nSlaves = nRuns;

    MPIMasterInit(nSlaves);
    if (isOptRun)
    {
        MPIRunOptimizationMaster(nSlaves);
    }
    else
    {
        MPIMasterSampling(nSlaves, maxMPIRank);
    }
    GenAppInfoMsg(MPIappID, "master run complete");
    MPIMasterCleanup(nSlaves);

    if (!isOptRun)
    {
        using namespace std;
        cout << endl << "Slave Summary" << endl;
        SC_IntArray processorCount(maxMPIRank, 0);
        for (int i = 0; i < nSlaves; i++)
        {
            cout << "Slave " << i + 1 << " processed " << indexCount[i] << " runs" << endl;
            // map each slave's machine name back to a per-machine run total
            processorCount[machineNames.SearchForKey(machineNames[i + 1])] += indexCount[i];
        }
        cout << endl << "Machine Summary" << endl;
        for (int i = 0; i < maxMPIRank; i++)
            if (processorCount[i] > 0)
                cout << "Machine " << machineNames[i] << " processed " << processorCount[i] << " runs" << endl;
        cout << endl;
    }
    GenAppInfoMsg(MPIappID, "all cases completed OK");

    time_t endTime;
    if (stTimeOK && (time(&endTime) != -1))
    {
        // write start time again
        GenAppInfoMsg("Master start time", timeLab);
//......... remainder of the function omitted .........