本文整理汇总了C++中ConfigArray类的典型用法代码示例。如果您正苦于以下问题:C++ ConfigArray类的具体用法?C++ ConfigArray怎么用?C++ ConfigArray使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ConfigArray类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: DoEvalBase
// Shared evaluation driver: loads the model named by 'modelPath' and runs a
// SimpleEvaluator over the data provided by 'reader', with minibatch size,
// epoch size, trace level, and the evaluated node list all taken from 'config'.
static void DoEvalBase(const ConfigParameters& config, IDataReader& reader)
{
    DEVICEID_TYPE deviceId = DeviceFromConfig(config);

    ConfigArray minibatchSize = config(L"minibatchSize", "40960");
    intargvector mbSize = minibatchSize;

    size_t epochSize = config(L"epochSize", "0");
    if (epochSize == 0)
        epochSize = requestDataSize; // 0 means "evaluate all available data"

    wstring modelPath = config(L"modelPath");
    int traceLevel = config(L"traceLevel", "0");
    size_t numMBsToShowResult = config(L"numMBsToShowResult", "100");

    // collect the (possibly empty) list of nodes to evaluate
    ConfigArray evalNodeNames = config(L"evalNodeNames", "");
    vector<wstring> evalNodeNamesVector;
    for (int k = 0; k < evalNodeNames.size(); ++k)
        evalNodeNamesVector.push_back(evalNodeNames[k]);

    auto net = ComputationNetwork::CreateFromFile<ElemType>(deviceId, modelPath);
    SimpleEvaluator<ElemType> eval(net, numMBsToShowResult, traceLevel);
    eval.Evaluate(&reader, evalNodeNamesVector, mbSize[0], epochSize);
}
示例2: create
// Reads elements from the stream into this list, one per iteration, until an
// empty line or the "[]" end-of-list marker is encountered.
void ConfigList::load(std::istream &sin, string &line)
{
    while (true)
    {
        ConfigArray *element = create();
        push_back(element);
        element->load(sin, line);

        const bool endOfList = (line == "[]") || (line == "");
        if (endOfList)
            return;
    }
}
示例3: wmain
// Entry point for the CNTK config-driven driver.
// Parses the command line into a ConfigParameters set, echoes the configured
// command list and precision to stderr, then dispatches to DoCommand<float>
// or DoCommand<double> depending on the 'precision' (or legacy 'type') key.
// Returns 0 on success, -1 if any exception escapes.
int wmain(int argc, wchar_t* argv[])
{
try
{
ConfigParameters config;
ConfigParameters::ParseCommandLine(argc, argv, config);
// get the command param set they want
wstring logpath = config("stderr", L"");
ConfigArray command = config("command", "train");
// dump config info: echo all configured commands to stderr
fprintf(stderr, "command: ");
for (int i = 0; i < command.size(); i++)
{
fprintf(stderr, "%s ", command[i].c_str());
}
// run commands with the configured floating-point precision
std::string type = config("precision", "float");
// accept old precision key 'type' for backward compatibility
if (config.Exists("type"))
type = config("type", "float");
fprintf(stderr, "\nprecision = %s\n", type.c_str());
if (type == "float")
DoCommand<float>(config);
else if (type == "double")
DoCommand<double>(config);
else
RuntimeError("invalid precision specified: %s", type.c_str());
}
catch (std::exception& err)
{
// known exception type: report message and call stack, then fail
fprintf(stderr, "EXCEPTION occurred: %s", err.what());
Microsoft::MSR::CNTK::DebugUtil::PrintCallStack();
#ifdef _DEBUG
DebugBreak();
#endif
return -1;
}
catch (...)
{
// anything else: report what little we know, then fail
fprintf(stderr, "Unknown ERROR occurred");
Microsoft::MSR::CNTK::DebugUtil::PrintCallStack();
#ifdef _DEBUG
DebugBreak();
#endif
return -1;
}
return 0;
}
示例4: DisableLegacyUsage
static void DisableLegacyUsage(const ConfigParameters& TopLevelConfig, const ConfigArray& commands)
{
for (size_t i = 0; i < commands.size(); i++)
{
ConfigParameters cfgParameters(TopLevelConfig(commands[i]));
DisableLegacyTruncationSettings(TopLevelConfig, cfgParameters);
}
}
示例5:
// Equality comparison for ConfigArray.
// Two arrays compare equal when they contain the same number of values and
// every value in this array has a same-named, equal value in 'a'.
bool ConfigArray::operator == (const ConfigArray &a) const
{
    // Fix: the original comparison was asymmetric -- it only verified that each
    // of *this* array's values exists and matches in 'a', so values present
    // only in 'a' were silently ignored and (x == y) could differ from
    // (y == x). Comparing element counts first restores symmetry.
    if (values.size() != a.values.size())
        return false;

    list<ConfigValue*>::const_iterator it;
    for (it = values.begin(); it != values.end(); ++it){
        const ConfigValue *v = a.getValue((*it)->m_name);
        if (v == NULL) return false;     // name missing on the other side
        if (!(*v == *(*it))) return false; // name present but value differs
    }
    return true;
}
示例6: GetModelFromConfig
// Builds or loads a ComputationNetwork according to 'config'.
// If the config supplies a NetworkBuilder factory, the network is created from
// it; otherwise the model is read from 'modelPath'. In both paths, when the
// config key named by 'outputNodeNamesConfig' lists output nodes, the
// network's output-node set is patched to that list and the resolved names are
// returned through 'outputNodeNamesVector'.
// Returns the compiled (or factory-produced) network.
ComputationNetworkPtr GetModelFromConfig(const ConfigRecordType& config, const wstring& outputNodeNamesConfig, vector<wstring>& outputNodeNamesVector)
{
DEVICEID_TYPE deviceId = DeviceFromConfig(config);
// user-requested output node names; defaults to an empty array
ConfigArray outputNodeNames = config(outputNodeNamesConfig.c_str(), ConfigArray(""));
ComputationNetworkPtr net;
// first try if a NetworkBuilder is present
function<ComputationNetworkPtr(DEVICEID_TYPE)> createNetworkFn;
bool gotIt = TryGetNetworkFactory<ConfigRecordType, ElemType>(config, createNetworkFn);
if (gotIt)
{
// We have several ways to create a network.
net = createNetworkFn(deviceId);
if (outputNodeNames.size() > 0)
{
// the factory-built network is already compiled, so it must be
// invalidated before its output nodes can be patched, then recompiled
net->InvalidateCompiledNetwork();
PatchOutputNodes(net, outputNodeNames, outputNodeNamesVector);
net->CompileNetwork();
// BUGBUG: This will generate double Validation output in the log
}
}
else // no NetworkBuilder given: load from 'modelPath'
{
wstring modelPath = config(L"modelPath");
// We don't use CreateFromFile() here since the user might specify OutputNodeNames in the config.
// By not compiling the network before patching, we avoid double log output for validation.
net = make_shared<ComputationNetwork>(deviceId);
net->SetTraceLevel(config(L"traceLevel", 0));
net->Read<ElemType>(modelPath);
if (outputNodeNames.size() > 0)
PatchOutputNodes(net, outputNodeNames, outputNodeNamesVector);
net->CompileNetwork();
}
return net;
}
示例7: DisableLegacyTruncationSettings
// special temporary function to guard against a now invalid usage of "truncated" which exists in some IPG production setups
static void DisableLegacyTruncationSettings(const ConfigParameters& TopLevelConfig, const ConfigParameters& commandConfig)
{
if (TopLevelConfig.ExistsCurrent(L"Truncated"))
{
return;
}
// if any of the action has set a reader/SGD section and has different Truncated value for reader and SGD section
ConfigArray actions = commandConfig(L"action");
for (size_t i = 0; i < actions.size(); i++)
{
if (actions[i] == "train" || actions[i] == "trainRNN")
{
ConfigParameters sgd = ConfigParameters(commandConfig(L"SGD"));
ConfigParameters reader = ConfigParameters(commandConfig(L"reader"));
// reader and SGD sections are two must-have sections in train/trainRNN
if (reader.ExistsCurrent(L"Truncated") && !sgd.ExistsCurrent(L"Truncated"))
{
InvalidArgument("DisableLegacyUsage: setting Truncated only in reader section are not allowed. Please move Truncated=true/false to the top level section.");
}
}
}
}
示例8: DoCrossValidate
void DoCrossValidate(const ConfigParameters& config)
{
// test
ConfigParameters readerConfig(config(L"reader"));
readerConfig.Insert("traceLevel", config(L"traceLevel", "0"));
DEVICEID_TYPE deviceId = DeviceFromConfig(config);
ConfigArray minibatchSize = config(L"minibatchSize", "40960");
size_t epochSize = config(L"epochSize", "0");
if (epochSize == 0)
{
epochSize = requestDataSize;
}
wstring modelPath = config(L"modelPath");
intargvector mbSize = minibatchSize;
ConfigArray cvIntervalConfig = config(L"crossValidationInterval");
intargvector cvInterval = cvIntervalConfig;
size_t sleepSecondsBetweenRuns = config(L"sleepTimeBetweenRuns", "0");
int traceLevel = config(L"traceLevel", 0);
size_t numMBsToShowResult = config(L"numMBsToShowResult", "100");
size_t firstMBsToShowResult = config(L"firstMBsToShowResult", "0");
size_t maxSamplesInRAM = config(L"maxSamplesInRAM", (size_t)SIZE_MAX);
size_t numSubminiBatches = config(L"numSubminibatches", (size_t)1);
bool enableDistributedMBReading = config(L"distributedMBReading", false);
ConfigArray evalNodeNames = config(L"evalNodeNames", "");
vector<wstring> evalNodeNamesVector;
for (int i = 0; i < evalNodeNames.size(); ++i)
{
evalNodeNamesVector.push_back(evalNodeNames[i]);
}
std::vector<std::vector<EpochCriterion>> cvErrorResults;
std::vector<std::wstring> cvModels;
DataReader cvDataReader(readerConfig);
bool finalModelEvaluated = false;
for (size_t i = cvInterval[0]; i <= cvInterval[2]; i += cvInterval[1])
{
wstring cvModelPath = msra::strfun::wstrprintf(L"%ls.%lld", modelPath.c_str(), i);
if (!fexists(cvModelPath))
{
fprintf(stderr, "Model %ls does not exist.\n", cvModelPath.c_str());
if (finalModelEvaluated || !fexists(modelPath))
continue; // file missing
else
{
cvModelPath = modelPath;
finalModelEvaluated = true;
}
}
cvModels.push_back(cvModelPath);
auto net = ComputationNetwork::CreateFromFile<ElemType>(deviceId, cvModelPath);
// BUGBUG: ^^ Should use GetModelFromConfig()
SimpleEvaluator<ElemType> eval(net, MPIWrapper::GetInstance(), enableDistributedMBReading, numMBsToShowResult,
firstMBsToShowResult, traceLevel, maxSamplesInRAM, numSubminiBatches);
fprintf(stderr, "Model %ls --> \n", cvModelPath.c_str());
auto evalErrors = eval.Evaluate(&cvDataReader, evalNodeNamesVector, mbSize[0], epochSize);
cvErrorResults.push_back(evalErrors);
::Sleep(1000 * sleepSecondsBetweenRuns);
}
// find best model
if (cvErrorResults.size() == 0)
LogicError("No model is evaluated.");
vector<double> minErrors;
vector<int> minErrIds;
vector<EpochCriterion> evalErrors = cvErrorResults[0];
for (int i = 0; i < evalErrors.size(); ++i)
{
minErrors.push_back(evalErrors[i].Average());
minErrIds.push_back(0);
}
for (int i = 0; i < cvErrorResults.size(); i++)
{
evalErrors = cvErrorResults[i];
for (int j = 0; j < evalErrors.size(); j++)
{
if (evalErrors[j].Average() < minErrors[j])
{
minErrors[j] = evalErrors[j].Average();
minErrIds[j] = i;
}
}
}
fprintf(stderr, "Best models:\n");
fprintf(stderr, "------------\n");
//.........这里部分代码省略.........
示例9: wmainOldCNTKConfig
// called from wmain which is a wrapper that catches & repots Win32 exceptions
int wmainOldCNTKConfig(int argc, wchar_t* argv[])
{
std::string timestamp = TimeDateStamp();
PrintBanner(argc, argv, timestamp);
ConfigParameters config;
std::string rawConfigString = ConfigParameters::ParseCommandLine(argc, argv, config); // get the command param set they want
int traceLevel = config(L"traceLevel", 0);
#ifndef CPUONLY
ConfigValue val = config("deviceId", "auto");
if (!EqualCI(val, "cpu") && !EqualCI(val, "auto"))
{
if (static_cast<int>(val) >= 0) // gpu (id >= 0)
{
CheckSupportForGpu(static_cast<int>(val)); // throws if gpu is not supported
}
}
#endif
if (config(L"timestamping", false))
ProgressTracing::SetTimestampingFlag();
if (config(L"forceDeterministicAlgorithms", false))
Globals::ForceDeterministicAlgorithms();
// get the command param set they want
wstring logpath = config(L"stderr", L"");
wstring doneFile = config(L"doneFile", L"");
ConfigArray command = config(L"command", "train");
// parallel training
// The top-level 'parallelTrain' is a bool, not to be confused with the parallelTrain block inside SGD.
shared_ptr<Microsoft::MSR::CNTK::MPIWrapper> mpi;
auto ensureMPIWrapperCleanup = MakeScopeExit(&MPIWrapper::DeleteInstance);
// when running under MPI with more than one node, use 'true' as the default value for parallelTrain,
// 'false' otherwise.
bool paralleltrain = config(L"parallelTrain", (MPIWrapper::GetTotalNumberOfMPINodes() > 1));
if (paralleltrain)
{
mpi = MPIWrapper::GetInstance(true /*create*/);
}
g_shareNodeValueMatrices = config(L"shareNodeValueMatrices", false);
TracingGPUMemoryAllocator::SetTraceLevel(config(L"traceGPUMemoryAllocations", 0));
if (logpath != L"")
{
#if 1 // keep the ability to do it how it was done before 1.8; delete if noone needs it anymore
let useOldWay = ProgressTracing::GetTimestampingFlag(); // enable it when running in our server farm
if (useOldWay)
{
for (int i = 0; i < command.size(); i++) // append all 'command' entries
{
logpath += L"_";
logpath += (wstring)command[i];
}
logpath += L".log"; // append .log
}
if (paralleltrain && useOldWay)
{
std::wostringstream oss;
oss << mpi->CurrentNodeRank();
logpath += L"rank" + oss.str();
}
else
#endif
// for MPI workers except main, append .rankN
if (paralleltrain && mpi->CurrentNodeRank() != 0)
logpath += msra::strfun::wstrprintf(L".rank%d", mpi->CurrentNodeRank());
RedirectStdErr(logpath);
if (traceLevel == 0)
PrintBanner(argc, argv, timestamp); // repeat simple banner into log file
}
// full config info
if (traceLevel > 0)
{
PrintBuiltInfo();
PrintGpuInfo();
}
#ifdef _DEBUG
if (traceLevel > 0)
{
// This simply merges all the different config parameters specified (eg, via config files or via command line directly),
// and prints it.
fprintf(stderr, "\nConfiguration, Raw:\n\n");
LOGPRINTF(stderr, "%s\n", rawConfigString.c_str());
// Same as above, but all variables are resolved. If a parameter is set multiple times (eg, set in config, overridden at command line),
// All of these assignments will appear, even though only the last assignment matters.
fprintf(stderr, "\nConfiguration After Variable Resolution:\n\n");
//.........这里部分代码省略.........
示例10: DoCommands
void DoCommands(const ConfigParameters& config, const shared_ptr<MPIWrapper>& mpi)
{
ConfigArray command = config(L"command", "train");
if (Globals::ShouldForceDeterministicAlgorithms())
ForceDeterministicAlgorithmsOnCPU();
else
{
// Setting specified number of threads.
int numCPUThreads = config(L"numCPUThreads", "0");
numCPUThreads = CPUMatrix<ElemType>::SetNumThreads(numCPUThreads);
if (numCPUThreads > 0)
{
LOGPRINTF(stderr, "Using %d CPU threads.\n", numCPUThreads);
}
}
bool progressTracing = config(L"progressTracing", false);
// temporary hack to prevent users from failing due to a small breaking change related to the "truncated" flag (will be redone bigger and better some day)
DisableLegacyUsage(config, command);
// summarize command info upfront in the log and stdout
size_t fullTotalMaxEpochs = 0;
for (int i = 0; i < command.size(); i++)
{
// get the configuration parameters that match the command
ConfigParameters commandParams(config(command[i]));
ConfigArray action = commandParams("action", "train");
// determine the action to perform, and do it
for (int j = 0; j < action.size(); j++)
{
if (action[j] == "train" || action[j] == "trainRNN")
{
wstring modelPath = commandParams("modelPath");
size_t maxEpochs = GetMaxEpochs(commandParams);
if (progressTracing)
{
LOGPRINTF(stderr, "CNTKModelPath: %ls\n", modelPath.c_str());
LOGPRINTF(stderr, "CNTKCommandTrainInfo: %s : %d\n", command[i].c_str(), (int)maxEpochs);
}
fullTotalMaxEpochs += maxEpochs;
}
}
}
if (progressTracing)
{
LOGPRINTF(stderr, "CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : %d\n", (int)fullTotalMaxEpochs);
}
// set up progress tracing for compute cluster management
if (progressTracing && (!mpi || mpi->IsMainNode()))
{
ProgressTracing::SetTracingFlag();
ProgressTracing::TraceTotalNumberOfSteps(fullTotalMaxEpochs); // enable tracing, using this as the total number of epochs
}
size_t fullEpochsOffset = 0;
// execute the commands
for (int i = 0; i < command.size(); i++)
{
// get the configuration parameters that match the command
const string thisCommand = command[i];
ConfigParameters commandParams(config(thisCommand));
ConfigArray action = commandParams("action", "train");
int traceLevel = commandParams("traceLevel", "0");
if (progressTracing && ((mpi == nullptr) || mpi->IsMainNode()))
{
ProgressTracing::SetStepOffset(fullEpochsOffset); // this is the epoch number that SGD will log relative to
}
// determine the action to perform, and do it
for (int j = 0; j < action.size(); j++)
{
const string thisAction = action[j];
// print a banner to visually separate each action in the log
const char* delim = "##############################################################################";
string showActionAs = thisCommand + " command (" + thisAction + " action)";
fprintf(stderr, "\n");
LOGPRINTF(stderr, "%s\n", delim);
LOGPRINTF(stderr, "#%*s#\n", (int)(strlen(delim) - 2), "");
LOGPRINTF(stderr, "# %s%*s #\n", showActionAs.c_str(), (int)(strlen(delim) - showActionAs.size() - 4), "");
LOGPRINTF(stderr, "#%*s#\n", (int)(strlen(delim) - 2), "");
LOGPRINTF(stderr, "%s\n\n", delim);
if ((mpi == nullptr) || (commandstoRunOnAllRanks.find(thisAction) != commandstoRunOnAllRanks.end()) || mpi->IsMainNode())
{
if (thisAction == "train" || thisAction == "trainRNN")
{
if (progressTracing)
{
LOGPRINTF(stderr, "CNTKCommandTrainBegin: %s\n", command[i].c_str());
}
DoTrain<ConfigParameters, ElemType>(commandParams);
if (progressTracing)
{
//.........这里部分代码省略.........
示例11: DoWriteOutput
// Writes the values of the network's output nodes, as configured.
// Loads the model from 'modelPath' (building the network manually so that the
// user may override the output node set via 'outputNodeNames'), then either
//   - streams results through a DataWriter built from the 'writer' section, or
//   - formats results into 'outputPath', honoring the optional 'format' block.
// Throws InvalidArgument if neither 'writer' nor 'outputPath' is specified.
void DoWriteOutput(const ConfigParameters& config)
{
    ConfigParameters readerConfig(config(L"reader"));
    readerConfig.Insert("traceLevel", config(L"traceLevel", "0"));
    readerConfig.Insert("randomize", "None"); // we don't want randomization when output results

    DataReader testDataReader(readerConfig);

    DEVICEID_TYPE deviceId = DeviceFromConfig(config);
    ConfigArray minibatchSize = config(L"minibatchSize", "2048");
    wstring modelPath = config(L"modelPath");
    intargvector mbSize = minibatchSize;

    size_t epochSize = config(L"epochSize", "0");
    if (epochSize == 0)
    {
        epochSize = requestDataSize; // 0 means "process all available data"
    }

    ConfigArray outputNodeNames = config(L"outputNodeNames", "");
    vector<wstring> outputNodeNamesVector;

    // Note this is required since the user might specify OutputNodeNames in the config, so don't use CreateFromFile,
    // instead we build the network ourselves.
    auto net = make_shared<ComputationNetwork>(deviceId);
    net->Read<ElemType>(modelPath);

    if (outputNodeNames.size() > 0)
    {
        // replace the model's own output nodes with the configured set
        net->OutputNodes().clear();
        for (int i = 0; i < outputNodeNames.size(); ++i)
        {
            outputNodeNamesVector.push_back(outputNodeNames[i]);
            net->OutputNodes().emplace_back(net->GetNodeFromName(outputNodeNames[i]));
        }
    }
    net->CompileNetwork();

    SimpleOutputWriter<ElemType> writer(net, 1);

    if (config.Exists("writer"))
    {
        // output path 1: hand results to a DataWriter
        ConfigParameters writerConfig(config(L"writer"));
        bool bWriterUnittest = writerConfig(L"unittest", "false");
        DataWriter testDataWriter(writerConfig);
        writer.WriteOutput(testDataReader, mbSize[0], testDataWriter, outputNodeNamesVector, epochSize, bWriterUnittest);
    }
    else if (config.Exists("outputPath"))
    {
        // output path 2: format results into a file
        wstring outputPath = config(L"outputPath");

        // gather additional formatting options
        typename decltype(writer)::WriteFormattingOptions formattingOptions;
        if (config.Exists("format"))
        {
            ConfigParameters formatConfig(config(L"format"));
            if (formatConfig.ExistsCurrent("type")) // do not inherit 'type' from outer block
            {
                string type = formatConfig(L"type");
                if (type == "real") formattingOptions.isCategoryLabel = false;
                else if (type == "category") formattingOptions.isCategoryLabel = true;
                else InvalidArgument("write: type must be 'real' or 'category'");
                if (formattingOptions.isCategoryLabel)
                    formattingOptions.labelMappingFile = (wstring)formatConfig(L"labelMappingFile", L"");
            }
            // each option falls back to its current (default) value when absent
            formattingOptions.transpose = formatConfig(L"transpose", formattingOptions.transpose);
            formattingOptions.prologue = formatConfig(L"prologue", formattingOptions.prologue);
            formattingOptions.epilogue = formatConfig(L"epilogue", formattingOptions.epilogue);
            formattingOptions.sequenceSeparator = formatConfig(L"sequenceSeparator", formattingOptions.sequenceSeparator);
            formattingOptions.sequencePrologue = formatConfig(L"sequencePrologue", formattingOptions.sequencePrologue);
            formattingOptions.sequenceEpilogue = formatConfig(L"sequenceEpilogue", formattingOptions.sequenceEpilogue);
            formattingOptions.elementSeparator = formatConfig(L"elementSeparator", formattingOptions.elementSeparator);
            formattingOptions.sampleSeparator = formatConfig(L"sampleSeparator", formattingOptions.sampleSeparator);
            formattingOptions.precisionFormat = formatConfig(L"precisionFormat", formattingOptions.precisionFormat);
        }

        writer.WriteOutput(testDataReader, mbSize[0], outputPath, outputNodeNamesVector, formattingOptions, epochSize);
    }
    else
        // Fix: restored the missing space in the error message ("'writer'or" -> "'writer' or").
        InvalidArgument("write command: You must specify either 'writer' or 'outputPath'");
}
示例12: wmainOldCNTKConfig
int wmainOldCNTKConfig(int argc, wchar_t* argv[]) // called from wmain which is a wrapper that catches & repots Win32 exceptions
{
ConfigParameters config;
std::string rawConfigString = ConfigParameters::ParseCommandLine(argc, argv, config);
// get the command param set they want
wstring logpath = config(L"stderr", L"");
// [1/26/2015 erw, add done file so that it can be used on HPC]
wstring DoneFile = config(L"DoneFile", L"");
ConfigArray command = config(L"command", "train");
// paralleltrain training
g_mpi = nullptr;
bool paralleltrain = config(L"parallelTrain", "false");
if (paralleltrain)
{
g_mpi = new MPIWrapper();
}
g_shareNodeValueMatrices = config(L"shareNodeValueMatrices", false);
TracingGPUMemoryAllocator::SetTraceLevel(config(L"traceGPUMemoryAllocations", 0));
if (logpath != L"")
{
for (int i = 0; i < command.size(); i++)
{
logpath += L"_";
logpath += (wstring) command[i];
}
logpath += L".log";
if (paralleltrain)
{
std::wostringstream oss;
oss << g_mpi->CurrentNodeRank();
logpath += L"rank" + oss.str();
}
RedirectStdErr(logpath);
}
PrintBuiltInfo(); // this one goes to log file
std::string timestamp = TimeDateStamp();
// dump config info
fprintf(stderr, "running on %s at %s\n", GetHostName().c_str(), timestamp.c_str());
fprintf(stderr, "command line: \n");
for (int i = 0; i < argc; i++)
{
fprintf(stderr, "%s ", WCharToString(argv[i]).c_str());
}
// This simply merges all the different config parameters specified (eg, via config files or via command line directly),
// and prints it.
fprintf(stderr, "\n\n>>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>\n");
fprintf(stderr, "%s\n", rawConfigString.c_str());
fprintf(stderr, "<<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<\n");
// Same as above, but all variables are resolved. If a parameter is set multiple times (eg, set in config, overriden at command line),
// All of these assignments will appear, even though only the last assignment matters.
fprintf(stderr, "\n>>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>\n");
fprintf(stderr, "%s\n", config.ResolveVariables(rawConfigString).c_str());
fprintf(stderr, "<<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<\n");
// This outputs the final value each variable/parameter is assigned to in config (so if a parameter is set multiple times, only the last
// value it is set to will appear).
fprintf(stderr, "\n>>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>\n");
config.dumpWithResolvedVariables();
fprintf(stderr, "<<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<\n");
fprintf(stderr, "command: ");
for (int i = 0; i < command.size(); i++)
{
fprintf(stderr, "%s ", command[i].c_str());
}
// run commands
std::string type = config(L"precision", "float");
// accept old precision key for backward compatibility
if (config.Exists("type"))
{
type = config(L"type", "float");
}
fprintf(stderr, "\nprecision = %s\n", type.c_str());
if (type == "float")
{
DoCommands<float>(config);
}
else if (type == "double")
{
DoCommands<double>(config);
}
else
{
RuntimeError("invalid precision specified: %s", type.c_str());
}
// still here , write a DoneFile if necessary
//.........这里部分代码省略.........
示例13: DoCommands
void DoCommands(const ConfigParameters& config)
{
ConfigArray command = config(L"command", "train");
int numCPUThreads = config(L"numCPUThreads", "0");
numCPUThreads = CPUMatrix<ElemType>::SetNumThreads(numCPUThreads);
if (numCPUThreads > 0)
{
std::cerr << "Using " << numCPUThreads << " CPU threads" << endl;
}
bool progressTracing = config(L"progressTracing", false);
// temporary hack to prevent users from failling for a small breaking change related to the "truncated" flag (will be redone bigger and better some day)
DisableLegacyUsage(config, command);
// summarize command info upfront in the log and stdout
size_t fullTotalMaxEpochs = 0;
for (int i = 0; i < command.size(); i++)
{
// get the configuration parameters that match the command
ConfigParameters commandParams(config(command[i]));
ConfigArray action = commandParams("action", "train");
// determine the action to perform, and do it
for (int j = 0; j < action.size(); j++)
{
if (action[j] == "train" || action[j] == "trainRNN")
{
wstring modelPath = commandParams("modelPath");
std::wcerr << "CNTKModelPath: " << modelPath << endl;
size_t maxEpochs = GetMaxEpochs(commandParams);
std::cerr << "CNTKCommandTrainInfo: " + command[i] << " : " << maxEpochs << endl;
fullTotalMaxEpochs += maxEpochs;
}
}
}
std::cerr << "CNTKCommandTrainInfo: CNTKNoMoreCommands_Total : " << fullTotalMaxEpochs << endl;
// set up progress tracing for compute cluster management
if (progressTracing && ((g_mpi == nullptr) || g_mpi->IsMainNode()))
{
ProgressTracing::TraceTotalNumberOfSteps(fullTotalMaxEpochs); // enable tracing, using this as the total number of epochs
}
size_t fullEpochsOffset = 0;
// execute the commands
for (int i = 0; i < command.size(); i++)
{
// get the configuration parameters that match the command
ConfigParameters commandParams(config(command[i]));
ConfigArray action = commandParams("action", "train");
if (progressTracing && ((g_mpi == nullptr) || g_mpi->IsMainNode()))
{
ProgressTracing::SetStepOffset(fullEpochsOffset); // this is the epoch number that SGD will log relative to
}
// determine the action to perform, and do it
for (int j = 0; j < action.size(); j++)
{
if (action[j] == "train" || action[j] == "trainRNN")
{
std::cerr << "CNTKCommandTrainBegin: " + command[i] << endl;
DoTrain<ConfigParameters, ElemType>(commandParams);
std::cerr << "CNTKCommandTrainEnd: " + command[i] << endl;
fullEpochsOffset += GetMaxEpochs(commandParams);
}
else if (action[j] == "adapt")
{
DoAdapt<ElemType>(commandParams);
}
else if (action[j] == "test" || action[j] == "eval")
{
DoEval<ElemType>(commandParams);
}
else if (action[j] == "edit")
{
DoEdit<ElemType>(commandParams);
}
else if (action[j] == "cv")
{
DoCrossValidate<ElemType>(commandParams);
}
else if (action[j] == "write")
{
DoWriteOutput<ElemType>(commandParams);
}
else if (action[j] == "devtest")
{
TestCn<ElemType>(config); // for "devtest" action pass the root config instead
}
else if (action[j] == "dumpnode")
{
DumpNodeInfo<ElemType>(commandParams);
}
else if (action[j] == "convertdbn")
{
//.........这里部分代码省略.........
示例14: wmainOldCNTKConfig
// ---------------------------------------------------------------------------
// main() for old CNTK config language
// ---------------------------------------------------------------------------
// called from wmain which is a wrapper that catches & repots Win32 exceptions
int wmainOldCNTKConfig(int argc, wchar_t* argv[])
{
ConfigParameters config;
std::string rawConfigString = ConfigParameters::ParseCommandLine(argc, argv, config); // get the command param set they want
bool timestamping = config(L"timestamping", false);
if (timestamping)
{
ProgressTracing::SetTimestampingFlag();
}
// get the command param set they want
wstring logpath = config(L"stderr", L"");
// [1/26/2015 erw, add done file so that it can be used on HPC]
wstring DoneFile = config(L"DoneFile", L"");
ConfigArray command = config(L"command", "train");
// paralleltrain training
shared_ptr<Microsoft::MSR::CNTK::MPIWrapper> mpi;
bool paralleltrain = config(L"parallelTrain", "false");
if (paralleltrain)
mpi = MPIWrapper::GetInstance(true /*create*/);
g_shareNodeValueMatrices = config(L"shareNodeValueMatrices", false);
TracingGPUMemoryAllocator::SetTraceLevel(config(L"traceGPUMemoryAllocations", 0));
if (logpath != L"")
{
for (int i = 0; i < command.size(); i++)
{
logpath += L"_";
logpath += (wstring) command[i];
}
logpath += L".log";
if (paralleltrain)
{
std::wostringstream oss;
oss << mpi->CurrentNodeRank();
logpath += L"rank" + oss.str();
}
RedirectStdErr(logpath);
}
PrintBuiltInfo(); // this one goes to log file
std::string timestamp = TimeDateStamp();
// dump config info
fprintf(stderr, "\n");
LOGPRINTF(stderr, "Running on %s at %s\n", GetHostName().c_str(), timestamp.c_str());
LOGPRINTF(stderr, "Command line: \n");
for (int i = 0; i < argc; i++)
fprintf(stderr, "%*s%ls", i > 0 ? 2 : 0, "", argv[i]); // use 2 spaces for better visual separability
fprintf(stderr, "\n\n");
#if 1 //def _DEBUG
// This simply merges all the different config parameters specified (eg, via config files or via command line directly),
// and prints it.
fprintf(stderr, "\n\n");
LOGPRINTF(stderr, ">>>>>>>>>>>>>>>>>>>> RAW CONFIG (VARIABLES NOT RESOLVED) >>>>>>>>>>>>>>>>>>>>\n");
LOGPRINTF(stderr, "%s\n", rawConfigString.c_str());
LOGPRINTF(stderr, "<<<<<<<<<<<<<<<<<<<< RAW CONFIG (VARIABLES NOT RESOLVED) <<<<<<<<<<<<<<<<<<<<\n");
// Same as above, but all variables are resolved. If a parameter is set multiple times (eg, set in config, overridden at command line),
// All of these assignments will appear, even though only the last assignment matters.
fprintf(stderr, "\n");
LOGPRINTF(stderr, ">>>>>>>>>>>>>>>>>>>> RAW CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>\n");
LOGPRINTF(stderr, "%s\n", config.ResolveVariables(rawConfigString).c_str());
LOGPRINTF(stderr, "<<<<<<<<<<<<<<<<<<<< RAW CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<\n");
// This outputs the final value each variable/parameter is assigned to in config (so if a parameter is set multiple times, only the last
// value it is set to will appear).
fprintf(stderr, "\n");
LOGPRINTF(stderr, ">>>>>>>>>>>>>>>>>>>> PROCESSED CONFIG WITH ALL VARIABLES RESOLVED >>>>>>>>>>>>>>>>>>>>\n");
config.dumpWithResolvedVariables();
LOGPRINTF(stderr, "<<<<<<<<<<<<<<<<<<<< PROCESSED CONFIG WITH ALL VARIABLES RESOLVED <<<<<<<<<<<<<<<<<<<<\n");
#endif
LOGPRINTF(stderr, "Commands:");
for (int i = 0; i < command.size(); i++)
fprintf(stderr, " %s", command[i].c_str());
fprintf(stderr, "\n");
// run commands
std::string type = config(L"precision", "float");
// accept old precision key for backward compatibility
if (config.Exists("type"))
InvalidArgument("CNTK: Use of 'type' parameter is deprecated, it is called 'precision' now.");
LOGPRINTF(stderr, "Precision = \"%s\"\n", type.c_str());
if (type == "float")
DoCommands<float>(config, mpi);
else if (type == "double")
DoCommands<double>(config, mpi);
//.........这里部分代码省略.........
示例15: TestConfiguration
void TestConfiguration(const ConfigParameters& configBase)
{
ConfigParameters configMacros = configBase("macroExample");
for (auto iterMacro = configMacros.begin(); iterMacro != configMacros.end(); iterMacro++)
{
std::map<std::string, ConfigValue> paramsMap;
ConfigParameters configCN = iterMacro->second;
if (configCN.Exists("parameters"))
{
ConfigArray params = configCN("parameters");
for (int i = 0; i < params.size(); ++i)
paramsMap[params[i]] = ConfigValue("uninitialized");
}
ConfigParameters configNodes = configCN("NodeList");
for (auto iter = configNodes.begin();
iter != configNodes.end(); iter++)
{
std::wstring nodeName;
nodeName = msra::strfun::utf16(iter->first);
ConfigArray configNode = iter->second;
std::string opName = configNode[0];
if (IsParameter(paramsMap, opName))
{
;
}
if (opName == "InputValue" && configNode.size() >= 2)
{
size_t rows = 0;
if (!IsParameter(paramsMap, configNode[1]))
rows = configNode[1];
}
else if (opName == "LearnableParameter" && configNode.size() >= 3)
{
size_t rows = 0;
if (!IsParameter(paramsMap, configNode[1]))
rows = configNode[1];
size_t cols = 0;
if (!IsParameter(paramsMap, configNode[2]))
cols = configNode[2];
bool learningRateMultiplier = 0;
bool init = false;
ConfigArray initData;
// look for optional parameters
for (int i = 3; i < configNode.size(); ++i)
{
bool needsGradient = false;
ConfigParameters configParam = configNode[i];
if (configParam.Exists("learningRateMultiplier")) // TODO: should this be a test for 'true' rather than Exists()?
needsGradient = (float)configParam("learningRateMultiplier") > 0? true : false;
else if (configParam.Exists("init"))
{
init = true;
initData = configParam["init"];
}
}
// if initializing, do so now
if (init)
{
bool uniform = true;
ElemType initValueScale = 1;
size_t inputSize = cols;
if (initData.size() > 0)
initValueScale = initData[0];
if (initData.size() > 1)
uniform = EqualCI(initData[1], "uniform");
}
}
}
// now link up all the nodes
configNodes = configCN("Relation");
for (auto iter = configNodes.begin(); iter != configNodes.end(); iter++)
{
std::wstring nodeName = msra::strfun::utf16(iter->first);
ConfigArray configNode = iter->second;
int numChildren = (int) configNode.size();
for (int i = 0; i < numChildren; ++i)
{
std::wstring nodeName = configNode[i];
}
}
ConfigParameters configRoots = configCN("RootNodes");
ConfigArray configNode = configRoots("FeatureNodes");
for (size_t i = 0; i < configNode.size(); i++)
{
std::wstring nodeName = configNode[i];
}
if (configRoots.Exists("LabelNodes"))
{
configNode = configRoots("LabelNodes");
for (size_t i = 0; i < configNode.size(); i++)
{
std::wstring nodeName = configNode[i];
}
}
//.........这里部分代码省略.........