本文整理汇总了C++中OptionParser::HelpRequested方法的典型用法代码示例。如果您正苦于以下问题:C++ OptionParser::HelpRequested方法的具体用法?C++ OptionParser::HelpRequested怎么用?C++ OptionParser::HelpRequested使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类OptionParser
的用法示例。
在下文中一共展示了OptionParser::HelpRequested方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
// ****************************************************************************
// Method: main()
//
// Purpose:
// serial and parallel main for OpenCL level0 benchmarks
//
// Arguments:
// argc, argv
//
// Programmer: SHOC Team
// Creation: The Epoch
//
// Modifications:
// Jeremy Meredith, Tue Jan 12 15:09:33 EST 2010
// Changed the way device selection works. It now defaults to the device
// index corresponding to the process's rank within a node if no devices
// are specified on the command line, and otherwise, round-robins
// the list of devices among the tasks.
//
// Gabriel Marin, Tue Jun 01 15:38 EST 2010
// Check that we have valid (not NULL) context and queue objects before
// running the benchmarks. Errors inside CreateContextFromSingleDevice or
// CreateCommandQueueForContextAndDevice were not propagated out to the main
// program.
//
// Jeremy Meredith, Wed Nov 10 14:20:47 EST 2010
// Split timing reports into detailed and summary. For serial code, we
// report all trial values, and for parallel, skip the per-process vals.
// Also detect and print outliers from parallel runs.
//
// ****************************************************************************
// Entry point for the (optionally MPI-parallel) OpenCL benchmark driver:
// parses shared command-line options, optionally prints platform/device
// info, then (in the omitted remainder) runs the benchmark.
int main(int argc, char *argv[])
{
int ret = 0;
try
{
#ifdef PARALLEL
// In parallel builds, initialize MPI first and learn this task's rank.
int rank, size;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
cout << "MPI Task "<< rank << "/" << size - 1 << " starting....\n";
#endif
OptionParser op;
//Add shared options to the parser
op.addOption("platform", OPT_INT, "0", "specify OpenCL platform to use",
'p');
op.addOption("device", OPT_VECINT, "", "specify device(s) to run on", 'd');
op.addOption("passes", OPT_INT, "10", "specify number of passes", 'n');
op.addOption("size", OPT_VECINT, "1", "specify problem size", 's');
op.addOption("infoDevices", OPT_BOOL, "",
"show info for available platforms and devices", 'i');
op.addOption("verbose", OPT_BOOL, "", "enable verbose output", 'v');
op.addOption("quiet", OPT_BOOL, "", "write minimum necessary to standard output", 'q');
// Let the specific benchmark register its own options.
addBenchmarkSpecOptions(op);
if (!op.parse(argc, argv))
{
#ifdef PARALLEL
// Only rank 0 prints usage to avoid N copies of the same text;
// MPI must still be finalized on every rank before returning.
if (rank == 0)
op.usage();
MPI_Finalize();
#else
op.usage();
#endif
// Exit 0 if the user explicitly asked for help; otherwise a parse
// failure is an error (exit 1).
return (op.HelpRequested() ? 0 : 1 );
}
// --infoDevices: print platform/device information instead of (or before)
// running the benchmark.
if (op.getOptionBool("infoDevices"))
{
#define DEBUG_DEVICE_CONTAINER 0
#ifdef PARALLEL
// execute following code only if I am the process of lowest
// rank on this node
NodeInfo NI;
int mynoderank = NI.nodeRank();
if (mynoderank==0)
{
// Merge per-node device info across node-lowest-rank processes
// using the node-lowest-rank communicator (NLR).
int nlrrank, nlrsize;
MPI_Comm nlrcomm = NI.getNLRComm();
MPI_Comm_size(nlrcomm, &nlrsize);
MPI_Comm_rank(nlrcomm, &nlrrank);
OpenCLNodePlatformContainer ndc1;
OpenCLMultiNodeContainer localMnc(ndc1);
localMnc.doMerge (nlrrank, nlrsize, nlrcomm);
if (rank==0) // I am the global rank 0, print all configurations
localMnc.Print (cout);
}
#else
// Serial build: just enumerate and print this node's platforms/devices.
OpenCLNodePlatformContainer ndc1;
ndc1.Print (cout);
#if DEBUG_DEVICE_CONTAINER
OpenCLMultiNodeContainer mnc1(ndc1), mnc2;
mnc1.Print (cout);
ostringstream oss;
//......... remainder of this function omitted by the example source .........
示例2: main
// ****************************************************************************
// Function: main
//
// Purpose:
// The main function takes care of initialization (device and MPI), then
// performs the benchmark and prints results.
//
// Arguments:
//
//
// Programmer: Jeremy Meredith
// Creation:
//
// Modifications:
// Jeremy Meredith, Wed Nov 10 14:20:47 EST 2010
// Split timing reports into detailed and summary. For serial code, we
// report all trial values, and for parallel, skip the per-process vals.
// Also detect and print outliers from parallel runs.
//
// ****************************************************************************
// Entry point for the (optionally MPI-parallel) CUDA benchmark driver:
// parses options, maps this task to a CUDA device, then (in the omitted
// remainder) runs the benchmark and reports results.
int main(int argc, char *argv[])
{
int ret = 0;
bool noprompt = false;
try
{
#ifdef PARALLEL
// In parallel builds, initialize MPI first and learn this task's rank.
int rank, size;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
cerr << "MPI Task " << rank << "/" << size - 1 << " starting....\n";
#endif
// Get args
OptionParser op;
//Add shared options to the parser
op.addOption("device", OPT_VECINT, "0",
"specify device(s) to run on", 'd');
op.addOption("verbose", OPT_BOOL, "", "enable verbose output", 'v');
op.addOption("passes", OPT_INT, "10", "specify number of passes", 'n');
op.addOption("size", OPT_INT, "1", "specify problem size", 's');
op.addOption("infoDevices", OPT_BOOL, "",
"show info for available platforms and devices", 'i');
op.addOption("quiet", OPT_BOOL, "", "write minimum necessary to standard output", 'q');
#ifdef _WIN32
// Windows-only: suppress the "press a key to exit" prompt at shutdown.
op.addOption("noprompt", OPT_BOOL, "", "don't wait for prompt at program exit");
#endif
// Let the specific benchmark register its own options.
addBenchmarkSpecOptions(op);
if (!op.parse(argc, argv))
{
#ifdef PARALLEL
// Only rank 0 prints usage; MPI is finalized on every rank.
if (rank == 0)
op.usage();
MPI_Finalize();
#else
op.usage();
#endif
// Exit 0 if the user explicitly asked for help; otherwise a parse
// failure is an error (exit 1).
return (op.HelpRequested() ? 0 : 1);
}
bool verbose = op.getOptionBool("verbose");
bool infoDev = op.getOptionBool("infoDevices");
#ifdef _WIN32
noprompt = op.getOptionBool("noprompt");
#endif
int device;
#ifdef PARALLEL
// Map this task to a device: round-robin the user-supplied device list
// by the task's rank within its node; past the end of the list,
// task i simply tests device i.
NodeInfo ni;
int myNodeRank = ni.nodeRank();
vector<long long> deviceVec = op.getOptionVecInt("device");
if (myNodeRank >= deviceVec.size()) {
// Default is for task i to test device i
device = myNodeRank;
} else {
device = deviceVec[myNodeRank];
}
#else
// Serial build: use the first (or only) device index given.
device = op.getOptionVecInt("device")[0];
#endif
// Clamp an out-of-range device index to device 0 with a warning rather
// than failing outright.
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (device >= deviceCount) {
cerr << "Warning: device index: " << device <<
" out of range, defaulting to device 0.\n";
device = 0;
}
// Initialization
EnumerateDevicesAndChoose(device, infoDev);
// With --infoDevices, printing device info is all that was requested.
if( infoDev )
{
return 0;
}
ResultDatabase resultDB;
//......... remainder of this function omitted by the example source .........