This article collects typical usage examples of the C++ FileNode::size method. If you are wondering how FileNode::size is used in practice, or are looking for concrete FileNode::size examples, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, FileNode.
A total of 15 code examples of FileNode::size are shown below; by default they are sorted by popularity. You can upvote the examples you like or find useful, and your feedback helps the system recommend better C++ code samples.
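Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: FileNode::size() reports the number of elements in a sequence (or map) node, so it is typically used to pre-size a container and to bound an index loop. The file name config.yml and the node name "samples" below are illustrative placeholders, not taken from any of the examples.
// Minimal sketch (assumed file and node names): read a top-level sequence
// named "samples" from config.yml and size a vector with FileNode::size().
#include <opencv2/core/core.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    cv::FileStorage fs("config.yml", cv::FileStorage::READ); // hypothetical file
    if (!fs.isOpened())
        return 1;

    cv::FileNode samples = fs["samples"];            // hypothetical node name
    CV_Assert(samples.type() == cv::FileNode::SEQ);  // size() counts the sequence elements

    std::vector<std::string> values(samples.size()); // pre-size the container from size()
    for (size_t i = 0; i < samples.size(); i++)      // size() also bounds the index loop
        samples[(int)i] >> values[i];                // read each element by index

    std::cout << "read " << values.size() << " entries" << std::endl;
    fs.release();
    return 0;
}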
Example 1: deserialize
void ImageMolecule::deserialize(const cv::FileNode& fn)
{
    FileNode atoms = fn["atoms"];
    CV_Assert(atoms.type() == FileNode::SEQ);
    std::map<int, Ptr<ImageAtom> > a_map;
    for (size_t i = 0; i < atoms.size(); i++)
    {
        Ptr<ImageAtom> atom(new ImageAtom);
        atom->deserialize(atoms[i]);
        a_map[atom->uid()] = atom;
        // we will insert from pairs...
        insertAtom(atom);
    }
    FileNode pairs = fn["pairs"];
    CV_Assert(pairs.type() == FileNode::SEQ);
    vector<AtomPair> pairs_temp;
    pairs_temp.resize(pairs.size());
    for (size_t i = 0; i < pairs.size(); i++)
    {
        pairs_temp[i].deserialize(pairs[i]);
        pairs_temp[i].setAtom1(a_map[pairs_temp[i].atom1()->uid()]);
        pairs_temp[i].setAtom2(a_map[pairs_temp[i].atom2()->uid()]);
    }
    insertPairs(pairs_temp);
}
Example 2: read
void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
                               CvDTreeTrainData* _data )
{
    int maxCatCount = ((CvCascadeBoostTrainData*)_data)->featureEvaluator->getMaxCatCount();
    int subsetN = (maxCatCount + 31)/32;
    int step = 3 + ( maxCatCount>0 ? subsetN : 1 );
    queue<CvDTreeNode*> internalNodesQueue;
    FileNodeIterator internalNodesIt, leafValsuesIt;
    CvDTreeNode* prntNode, *cldNode;
    clear();
    data = _data;
    ensemble = _ensemble;
    pruned_tree_idx = 0;
    // read tree nodes
    FileNode rnode = node[CC_INTERNAL_NODES];
    internalNodesIt = rnode.end();
    leafValsuesIt = node[CC_LEAF_VALUES].end();
    internalNodesIt--; leafValsuesIt--;
    for( size_t i = 0; i < rnode.size()/step; i++ )
    {
        prntNode = data->new_node( 0, 0, 0, 0 );
        if ( maxCatCount > 0 )
        {
            prntNode->split = data->new_split_cat( 0, 0 );
            for( int j = subsetN-1; j>=0; j--)
            {
                *internalNodesIt >> prntNode->split->subset[j]; internalNodesIt--;
            }
        }
        else
        {
            //......... part of the code omitted here .........
Example 3: ReadRightRects
void ReadRightRects(vector<ImageRecognition::SlidingRect> &rightRects, const string &xml_filename, RecognitionStatistics &stat)
{
    using namespace Utils;
    FileStorage file_storage(xml_filename, FileStorage::READ);
    FileNode images = file_storage["images"];
    rightRects.reserve(images.size());
    for (FileNodeIterator it = images.begin(); it != images.end(); ++it)
    {
        string part_filename = string(*it);
        int dot_pos = part_filename.find_first_of('.');
        if (dot_pos != -1)
            part_filename = part_filename.substr(0, dot_pos);
        stringstream ss(part_filename);
        vector<string> parts;
        string part;
        while (getline(ss, part, '_'))
            parts.push_back(part);
        rightRects.push_back(ImageRecognition::SlidingRect());
        int last = parts.size() - 1;
        rightRects.back().rect.x = str2int(parts[last - 3]);
        rightRects.back().rect.y = str2int(parts[last - 2]);
        rightRects.back().rect.width = str2int(parts[last - 1]);
        rightRects.back().rect.height = str2int(parts[last]);
    }
}
Example 4: loadHist
void loadHist(mH2& hist){
    FileStorage fs("test123.xml", FileStorage::READ);
    FileNode n = fs["ModelHistograms"];
    // Loop through classes
    for(int i = 0; i < (int)n.size(); i++){
        stringstream ss;
        ss << "Class_";
        ss << i;
        string a = ss.str();
        FileNode n1 = n[a];
        // Loop through each class's models
        for(int j = 0; j < (int)n1.size(); j++){
            stringstream ss1;
            ss1 << "Model_";
            ss1 << j;
            string b = ss1.str();
            FileNode n2 = n1[b];
            // Read each stored Mat into the histogram entry
            FileNodeIterator it = n2.begin(), it_end = n2.end();
            for(; it != it_end; ++it){
                (*it) >> hist[i][j];
            }
        }
    }
    fs.release();
}
Example 5: load
void Expression::load(std::string filename) {
    FileStorage fs(ofToDataPath(filename), FileStorage::READ);
    description = (std::string) fs["description"];
    FileNode samplesNode = fs["samples"];
    int n = samplesNode.size();
    samples.resize(n);
    for(int i = 0; i < n; i++) {
        samplesNode[i] >> samples[i];
    }
}
Example 6: readMatBinary
void NMPTUtils::readMatBinary(const FileNode &tm, Mat &mat) {
    //FileNode tm = fs[name];
    int rows = (int)tm["rows"], cols = (int)tm["cols"], type = (int)tm["type"];
    mat.create(rows,cols,type);
    if (rows > 0 && cols > 0) {
        vector<string> vs;
        FileNode tl = tm["data"];
        //std::cout << tl.type() << std::endl;
        CV_Assert(tl.type() == FileNode::SEQ);
        vs.resize(tl.size());
        for (size_t i = 0; i < tl.size(); i++) {
            tl[i] >> vs[i];
        }
        // CV_Assert(tl.size() == (size_t)numRegs);
        // tm["data"] >> vs;
        string s;
        joinString(vs, s);
        asciiToBinary(s, mat.data, mat.rows*mat.step);
    }
}
Example 7: read
bool HOGEvaluator::read( const FileNode& node )
{
    features->resize(node.size());
    featuresPtr = &(*features)[0];
    FileNodeIterator it = node.begin(), it_end = node.end();
    for(int i = 0; it != it_end; ++it, i++)
    {
        if(!featuresPtr[i].read(*it))
            return false;
    }
    return true;
}
Example 8: readStages
bool CvCascadeClassifier::readStages( const FileNode &node)
{
    FileNode rnode = node[CC_STAGES];
    if (rnode.empty() || !rnode.isSeq())
        return false;
    stageClassifiers.reserve(numStages);
    FileNodeIterator it = rnode.begin();
    for( int i = 0; i < min( (int)rnode.size(), numStages ); i++, it++ )
    {
        Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>();
        if ( !tempStage->read( *it, featureEvaluator, *stageParams) )
            return false;
        stageClassifiers.push_back(tempStage);
    }
    return true;
}
Example 9: readRunParams
virtual int readRunParams( FileStorage& fs )
{
    int code = CV_StereoMatchingTest::readRunParams(fs);
    FileNode fn = fs.getFirstTopLevelNode();
    assert(fn.isSeq());
    for( int i = 0; i < (int)fn.size(); i+=4 )
    {
        string caseName = fn[i], datasetName = fn[i+1];
        RunParams params;
        string ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
        string iterCount = fn[i+3]; params.iterCount = atoi(iterCount.c_str());
        caseNames.push_back( caseName );
        caseDatasets.push_back( datasetName );
        caseRunParams.push_back( params );
    }
    return code;
}
Example 10: read
void read(const FileNode& fn)
{
    clear();
    read_params(fn["training_params"]);
    fn["weights"] >> weights;
    fn["means"] >> means;
    FileNode cfn = fn["covs"];
    FileNodeIterator cfn_it = cfn.begin();
    int i, n = (int)cfn.size();
    covs.resize(n);
    for( i = 0; i < n; i++, ++cfn_it )
        (*cfn_it) >> covs[i];
    decomposeCovs();
    computeLogWeightDivDet();
}
Example 11: readStages
bool CvCascadeClassifier::readStages( const FileNode &node)
{
    FileNode rnode = node[CC_STAGES];
    if (rnode.empty() || !rnode.isSeq())
        return false;
    stageClassifiers.reserve(numStages);
    FileNodeIterator it = rnode.begin();
    for( int i = 0; i < min( (int)rnode.size(), numStages ); i++, it++ )
    {
        CvCascadeBoost* tempStage = new CvCascadeBoost;
        if ( !tempStage->read( *it, (CvFeatureEvaluator *)featureEvaluator, *((CvCascadeBoostParams*)stageParams) ) )
        {
            delete tempStage;
            return false;
        }
        stageClassifiers.push_back(tempStage);
    }
    return true;
}
Example 12: readDatasetsParams
int CV_StereoMatchingTest::readDatasetsParams( FileStorage& fs )
{
    if( !fs.isOpened() )
    {
        ts->printf( CvTS::LOG, "datasetsParams can not be read " );
        return CvTS::FAIL_INVALID_TEST_DATA;
    }
    datasetsParams.clear();
    FileNode fn = fs.getFirstTopLevelNode();
    assert(fn.isSeq());
    for( int i = 0; i < (int)fn.size(); i+=3 )
    {
        string name = fn[i];
        DatasetParams params;
        string sf = fn[i+1]; params.dispScaleFactor = atoi(sf.c_str());
        string uv = fn[i+2]; params.dispUnknVal = atoi(uv.c_str());
        datasetsParams[name] = params;
    }
    return CvTS::OK;
}
Example 13: read_params
int CV_MLBaseTest::read_params( CvFileStorage* __fs )
{
    FileStorage _fs(__fs, false);
    if( !_fs.isOpened() )
        test_case_count = -1;
    else
    {
        FileNode fn = _fs.getFirstTopLevelNode()["run_params"][modelName];
        test_case_count = (int)fn.size();
        if( test_case_count <= 0 )
            test_case_count = -1;
        if( test_case_count > 0 )
        {
            dataSetNames.resize( test_case_count );
            FileNodeIterator it = fn.begin();
            for( int i = 0; i < test_case_count; i++, ++it )
            {
                dataSetNames[i] = (string)*it;
            }
        }
    }
    return cvtest::TS::OK;
}
Example 14: run
void run(int)
{
    double ranges[][2] = {{0, 256}, {-128, 128}, {0, 65536}, {-32768, 32768},
        {-1000000, 1000000}, {-10, 10}, {-10, 10}};
    RNG& rng = ts->get_rng();
    RNG rng0;
    test_case_count = 4;
    int progress = 0;
    MemStorage storage(cvCreateMemStorage(0));
    for( int idx = 0; idx < test_case_count; idx++ )
    {
        ts->update_context( this, idx, false );
        progress = update_progress( progress, idx, test_case_count, 0 );
        cvClearMemStorage(storage);
        bool mem = (idx % 4) >= 2;
        string filename = tempfile(idx % 2 ? ".yml" : ".xml");
        FileStorage fs(filename, FileStorage::WRITE + (mem ? FileStorage::MEMORY : 0));
        int test_int = (int)cvtest::randInt(rng);
        double test_real = (cvtest::randInt(rng)%2?1:-1)*exp(cvtest::randReal(rng)*18-9);
        string test_string = "vw wv23424rt\"&<>&'@#[email protected]$%$%&%[email protected]#[email protected]%$&*&() ";
        int depth = cvtest::randInt(rng) % (CV_64F+1);
        int cn = cvtest::randInt(rng) % 4 + 1;
        Mat test_mat(cvtest::randInt(rng)%30+1, cvtest::randInt(rng)%30+1, CV_MAKETYPE(depth, cn));
        rng0.fill(test_mat, CV_RAND_UNI, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
        if( depth >= CV_32F )
        {
            exp(test_mat, test_mat);
            Mat test_mat_scale(test_mat.size(), test_mat.type());
            rng0.fill(test_mat_scale, CV_RAND_UNI, Scalar::all(-1), Scalar::all(1));
            multiply(test_mat, test_mat_scale, test_mat);
        }
        CvSeq* seq = cvCreateSeq(test_mat.type(), (int)sizeof(CvSeq),
            (int)test_mat.elemSize(), storage);
        cvSeqPushMulti(seq, test_mat.data, test_mat.cols*test_mat.rows);
        CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH,
            sizeof(CvGraph), sizeof(CvGraphVtx),
            sizeof(CvGraphEdge), storage );
        int edges[][2] = {{0,1},{1,2},{2,0},{0,3},{3,4},{4,1}};
        int i, vcount = 5, ecount = 6;
        for( i = 0; i < vcount; i++ )
            cvGraphAddVtx(graph);
        for( i = 0; i < ecount; i++ )
        {
            CvGraphEdge* edge;
            cvGraphAddEdge(graph, edges[i][0], edges[i][1], 0, &edge);
            edge->weight = (float)(i+1);
        }
        depth = cvtest::randInt(rng) % (CV_64F+1);
        cn = cvtest::randInt(rng) % 4 + 1;
        int sz[] = {cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1};
        MatND test_mat_nd(3, sz, CV_MAKETYPE(depth, cn));
        rng0.fill(test_mat_nd, CV_RAND_UNI, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
        if( depth >= CV_32F )
        {
            exp(test_mat_nd, test_mat_nd);
            MatND test_mat_scale(test_mat_nd.dims, test_mat_nd.size, test_mat_nd.type());
            rng0.fill(test_mat_scale, CV_RAND_UNI, Scalar::all(-1), Scalar::all(1));
            multiply(test_mat_nd, test_mat_scale, test_mat_nd);
        }
        int ssz[] = {cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1,
            cvtest::randInt(rng)%10+1,cvtest::randInt(rng)%10+1};
        SparseMat test_sparse_mat = cvTsGetRandomSparseMat(4, ssz, cvtest::randInt(rng)%(CV_64F+1),
            cvtest::randInt(rng) % 10000, 0, 100, rng);
        fs << "test_int" << test_int << "test_real" << test_real << "test_string" << test_string;
        fs << "test_mat" << test_mat;
        fs << "test_mat_nd" << test_mat_nd;
        fs << "test_sparse_mat" << test_sparse_mat;
        fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" <<
            "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]";
        fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:";
        const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1};
        fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0])));
        fs << "]" << "}";
        cvWriteComment(*fs, "test comment", 0);
        fs.writeObj("test_seq", seq);
        fs.writeObj("test_graph",graph);
        CvGraph* graph2 = (CvGraph*)cvClone(graph);
        string content = fs.releaseAndGetString();
        if(!fs.open(mem ? content : filename, FileStorage::READ + (mem ? FileStorage::MEMORY : 0)))
        {
            ts->printf( cvtest::TS::LOG, "filename %s can not be read\n", !mem ? filename.c_str() : content.c_str());
            //......... part of the code omitted here .........
Example 15: read
bool CascadeClassifier::Data::read(const FileNode &root)
{
    static const float THRESHOLD_EPS = 1e-5f;
    // load stage params
    String stageTypeStr = (String)root[CC_STAGE_TYPE];
    if( stageTypeStr == CC_BOOST )
        stageType = BOOST;
    else
        return false;
    String featureTypeStr = (String)root[CC_FEATURE_TYPE];
    if( featureTypeStr == CC_HAAR )
        featureType = FeatureEvaluator::HAAR;
    else if( featureTypeStr == CC_LBP )
        featureType = FeatureEvaluator::LBP;
    else if( featureTypeStr == CC_HOG )
        featureType = FeatureEvaluator::HOG;
    else
        return false;
    origWinSize.width = (int)root[CC_WIDTH];
    origWinSize.height = (int)root[CC_HEIGHT];
    CV_Assert( origWinSize.height > 0 && origWinSize.width > 0 );
    isStumpBased = (int)(root[CC_STAGE_PARAMS][CC_MAX_DEPTH]) == 1 ? true : false;
    // load feature params
    FileNode fn = root[CC_FEATURE_PARAMS];
    if( fn.empty() )
        return false;
    ncategories = fn[CC_MAX_CAT_COUNT];
    int subsetSize = (ncategories + 31)/32,
        nodeStep = 3 + ( ncategories>0 ? subsetSize : 1 );
    // load stages
    fn = root[CC_STAGES];
    if( fn.empty() )
        return false;
    stages.reserve(fn.size());
    classifiers.clear();
    nodes.clear();
    FileNodeIterator it = fn.begin(), it_end = fn.end();
    for( int si = 0; it != it_end; si++, ++it )
    {
        FileNode fns = *it;
        Stage stage;
        stage.threshold = (float)fns[CC_STAGE_THRESHOLD] - THRESHOLD_EPS;
        fns = fns[CC_WEAK_CLASSIFIERS];
        if(fns.empty())
            return false;
        stage.ntrees = (int)fns.size();
        stage.first = (int)classifiers.size();
        stages.push_back(stage);
        classifiers.reserve(stages[si].first + stages[si].ntrees);
        FileNodeIterator it1 = fns.begin(), it1_end = fns.end();
        for( ; it1 != it1_end; ++it1 ) // weak trees
        {
            FileNode fnw = *it1;
            FileNode internalNodes = fnw[CC_INTERNAL_NODES];
            FileNode leafValues = fnw[CC_LEAF_VALUES];
            if( internalNodes.empty() || leafValues.empty() )
                return false;
            DTree tree;
            tree.nodeCount = (int)internalNodes.size()/nodeStep;
            classifiers.push_back(tree);
            nodes.reserve(nodes.size() + tree.nodeCount);
            leaves.reserve(leaves.size() + leafValues.size());
            if( subsetSize > 0 )
                subsets.reserve(subsets.size() + tree.nodeCount*subsetSize);
            FileNodeIterator internalNodesIter = internalNodes.begin(), internalNodesEnd = internalNodes.end();
            for( ; internalNodesIter != internalNodesEnd; ) // nodes
            {
                DTreeNode node;
                node.left = (int)*internalNodesIter; ++internalNodesIter;
                node.right = (int)*internalNodesIter; ++internalNodesIter;
                node.featureIdx = (int)*internalNodesIter; ++internalNodesIter;
                if( subsetSize > 0 )
                {
                    for( int j = 0; j < subsetSize; j++, ++internalNodesIter )
                        subsets.push_back((int)*internalNodesIter);
                    node.threshold = 0.f;
                }
                else
                {
                    node.threshold = (float)*internalNodesIter; ++internalNodesIter;
                }
                nodes.push_back(node);
            }
            //......... part of the code omitted here .........