

C++ AnnotationData Class Code Examples

This article collects typical usage examples of the AnnotationData class in C++. If you are wondering what AnnotationData is used for, or how to use it, the class code examples selected here may help.


The following presents 15 code examples of the AnnotationData class, ordered by popularity by default. Note that the examples come from unrelated code bases whose classes merely share the name: osgEarth's AnnotationData carries display metadata (name, description, viewpoint, priority) for annotation nodes; LIMA's AnnotationData holds the annotation graph produced during linguistic analysis; and the autocontext_forest examples use an AnnotationData structure that loads image annotation files.
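
Across the osgEarth-based examples below (Examples 3, 5, 6, 7 and 9) the same pattern recurs: allocate an AnnotationData, fill in its metadata, and hand it to an annotation node with setAnnotationData(). The minimal sketch below condenses that pattern; it is not taken from any of the quoted projects, and the header paths, namespaces and the tagNode helper are illustrative assumptions that may need adjusting to your osgEarth version.

// Minimal usage sketch (assumptions: osgEarth 2.x-style headers and namespaces;
// tagNode is a hypothetical helper, not part of any project shown below).
#include <string>
#include <osgEarthAnnotation/AnnotationData>
#include <osgEarthAnnotation/AnnotationNode>

using namespace osgEarth;
using namespace osgEarth::Annotation;

void tagNode( AnnotationNode* node, const std::string& name, float priority )
{
    AnnotationData* data = new AnnotationData();   // metadata record attached to the node
    data->setName( name );                         // display name (cf. Examples 2, 4, 5)
    data->setPriority( priority );                 // decluttering priority (cf. Examples 3, 6, 7, 9)
    node->setAnnotationData( data );               // the node keeps a reference to the data
}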

Example 1: dump

//***********************************************************************
// main function for outputing the graph
//***********************************************************************
void AnnotationGraphXmlDumper::dump(
    std::ostream& os,
    const AnnotationGraph* graph,
    const AnnotationData& annotationData) const
{
  DumpGraphVisitor vis(os, annotationData);

  os << "<annot-graph>" << std::endl;
//   Color color;
  boost::depth_first_search(*graph, boost::visitor(vis));
  os << "  <matchings>" << std::endl;
  std::map<StringsPoolIndex, std::multimap<AnnotationGraphVertex, AnnotationGraphVertex> >::const_iterator it, it_end;
  it = annotationData.matchings().begin();
  it_end = annotationData.matchings().end();
  for (; it != it_end; it++)
  {
    const std::multimap<AnnotationGraphVertex, AnnotationGraphVertex>& matching = (*it).second;
    os << "    <matching id=\"" << Misc::limastring2utf8stdstring(annotationData.annotationName((*it).first)) << "\">" << std::endl;
    std::multimap<AnnotationGraphVertex, AnnotationGraphVertex>::const_iterator sit, sit_end;
    sit = matching.begin(); sit_end = matching.end();
    for (; sit != sit_end; sit++)
    {
      os << "      <pair k=\""<<(*sit).first<<"\" v=\""<<(*sit).second<<"\"/>" << std::endl;
    }
    os << "    </matching>" << std::endl;
  }
  os << "  </matchings>" << std::endl;
  os << "</annot-graph>" << std::endl;
}
Developer: clemance, Project: lima, Lines: 32, Source: AnnotationGraphXmlDumper.cpp

Example 2: getOrCreateAnnotationData

void
KML_Feature::build( const Config& conf, KMLContext& cx, osg::Node* working )
{
    KML_Object::build(conf, cx, working);

    // subclass feature is built; now add feature level data if available
    if ( working )
    {
        // parse the visibility to show/hide the item by default:
        if ( conf.hasValue("visibility") )
            working->setNodeMask( conf.value<bool>("visibility",true) == true ? ~0 : 0 );

        // parse a "LookAt" element (stores a viewpoint)
        AnnotationData* anno = getOrCreateAnnotationData(working);
        
        anno->setName( conf.value("name") );
        anno->setDescription( conf.value("description") );

        const Config& lookat = conf.child("lookat");
        if ( !lookat.empty() )
        {
            Viewpoint vp(
                lookat.value<double>("longitude", 0.0),
                lookat.value<double>("latitude", 0.0),
                lookat.value<double>("altitude", 0.0),
                lookat.value<double>("heading", 0.0),
                -lookat.value<double>("tilt", 45.0),
                lookat.value<double>("range", 10000.0) );

            anno->setViewpoint( vp );
        }
    }
}
Developer: airwzz999, Project: osgearth-for-android, Lines: 33, Source: KML_Feature.cpp

Example 3: makePlaceNode

    osg::Node* makePlaceNode(FilterContext&     context,
                             Feature*           feature, 
                             const Style&       style, 
                             NumericExpression& priorityExpr )
    {
        osg::Vec3d center = feature->getGeometry()->getBounds().center();

        AltitudeMode mode = ALTMODE_ABSOLUTE;        

        const AltitudeSymbol* alt = style.getSymbol<AltitudeSymbol>();
        if (alt &&
           (alt->clamping() == AltitudeSymbol::CLAMP_TO_TERRAIN || alt->clamping() == AltitudeSymbol::CLAMP_RELATIVE_TO_TERRAIN) &&
           alt->technique() == AltitudeSymbol::TECHNIQUE_SCENE)
        {
            mode = ALTMODE_RELATIVE;
        }                              

        GeoPoint point(feature->getSRS(), center.x(), center.y(), center.z(), mode);        

        PlaceNode* node = new PlaceNode(0L, point, style, context.getDBOptions());

        if ( !priorityExpr.empty() )
        {
            AnnotationData* data = new AnnotationData();
            data->setPriority( feature->eval(priorityExpr, &context) );
            node->setAnnotationData( data );
        }

        return node;
    }
Developer: omega-hub, Project: osgearth, Lines: 30, Source: AnnotationLabelSource.cpp

Example 4: getValue

void
KML_Feature::build( xml_node<>* node, KMLContext& cx, osg::Node* working )
{
    KML_Object::build(node, cx, working);

    // subclass feature is built; now add feature level data if available
    if ( working )
    {
        // parse the visibility to show/hide the item by default:
        std::string visibility = getValue(node, "visibility");
        if ( !visibility.empty() )
            working->setNodeMask( as<int>(visibility, 1) == 1 ? ~0 : 0 );

        // parse a "LookAt" element (stores a viewpoint)
        AnnotationData* anno = getOrCreateAnnotationData(working);
        
        anno->setName( getValue(node, "name") );
        anno->setDescription( getValue(node, "description") );

        xml_node<>* lookat = node->first_node("lookat", 0, false);
        if ( lookat )
        {
            Viewpoint vp;

            vp.focalPoint() = GeoPoint(
                cx._srs.get(),
                as<double>(getValue(lookat, "longitude"), 0.0),
                as<double>(getValue(lookat, "latitude"), 0.0),
                as<double>(getValue(lookat, "altitude"), 0.0),
                ALTMODE_ABSOLUTE );

            vp.heading() =  as<double>(getValue(lookat, "heading"), 0.0);
            vp.pitch()   = -as<double>(getValue(lookat, "tilt"), 45.0);
            vp.range()   =  as<double>(getValue(lookat, "range"), 10000.0);

            anno->setViewpoint( vp );
        }

        xml_node<>* extdata = node->first_node("extendeddata", 0, false);
        if ( extdata )
        {
            xml_node<>* data = extdata->first_node("data", 0, false);
            if ( data )
            {
                for (xml_node<>* n = data->first_node(); n; n = n->next_sibling())
                {
                    working->setUserValue(getValue(n, "name"), getValue(n, "value"));
                }
            }
        }
    }
}
Developer: Brucezhou1979, Project: osgearth, Lines: 52, Source: KML_Feature.cpp

Example 5: createTrack

TrackNode* createTrack(TrackNodeFieldSchema& schema, osg::Image* image, const std::string& name, MapNode* mapNode, const osg::Vec3d& center, double radius, double time, TrackSimVector& trackSims)
{
  TrackNode* track = new TrackNode(mapNode, GeoPoint(mapNode->getMapSRS(),center,ALTMODE_ABSOLUTE), image, schema);
  track->setFieldValue(TRACK_FIELD_NAME, name);

  AnnotationData* data = new AnnotationData();
  data->setName(name);
  data->setViewpoint(osgEarth::Viewpoint(center, 0.0, -90.0, 1e5));
  track->setAnnotationData( data );

  trackSims.push_back(new TrackSim(track, center, radius, time, mapNode));

  return track;
}
Developer: KNeal, Project: osgearth, Lines: 14, Source: osgearth_qt.cpp

Example 6: createTrackNodes

/** Builds a bunch of tracks. */
void
createTrackNodes( MapNode* mapNode, osg::Group* parent, const TrackNodeFieldSchema& schema, TrackSims& sims )
{
    // load an icon to use:
    osg::ref_ptr<osg::Image> srcImage = osgDB::readImageFile( ICON_URL );
    osg::ref_ptr<osg::Image> image;
    ImageUtils::resizeImage( srcImage.get(), ICON_SIZE, ICON_SIZE, image );

    // make some tracks, choosing a random simulation for each.
    Random prng;
    const SpatialReference* geoSRS = mapNode->getMapSRS()->getGeographicSRS();

    for( unsigned i=0; i<g_numTracks; ++i )
    {
        double lon0 = -180.0 + prng.next() * 360.0;
        double lat0 = -80.0 + prng.next() * 160.0;

        GeoPoint pos(geoSRS, lon0, lat0);

        TrackNode* track = new TrackNode(mapNode, pos, image, schema);

        track->setFieldValue( FIELD_NAME,     Stringify() << "Track:" << i );
        track->setFieldValue( FIELD_POSITION, Stringify() << s_format(pos) );
        track->setFieldValue( FIELD_NUMBER,   Stringify() << (1 + prng.next(9)) );

        // add a priority
        AnnotationData* data = new AnnotationData();
        data->setPriority( float(i) );
        track->setAnnotationData( data );

        Decluttering::setEnabled(track->getOrCreateStateSet(), true);

        parent->addChild( track );

        // add a simulator for this guy
        double lon1 = -180.0 + prng.next() * 360.0;
        double lat1 = -80.0 + prng.next() * 160.0;
        TrackSim* sim = new TrackSim();
        sim->_track = track;        
        sim->_startLat = lat0; sim->_startLon = lon0;
        sim->_endLat = lat1; sim->_endLon = lon1;
        sims.push_back( sim );
    }
}
Developer: 3dcl, Project: osgearth, Lines: 45, Source: osgearth_tracks.cpp

Example 7: makePlaceNode

    osg::Node* makePlaceNode(const FilterContext& context,
                             const Feature*       feature, 
                             const Style&         style, 
                             NumericExpression&   priorityExpr )
    {
        osg::Vec3d center = feature->getGeometry()->getBounds().center();
        GeoPoint point(feature->getSRS(), center.x(), center.y());

        PlaceNode* placeNode = new PlaceNode(0L, point, style, context.getDBOptions());

        if ( !priorityExpr.empty() )
        {
            AnnotationData* data = new AnnotationData();
            data->setPriority( feature->eval(priorityExpr, &context) );
            placeNode->setAnnotationData( data );
        }

        return placeNode;
    }
Developer: DavidLeehome, Project: osgearth, Lines: 19, Source: AnnotationLabelSource.cpp

Example 8: compare

bool EntityGroupTransition::
compare(const LinguisticAnalysisStructure::AnalysisGraph& graph,
        const LinguisticGraphVertex& v,
        AnalysisContent& analysis,
        const LinguisticAnalysisStructure::Token* /*token*/,
        const LinguisticAnalysisStructure::MorphoSyntacticData* /*data*/) const
{
  // should compare to vertex ?
  AnnotationData* annotationData = static_cast< AnnotationData* >(analysis.getData("AnnotationData"));
  if (annotationData==0) {
    AULOGINIT;
    LDEBUG << "EntityGroupTransition::compare: no annotation graph available !";
    return false;
  }

  // find annotationGraphVertex matching the vertex of the current graph
  std::set<AnnotationGraphVertex> matches = annotationData->matches(graph.getGraphId(), v, "annot");
  if (matches.empty())
  {
    AULOGINIT;
    LDEBUG << "annotation ("<<graph.getGraphId()<<", "<<v<<", \"annot\") available";
    return false;
  }
  AnnotationGraphVertex annotVertex = *(matches.begin());

  if (!annotationData->hasAnnotation(annotVertex, m_entityAnnotation))
  {
    AULOGINIT;
    LDEBUG << "EntityGroupTransition::compare: No " << m_entityAnnotation << " annotation available on " << v;
    return false;
  }
  
  const SpecificEntityAnnotation* se =
    annotationData->annotation(annotVertex, m_entityAnnotation).
    pointerValue<SpecificEntityAnnotation>();
  Common::MediaticData::EntityType type = se->getType();
  AULOGINIT;
  LDEBUG << "EntityGroupTransition::compare: type = " << type << ", groupId = " << type.getGroupId();
  LDEBUG << "EntityGroupTransition::compare: m_entityGroupId = " << m_entityGroupId;
  LDEBUG << "EntityGroupTransition::compare: tests m_entityGroupId == type.getGroupId() = " << (m_entityGroupId == type.getGroupId());
  return( m_entityGroupId == type.getGroupId() );
}
Developer: aymara, Project: lima, Lines: 42, Source: entityGroupTransition.cpp

Example 9: makeLabelNode

    osg::Node* makeLabelNode(const FilterContext& context, 
                             const Feature*       feature, 
                             const std::string&   value, 
                             const TextSymbol*    text, 
                             NumericExpression&   priorityExpr )
    {
        LabelNode* labelNode = new LabelNode(
            context.getSession()->getMapInfo().getProfile()->getSRS(),
            GeoPoint(feature->getSRS(), feature->getGeometry()->getBounds().center()),
            value,
            text );

        if ( text->priority().isSet() )
        {
            AnnotationData* data = new AnnotationData();
            data->setPriority( feature->eval(priorityExpr, &context) );
            labelNode->setAnnotationData( data );
        }

        return labelNode;
    }
Developer: airwzz999, Project: osgearth-for-android, Lines: 21, Source: AnnotationLabelSource.cpp

Example 10: process

LimaStatusCode SpecificEntitiesXmlLogger::process(
  AnalysisContent& analysis) const
{
  SELOGINIT;
  LDEBUG << "SpecificEntitiesXmlLogger::process";
  TimeUtils::updateCurrentTime();

  AnnotationData* annotationData = static_cast< AnnotationData* >(analysis.getData("AnnotationData"));
  if (annotationData == 0) {
    SELOGINIT;
    LERROR << "no annotationData ! abort";
    return MISSING_DATA;
  }
  
  
  LinguisticAnalysisStructure::AnalysisGraph* graphp = static_cast<LinguisticAnalysisStructure::AnalysisGraph*>(analysis.getData(m_graph));
  if (graphp == 0) {
    SELOGINIT;
    LERROR << "no graph "<< m_graph <<" ! abort";
    return MISSING_DATA;
  }
  const LinguisticAnalysisStructure::AnalysisGraph& graph = *graphp;
  LinguisticGraph* lingGraph = const_cast<LinguisticGraph*>(graph.getGraph());
  VertexTokenPropertyMap tokenMap = get(vertex_token, *lingGraph);
  
  LinguisticMetaData* metadata=static_cast<LinguisticMetaData*>(analysis.getData("LinguisticMetaData"));
  if (metadata == 0) {
      SELOGINIT;
      LERROR << "no LinguisticMetaData ! abort";
      return MISSING_DATA;
  }

  DumperStream* dstream=initialize(analysis);
  ostream& out=dstream->out();

  uint64_t offset(0);
  try {
    offset=atoi(metadata->getMetaData("StartOffset").c_str());
  }
  catch (LinguisticProcessingException& ) {
    // do nothing: not set in analyzeText (only in analyzeXmlDocuments)
  }

  uint64_t offsetIndexingNode(0);
  try {
    offsetIndexingNode=atoi(metadata->getMetaData("StartOffsetIndexingNode").c_str());
  }
  catch (LinguisticProcessingException& ) {
    // do nothing: not set in analyzeText (only in analyzeXmlDocuments)
  }

  std::string docId("");
  try {
    docId=metadata->getMetaData("DocId");
  }
  catch (LinguisticProcessingException& ) {
    // do nothing: not set in analyzeText (only in analyzeXmlDocuments)
  }

  if (m_compactFormat) {
    out << "<entities docid=\"" << docId
    << "\" offsetNode=\"" << offsetIndexingNode 
    << "\" offset=\"" << offset
    << "\">" << endl;
  }
  else {
    out << "<specific_entities>" << endl;
  }
//   SELOGINIT;

  if (m_followGraph) {
    // instead of looking to all annotations, follow the graph (in
    // morphological graph, some vertices are not related to main graph:
    // idiomatic expressions parts and named entity parts)
    // -> this will not include nested entities

    AnalysisGraph* tokenList=static_cast<AnalysisGraph*>(analysis.getData(m_graph));
    if (tokenList==0) {
      LERROR << "graph " << m_graph << " has not been produced: check pipeline";
      return MISSING_DATA;
    }
    LinguisticGraph* graph=tokenList->getGraph();
    //const FsaStringsPool& sp=Common::MediaticData::MediaticData::single().stringsPool(m_language);
    
    std::queue<LinguisticGraphVertex> toVisit;
    std::set<LinguisticGraphVertex> visited;
    toVisit.push(tokenList->firstVertex());
    
    LinguisticGraphOutEdgeIt outItr,outItrEnd;
    while (!toVisit.empty()) {
      LinguisticGraphVertex v=toVisit.front();
      toVisit.pop();
      if (v == tokenList->lastVertex()) {
        continue;
      }
      
      for (boost::tie(outItr,outItrEnd)=out_edges(v,*graph); outItr!=outItrEnd; outItr++) 
      {
        LinguisticGraphVertex next=target(*outItr,*graph);
        if (visited.find(next)==visited.end())
//......... remaining code omitted .........
Developer: clemance, Project: lima, Lines: 101, Source: SpecificEntitiesXmlLogger.cpp

Example 11: main

// main routine
int main(int argc, char* argv[]) {

    //%%%%%%%%%%%%%%%%%%%%%%%% init %%%%%%%%%%%%%%%%%%%%%%%%

    // read arguments
    if(argc<3) {
        cerr << "Usage: ComputeFeatures config.txt override(no(0)/yes(1))" << endl;
        exit(-1);
    }

    // read config file
    StructParam param;
    if(!param.loadConfigFeature(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // read test/anno data (uses same data structure)
    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    //if(atoi(argv[2])==2)
    //system(("rm " + param.feature_path + "/*.pgm").c_str());


    // detect hypotheses on all images
    for(int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        Mat originImg = imread((param.image_path+"/"+TestD.AnnoData[i].image_name).c_str());
        if(originImg.empty()) {
            cerr << "Could not read image file " << param.image_path << "/" << TestD.AnnoData[i].image_name << endl;
            continue;
        }

        cout << system(("mkdir " + param.feature_path + "/" + TestD.AnnoData[i].image_name).c_str());

        // extract features
        for(int k=0; k<param.scales.size(); ++k) {

            Features Feat;
            string fname(param.feature_path+"/"+TestD.AnnoData[i].image_name+"/"+TestD.AnnoData[i].image_name);
            if( atoi(argv[2])==1 || !Feat.loadFeatures( fname, param.scales[k]) ) {

                Mat scaledImg;
                resize(originImg, scaledImg, Size(int(originImg.cols * param.scales[k] + 0.5), int(originImg.rows * param.scales[k] + 0.5)) );
                Feat.extractFeatureChannels(scaledImg);
                Feat.saveFeatures( fname, param.scales[k]);

#if 0
                // debug!!!!
                Features Feat2;
                namedWindow( "ShowF", CV_WINDOW_AUTOSIZE );
                imshow( "ShowF", Feat.Channels[0] );

                Feat2.loadFeatures( fname, param.scales[k]);

                namedWindow( "ShowF2", CV_WINDOW_AUTOSIZE );
                imshow( "ShowF2", Feat2.Channels[0] );

                cout << scaledImg.rows << " " << scaledImg.cols << " " << scaledImg.depth() << " " << scaledImg.channels() << " " << scaledImg.isContinuous() << endl;
                cout << Feat.Channels[0].rows << " " << Feat.Channels[0].cols << " " << Feat.Channels[0].depth() << " " << Feat.Channels[0].channels() << " " << Feat.Channels[0].isContinuous() << endl;
                cout << Feat2.Channels[0].rows << " " << Feat2.Channels[0].cols << " " << Feat2.Channels[0].depth() << " " << Feat2.Channels[0].channels() << " " << Feat.Channels[0].isContinuous() << endl;


                Mat diff(Size(scaledImg.cols,scaledImg.rows),CV_8UC1);
                cout << diff.rows << " " << diff.cols << " " << diff.depth() << " " << diff.channels() << " " << scaledImg.isContinuous() << endl;

                diff = Feat.Channels[0] - Feat2.Channels[0];

                namedWindow( "ShowDiff", CV_WINDOW_AUTOSIZE );
                imshow( "ShowDiff", diff );
                waitKey(0);
#endif
            }

        }

    }

    return 0;

}
Developer: aalibash, Project: autocontext_forest, Lines: 84, Source: ComputeFeatures.cpp

Example 12: detect_L2

int detect_L2(int argc, char* argv[]) {

    cout << "Testing L2 started" << endl;
    if(argc<3) {
        cerr << "Usage: HFTrainDetect config.txt 1 [image] [detection]" << endl;
        return -1;
    }

    // timer
    timeval start, end;
    gettimeofday(&start, NULL);
    double runtime=0;

    // read config file
    StructParam param;
    if(!param.loadConfigDetect_L2(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // load first layer forest
    HFForest_L1 forest_L1(&param);
    forest_L1.loadForest(param.treepath_L1);

    // load second layer forest
    HFForest_L2 forest_L2(&param);
    forest_L2.loadForest(param.treepath_L2);

    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    // detect hypotheses on all images
    for(unsigned int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        string fileName = param.image_path+"/"+TestD.AnnoData[i].image_name;
        string depthFileName = param.depth_image_path+"/"+TestD.AnnoData[i].image_name.substr(0,TestD.AnnoData[i].image_name.size()-4)+"_abs_smooth.png";
        Mat originImg = imread(fileName);
        Mat depthImg  = imread(depthFileName,CV_LOAD_IMAGE_ANYDEPTH);    // depthImg is UINT16

        // calculate leaf id maps using first layer forest
        cout<<"evaluating leafId maps"<<endl;
        vector<vector<vector<Mat> > > leafIdMaps;
        forest_L1.evaluateLeafIdMaps(originImg, depthImg, leafIdMaps);

//        // get the vote maps from the first layer
//        cout<<"evaluating L1 vote maps"<<endl;
//        vector<vector<Mat> > voteMaps_L1;
//        forest_L1.returnVoteMaps(originImg, depthImg, voteMaps_L1);

        // get the vote maps from the second layer
        cout<<"evaluating L2 vote maps"<<endl;
        vector<vector<Mat> > voteMaps_L2;
        forest_L2.returnVoteMaps(leafIdMaps,voteMaps_L2,originImg);

#if 0
        namedWindow("show",CV_WINDOW_AUTOSIZE);
        for(unsigned int aspIdx=0; aspIdx<param.asp_ratios.size(); ++aspIdx){
            for(unsigned int sclIdx=0; sclIdx<param.scales.size(); ++sclIdx){
                Mat show;
                voteMaps_L2[aspIdx][sclIdx].convertTo(show,CV_8U,255*0.05);
                imshow("show",show);
                waitKey(0);
            }
        }
#endif

        Hypotheses hyp;
        forest_L2.detect(hyp,voteMaps_L2);
//        hyp.save_detections((param.hypotheses_path+"/"+TestD.AnnoData[i].image_name+".txt").c_str());
//        hyp.show_detections(originImg,param.d_thres);

//        // pass leafIdMaps to second layer forest for detection
//        cout<<"evaluating combined detection"<<endl;
//        vector<Hypotheses> bigHyp;
//        forest_L2.detect(bigHyp, voteMaps_L1, voteMaps_L2);
//
//        // save detections
//        for(unsigned int hypIdx=0; hypIdx<bigHyp.size(); ++hypIdx){
//            char buffer[5];
//            sprintf(buffer,"%02d",hypIdx);
//            string strBuffer = buffer;
//            bigHyp[hypIdx].save_detections( (param.hypotheses_path+"/lambda"+strBuffer+"/"+TestD.AnnoData[i].image_name+".txt").c_str());
//        }
    }

    gettimeofday(&end, NULL);
    runtime = ( (end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/(1000.0) );
    cout << "Total runtime (L2 test): " << runtime << " msec" << endl;

    return 0;

}
Developer: aalibash, Project: autocontext_forest, Lines: 93, Source: HFTrainDetect.cpp

Example 13: detect

int detect(int argc, char* argv[]) {

    cout << "Testing L1 started" << endl;
    if(argc<3) {
        cerr << "Usage: HFTrainDetect config.txt 1 [image] [detection]" << endl;
        return -1;
    }

    // read config file
    StructParam param;
    if(!param.loadConfigDetect(argv[1])) {
        cerr << "Could not parse " << argv[1] << endl;
        exit(-1);
    }

    // timer
    timeval start, end;
    double runtime;
    gettimeofday(&start, NULL);

    // load forest
    HFForest_L1 forest(&param);
    forest.loadForest(param.treepath_L1);


    AnnotationData TestD;
    TestD.loadAnnoFile(param.test_file.c_str());

    // detect hypotheses on all images
    for(unsigned int i=0; i<TestD.AnnoData.size(); ++i) {

        // read image
        Mat originImg = imread((param.image_path+"/"+TestD.AnnoData[i].image_name).c_str()); // originImg is UINT8_3Channel
        string depthFileName = param.depth_image_path+"/"+TestD.AnnoData[i].image_name.substr(0,TestD.AnnoData[i].image_name.size()-4)+"_abs_smooth.png";
        Mat depthImg  = imread(depthFileName,CV_LOAD_IMAGE_ANYDEPTH);    // depthImg is UINT16

        if(originImg.empty()) {
            cerr << "Could not read image file " << param.image_path << "/" << TestD.AnnoData[i].image_name << endl;
            continue;
        }

        // detect
        Hypotheses hyp;
        if(param.feature_path.empty()) {
            forest.detect(TestD.AnnoData[i].image_name,originImg,depthImg,hyp);
        }

        #if 0
        // evaluate
        Annotation train;
        hyp.check(TestD.AnnoData[i], param.d_thres, train);
        #endif

        // save detections
        hyp.save_detections( (param.hypotheses_path+"/"+TestD.AnnoData[i].image_name+".txt").c_str());

        #if 0
        // show detections
        hyp.show_detections(originImg, param.d_thres);
        #endif

    }

    gettimeofday(&end, NULL);
    runtime = ( (end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/(1000.0) );
    cout << "Total runtime (L1 test): " << runtime << " msec" << endl;

    return 0;

}
Developer: aalibash, Project: autocontext_forest, Lines: 70, Source: HFTrainDetect.cpp

Example 14: operator()

bool CreateIdiomaticAlternative::operator()(Automaton::RecognizerMatch& result,
                                            AnalysisContent& analysis) const
{
#ifdef DEBUG_LP
    MORPHOLOGINIT;
    LDEBUG << "CreateIdiomaticAlternative, match is " << result;
    LDEBUG << "    expression is " << (result.isContiguous()?"":"non") <<
     " contiguous and" << (result.isContextual()?" non":"") << " absolute";
#endif
  if (result.empty()) return false;
  const LinguisticAnalysisStructure::AnalysisGraph& graph = *(result.getGraph());
  AnnotationData* annotationData = static_cast< AnnotationData* >(analysis.getData("AnnotationData"));
  if (annotationData->dumpFunction("IdiomExpr") == 0)
  {
    annotationData->dumpFunction("IdiomExpr", new DumpIdiomaticExpressionAnnotation());
  }
  
  RecognizerData* recoData=static_cast<RecognizerData*>(analysis.getData("RecognizerData"));
  
  std::set<LinguisticGraphVertex> addedVertices;
  // initialize the vertices to clear

  if (result.isContiguous())
  {
//     MORPHOLOGINIT;
//      LDEBUG << "contiguous idiomatic expression found: "
//          << result.concatString();

    // only one part : terms in expression are adjacent -> easy part

    // check if there is an overlap first
    if (recoData->matchOnRemovedVertices(result))
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LWARN << "idiomatic expression ignored: " << Common::Misc::limastring2utf8stdstring(result.concatString())
          << ": overlapping with a previous one";
      return false;
    }

    // create the new token
    std::pair<Token*,MorphoSyntacticData*> newToken = createAlternativeToken(result);
    if (newToken.second->empty())
    {
      // ignore current idiomatic expression, continue
      MORPHOLOGINIT;
      LERROR << "CreateIdiomaticAlternative::operator() Got empty morphosyntactic data. Abort";
      delete newToken.first;
      delete newToken.second;
      return false;
    }

    // add the vertex
    LinguisticGraphVertex idiomaticVertex =
        addAlternativeVertex(newToken.first, newToken.second, const_cast<LinguisticGraph*>(graph.getGraph()));
    AnnotationGraphVertex agv =  annotationData->createAnnotationVertex();
    annotationData->addMatching("AnalysisGraph", idiomaticVertex, "annot", agv);
    annotationData->annotate(agv, Common::Misc::utf8stdstring2limastring("AnalysisGraph"), idiomaticVertex);
    IdiomaticExpressionAnnotation annot(result);
    GenericAnnotation ga(annot);
    annotationData->annotate(agv, Common::Misc::utf8stdstring2limastring("IdiomExpr"), ga);

    addedVertices.insert(idiomaticVertex);

    //create the alternative with this only vertex
    createBeginAlternative(result.front().getVertex(),
                            idiomaticVertex,const_cast<LinguisticGraph&>(*graph.getGraph()));
    attachEndOfAlternative(idiomaticVertex,
                            result.back().getVertex(),const_cast<LinguisticGraph&>(*graph.getGraph()));

    // if expression is not contextual, only keep alternative
    if (! result.isContextual())
    {
      recoData->storeVerticesToRemove(result,const_cast<LinguisticGraph*>(graph.getGraph()));
      removeEdges(const_cast<LinguisticGraph&>(*graph.getGraph()),
                 result, analysis);
      //recoData->setNextVertex(idiomaticVertex);
      // if match was on single token, use next vertices (to avoid loops)
      if (result.size() > 1) 
      {
        recoData->setNextVertex(idiomaticVertex);
      }
      else 
      {
        LinguisticGraphOutEdgeIt outItr,outItrEnd;
        boost::tie(outItr,outItrEnd) = out_edges(idiomaticVertex,*(graph.getGraph()));
        for (;outItr!=outItrEnd;outItr++) 
        {
          recoData->setNextVertex(target(*outItr, *(graph.getGraph())));
        }
      }
    }
  }
  else
  {
    // several parts : tough case
//     MORPHOLOGINIT;
//      LDEBUG << "non contiguous idiomatic expression found: "
//          << result.concatString();

//......... remaining code omitted .........
Developer: aymara, Project: lima, Lines: 101, Source: IdiomaticAlternativesConstraints.cpp

Example 15: process

      LimaStatusCode CorefSolvingNormalizedXmlLogger::process(
        AnalysisContent& analysis) const
      {
//         COREFSOLVERLOGINIT;
        TimeUtils::updateCurrentTime();
        AnnotationData* annotationData = static_cast<AnnotationData*>(analysis.getData("AnnotationData"));
        const LinguisticAnalysisStructure::AnalysisGraph& graph = *(static_cast<LinguisticAnalysisStructure::AnalysisGraph*>(analysis.getData(m_graph)));

//         LinguisticGraph* lingGraph = const_cast<LinguisticGraph*>(graph.getGraph());
        LinguisticMetaData* metadata=static_cast<LinguisticMetaData*>(analysis.getData("LinguisticMetaData"));
        if (metadata == 0)
        {
          COREFSOLVERLOGINIT;
          LERROR << "no LinguisticMetaData ! abort" << LENDL;
          return MISSING_DATA;
        }

        ofstream out;
        if (!openLogFile(out,metadata->getMetaData("FileName")))
        {
          COREFSOLVERLOGINIT;
          LERROR << "Can't open log file " << LENDL;
          return UNKNOWN_ERROR;
        }

        out << "<coreferences>" << endl;


        //   LDEBUG << "CorefSolvingNormalizedXmlLogger on graph " << m_graph << LENDL;
        AnnotationGraphVertexIt itv, itv_end;
        boost::tie(itv, itv_end) = vertices(annotationData->getGraph());
        for (; itv != itv_end; itv++)
        {
          // process
          //LDEBUG << "CorefSolvingNormalizedXmlLogger on annotation vertex " << *itv << LENDL;
          if (annotationData->hasAnnotation(*itv,utf8stdstring2limastring("Coreferent")))
            //if (annotationData->hasAnnotation(*itv,utf8stdstring2limastring("Coreferent")))
          {
            CoreferentAnnotation* annot ;
            try
            {
              annot = annotationData->annotation(*itv,utf8stdstring2limastring("Coreferent"))
                      .pointerValue<CoreferentAnnotation>();
            }
            catch (const boost::bad_any_cast& )
            {
              COREFSOLVERLOGINIT;
              LERROR << "One annotation on vertex " << *itv << " you are trying to cast is not a Coreference; Coreference not logged" << LENDL;
              for (int i = 0; i < 19 ; i++)
              {
                LERROR << "annot "<< i << " : " << limastring2utf8stdstring(annotationData->annotationName(i)) << LENDL ;
              }
              continue;
            }
            LinguisticProcessing::LinguisticAnalysisStructure::Token* token = get(vertex_token, *graph.getGraph(), annot->morphVertex());
            if (token == 0)
            {
              COREFSOLVERLOGINIT;
              LERROR << "Vertex " << *itv << " has no entry in the analysis graph token map. This should not happen !!" << LENDL;
            }
            else
            {
              CoreferentAnnotation* antecedent;
//               bool hasAntecedent = false;
              AnnotationGraphOutEdgeIt it, it_end;
              boost::tie(it, it_end) = boost::out_edges(static_cast<AnnotationGraphVertex>(*itv), annotationData->getGraph());

              for (; it != it_end; it++)
              {
                if (annotationData->hasAnnotation(target(*it,annotationData->getGraph()),utf8stdstring2limastring("Coreferent")))
                {
                  try
                  {
                    antecedent = annotationData->annotation(target(*it, annotationData->getGraph()), utf8stdstring2limastring("Coreferent")).pointerValue<CoreferentAnnotation>();
//                     hasAntecedent = true;
                  }
                  catch (const boost::bad_any_cast& )
                  {
                    COREFSOLVERLOGINIT;
                    LERROR << "One annotation on vertex you are trying to cast resulting from an edge out of " << *itv << " is not a Coreference; Coreference not logged" << LENDL;
                    continue;
                  }
                }
              }
              out << "  <reference>\n"
              << "    <pos>" << get(vertex_token,*graph.getGraph(),annot->morphVertex())->position() << "</pos>\n"
              << "    <len>" << token->stringForm().length() << "</len>\n"
              << "    <string>"<< limastring2utf8stdstring(transcodeToXmlEntities(token->stringForm())) << "</string>\n"
              << "    <npId>" << annot->id() << "</npId>\n"
              << "    <posVertex>" << annot->morphVertex() << "</posVertex>\n";
              //if (hasAntecedent)
              if (false)
              {
                out << "    <npRef>" << antecedent->id() << "</npRef>\n";
                out << "    <refPosVertex>" << antecedent->morphVertex() << "</refPosVertex>\n";
              }
              out << "    <categ>" << annot->categ() << "</categ>\n"
                    << "  </reference>\n"
              << endl;
            }
//......... remaining code omitted .........
Developer: pquentin, Project: lima, Lines: 101, Source: CorefSolvingNormalizedXmlLogger.cpp


Note: The AnnotationData class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please refer to each project's license before distributing or using the code, and do not republish without permission.