This article collects and summarizes typical usage examples of the C++ method ConsensusMap::begin. If you are wondering how ConsensusMap::begin is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing ConsensusMap class.
The following presents 15 code examples of ConsensusMap::begin, sorted by popularity by default.
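Before the individual examples, here is a minimal sketch of the iteration pattern they all share: load a ConsensusMap and walk over its consensus features with begin()/end(). This is only an illustrative sketch under the assumption of the usual OpenMS headers and namespace; the input file name is a placeholder, not taken from the examples below.

#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <iostream>

using namespace OpenMS;

int main()
{
  ConsensusMap map;
  ConsensusXMLFile().load("input.consensusXML", map); // placeholder path

  // ConsensusMap::begin()/end() iterate over the consensus features;
  // each element groups FeatureHandles from the original feature maps.
  for (ConsensusMap::ConstIterator cf_it = map.begin(); cf_it != map.end(); ++cf_it)
  {
    std::cout << cf_it->getRT() << '\t' << cf_it->getMZ() << '\t'
              << cf_it->getIntensity() << '\n';
  }
  return 0;
}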
Example 1: generateSeedLists
void SeedListGenerator::generateSeedLists(const ConsensusMap& consensus,
                                          Map<UInt64, SeedList>& seed_lists)
{
  seed_lists.clear();
  // iterate over all consensus features...
  for (ConsensusMap::ConstIterator cons_it = consensus.begin();
       cons_it != consensus.end(); ++cons_it)
  {
    DPosition<2> point(cons_it->getRT(), cons_it->getMZ());
    // for each sub-map in the consensus map, add a seed at the position of
    // this consensus feature:
    for (ConsensusMap::FileDescriptions::const_iterator file_it =
           consensus.getFileDescriptions().begin(); file_it !=
           consensus.getFileDescriptions().end(); ++file_it)
      seed_lists[file_it->first].push_back(point);
    // for each feature contained in the consensus feature, remove the seed of
    // the corresponding map:
    for (ConsensusFeature::HandleSetType::const_iterator feat_it =
           cons_it->getFeatures().begin(); feat_it !=
           cons_it->getFeatures().end(); ++feat_it)
    {
      seed_lists[feat_it->getMapIndex()].pop_back();
    }
    // this leaves seeds for maps where no feature was found near the
    // consensus position
  }
}
Example 2: main_
ExitCodes main_(int, const char **)
{
  String in = getStringOption_("in"), out = getStringOption_("out");
  FileTypes::Type in_type = FileHandler::getType(in);
  if (in_type == FileTypes::FEATUREXML)
  {
    FeatureMap<> features;
    FeatureXMLFile().load(in, features);
    for (FeatureMap<>::Iterator feat_it = features.begin();
         feat_it != features.end(); ++feat_it)
    {
      resolveConflict_(feat_it->getPeptideIdentifications());
    }
    addDataProcessing_(features,
                       getProcessingInfo_(DataProcessing::FILTERING));
    FeatureXMLFile().store(out, features);
  }
  else // consensusXML
  {
    ConsensusMap consensus;
    ConsensusXMLFile().load(in, consensus);
    for (ConsensusMap::Iterator cons_it = consensus.begin();
         cons_it != consensus.end(); ++cons_it)
    {
      resolveConflict_(cons_it->getPeptideIdentifications());
    }
    addDataProcessing_(consensus,
                       getProcessingInfo_(DataProcessing::FILTERING));
    ConsensusXMLFile().store(out, consensus);
  }
  return EXECUTION_OK;
}
Example 3: normalizeMaps
void ConsensusMapNormalizerAlgorithmThreshold::normalizeMaps(ConsensusMap& map, const vector<double>& ratios)
{
  ConsensusMap::Iterator cf_it;
  ProgressLogger progresslogger;
  progresslogger.setLogType(ProgressLogger::CMD);
  progresslogger.startProgress(0, map.size(), "normalizing maps");
  for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
  {
    progresslogger.setProgress(cf_it - map.begin());
    ConsensusFeature::HandleSetType::const_iterator f_it;
    for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
    {
      f_it->asMutable().setIntensity(f_it->getIntensity() * ratios[f_it->getMapIndex()]);
    }
  }
  progresslogger.endProgress();
}
Example 4: align
void MapAlignmentAlgorithmPoseClustering::align(const ConsensusMap & map, TransformationDescription & trafo)
{
  // TODO: move this to updateMembers_? (if consensusMap prevails)
  // TODO: why does superimposer work on consensus map???
  const ConsensusMap & map_model = reference_;
  ConsensusMap map_scene = map;

  // run superimposer to find the global transformation
  TransformationDescription si_trafo;
  superimposer_.run(map_model, map_scene, si_trafo);

  // apply transformation to consensus features and contained feature
  // handles
  for (Size j = 0; j < map_scene.size(); ++j)
  {
    // Calculate new RT
    double rt = map_scene[j].getRT();
    rt = si_trafo.apply(rt);
    // Set RT of consensus feature centroid
    map_scene[j].setRT(rt);
    // Set RT of consensus feature handles
    map_scene[j].begin()->asMutable().setRT(rt);
  }

  // run pairfinder to find pairs
  ConsensusMap result;
  // TODO: add another 2map interface to pairfinder?
  std::vector<ConsensusMap> input(2);
  input[0] = map_model;
  input[1] = map_scene;
  pairfinder_.run(input, result);

  // calculate the local transformation
  si_trafo.invert(); // to undo the transformation applied above
  TransformationDescription::DataPoints data;
  for (ConsensusMap::Iterator it = result.begin(); it != result.end();
       ++it)
  {
    if (it->size() == 2) // two matching features
    {
      ConsensusFeature::iterator feat_it = it->begin();
      double y = feat_it->getRT();
      double x = si_trafo.apply((++feat_it)->getRT());
      // one feature should be from the reference map:
      if (feat_it->getMapIndex() != 0)
      {
        data.push_back(make_pair(x, y));
      }
      else
      {
        data.push_back(make_pair(y, x));
      }
    }
  }
  trafo = TransformationDescription(data);
  trafo.fitModel("linear");
}
Example 5: transferSubelements
void FeatureGroupingAlgorithm::transferSubelements(const vector<ConsensusMap>& maps, ConsensusMap& out) const
{
  // accumulate file descriptions from the input maps:
  // cout << "Updating file descriptions..." << endl;
  out.getFileDescriptions().clear();
  // mapping: (map index, original id) -> new id
  map<pair<Size, UInt64>, Size> mapid_table;
  for (Size i = 0; i < maps.size(); ++i)
  {
    const ConsensusMap& consensus = maps[i];
    for (ConsensusMap::FileDescriptions::const_iterator desc_it = consensus.getFileDescriptions().begin(); desc_it != consensus.getFileDescriptions().end(); ++desc_it)
    {
      Size counter = mapid_table.size();
      mapid_table[make_pair(i, desc_it->first)] = counter;
      out.getFileDescriptions()[counter] = desc_it->second;
    }
  }

  // look-up table: input map -> unique ID -> consensus feature
  // cout << "Creating look-up table..." << endl;
  vector<map<UInt64, ConsensusMap::ConstIterator> > feat_lookup(maps.size());
  for (Size i = 0; i < maps.size(); ++i)
  {
    const ConsensusMap& consensus = maps[i];
    for (ConsensusMap::ConstIterator feat_it = consensus.begin();
         feat_it != consensus.end(); ++feat_it)
    {
      // do NOT use "id_lookup[i][feat_it->getUniqueId()] = feat_it;" here as
      // you will get "attempt to copy-construct an iterator from a singular
      // iterator" in STL debug mode:
      feat_lookup[i].insert(make_pair(feat_it->getUniqueId(), feat_it));
    }
  }

  // adjust the consensus features:
  // cout << "Adjusting consensus features..." << endl;
  for (ConsensusMap::iterator cons_it = out.begin(); cons_it != out.end(); ++cons_it)
  {
    ConsensusFeature adjusted = ConsensusFeature(
      static_cast<BaseFeature>(*cons_it)); // remove sub-features
    for (ConsensusFeature::HandleSetType::const_iterator sub_it = cons_it->getFeatures().begin(); sub_it != cons_it->getFeatures().end(); ++sub_it)
    {
      UInt64 id = sub_it->getUniqueId();
      Size map_index = sub_it->getMapIndex();
      ConsensusMap::ConstIterator origin = feat_lookup[map_index][id];
      for (ConsensusFeature::HandleSetType::const_iterator handle_it = origin->getFeatures().begin(); handle_it != origin->getFeatures().end(); ++handle_it)
      {
        FeatureHandle handle = *handle_it;
        Size new_id = mapid_table[make_pair(map_index, handle.getMapIndex())];
        handle.setMapIndex(new_id);
        adjusted.insert(handle);
      }
    }
    *cons_it = adjusted;
  }
}
Example 6: transformSingleConsensusMap
void MapAlignmentTransformer::transformSingleConsensusMap(ConsensusMap & cmap,
                                                          const TransformationDescription & trafo)
{
  for (ConsensusMap::Iterator cmit = cmap.begin(); cmit != cmap.end();
       ++cmit)
  {
    applyToConsensusFeature_(*cmit, trafo);
  }

  // adapt RT values of unassigned peptides:
  if (!cmap.getUnassignedPeptideIdentifications().empty())
  {
    transformSinglePeptideIdentification(
      cmap.getUnassignedPeptideIdentifications(), trafo);
  }
}
Example 7: setNormalizedIntensityValues
void ConsensusMapNormalizerAlgorithmQuantile::setNormalizedIntensityValues(const vector<vector<double> >& feature_ints, ConsensusMap& map)
{
  // assumes the input map and feature_ints are in the same order as in the beginning,
  // although feature_ints has normalized values now (but the same ranks as before)
  Size number_of_maps = map.getColumnHeaders().size();
  ConsensusMap::ConstIterator cf_it;
  vector<Size> progress_indices(number_of_maps);
  for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
  {
    ConsensusFeature::HandleSetType::const_iterator f_it;
    for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
    {
      Size map_idx = f_it->getMapIndex();
      double intensity = feature_ints[map_idx][progress_indices[map_idx]++];
      f_it->asMutable().setIntensity(intensity);
    }
  }
}
Example 8: mergeConsensusMaps_
void QuantitativeExperimentalDesign::mergeConsensusMaps_(ConsensusMap & out, const String & experiment, StringList & file_paths)
{
  ConsensusMap map;
  LOG_INFO << "Merge consensus maps: " << endl;
  UInt counter = 1;
  for (StringList::Iterator file_it = file_paths.begin(); file_it != file_paths.end(); ++file_it, ++counter)
  {
    // load should clear the map
    ConsensusXMLFile().load(*file_it, map);
    for (ConsensusMap::iterator it = map.begin(); it != map.end(); ++it)
    {
      it->setMetaValue("experiment", DataValue(experiment));
    }
    out += map;
  }
  LOG_INFO << endl;
}
Example 9: extractIntensityVectors
void ConsensusMapNormalizerAlgorithmQuantile::extractIntensityVectors(const ConsensusMap& map, vector<vector<double> >& out_intensities)
{
  // reserve space for out_intensities (unequal vector lengths, 0-features omitted)
  Size number_of_maps = map.getColumnHeaders().size();
  out_intensities.clear();
  out_intensities.resize(number_of_maps);
  for (UInt i = 0; i < number_of_maps; i++)
  {
    ConsensusMap::ColumnHeaders::const_iterator it = map.getColumnHeaders().find(i);
    if (it == map.getColumnHeaders().end()) throw Exception::ElementNotFound(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, String(i));
    out_intensities[i].reserve(it->second.size);
  }
  // fill out_intensities
  ConsensusMap::ConstIterator cf_it;
  for (cf_it = map.begin(); cf_it != map.end(); ++cf_it)
  {
    ConsensusFeature::HandleSetType::const_iterator f_it;
    for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
    {
      out_intensities[f_it->getMapIndex()].push_back(f_it->getIntensity());
    }
  }
}
Example 10: computeCorrelation
vector<double> ConsensusMapNormalizerAlgorithmThreshold::computeCorrelation(const ConsensusMap& map, const double& ratio_threshold, const String& acc_filter, const String& desc_filter)
{
  Size number_of_features = map.size();
  Size number_of_maps = map.getFileDescriptions().size();
  vector<vector<double> > feature_int(number_of_maps);

  // get map with most features, resize feature_int
  UInt map_with_most_features_idx = 0;
  ConsensusMap::FileDescriptions::const_iterator map_with_most_features = map.getFileDescriptions().find(0);
  for (UInt i = 0; i < number_of_maps; i++)
  {
    feature_int[i].resize(number_of_features);
    ConsensusMap::FileDescriptions::const_iterator it = map.getFileDescriptions().find(i);
    if (it->second.size > map_with_most_features->second.size)
    {
      map_with_most_features = it;
      map_with_most_features_idx = i;
    }
  }

  // fill feature_int with intensities
  Size pass_counter = 0;
  ConsensusMap::ConstIterator cf_it;
  UInt idx = 0;
  for (cf_it = map.begin(); cf_it != map.end(); ++cf_it, ++idx)
  {
    if (!ConsensusMapNormalizerAlgorithmMedian::passesFilters_(cf_it, map, acc_filter, desc_filter))
    {
      continue;
    }
    ++pass_counter;
    ConsensusFeature::HandleSetType::const_iterator f_it;
    for (f_it = cf_it->getFeatures().begin(); f_it != cf_it->getFeatures().end(); ++f_it)
    {
      feature_int[f_it->getMapIndex()][idx] = f_it->getIntensity();
    }
  }

  LOG_INFO << endl << "Using " << pass_counter << "/" << map.size() << " consensus features for computing normalization coefficients" << endl << endl;

  // determine ratio
  vector<double> ratio_vector(number_of_maps);
  for (UInt j = 0; j < number_of_maps; j++)
  {
    vector<double> ratios;
    for (UInt k = 0; k < number_of_features; ++k)
    {
      if (feature_int[map_with_most_features_idx][k] != 0.0 && feature_int[j][k] != 0.0)
      {
        double ratio = feature_int[map_with_most_features_idx][k] / feature_int[j][k];
        if (ratio > ratio_threshold && ratio < 1 / ratio_threshold)
        {
          ratios.push_back(ratio);
        }
      }
    }
    if (ratios.empty())
    {
      LOG_WARN << endl << "Not enough features passing filters. Cannot compute normalization coefficients for all maps. Result will be unnormalized." << endl << endl;
      return vector<double>(number_of_maps, 1.0);
    }
    ratio_vector[j] = Math::mean(ratios.begin(), ratios.end());
  }
  return ratio_vector;
}
Example 11: ice
  IsobaricChannelExtractor ice(q_method);

  // disable activation filtering
  Param p = ice.getParameters();
  p.setValue("select_activation", "");
  p.setValue("min_precursor_intensity", 5300000.0);
  ice.setParameters(p);

  // extract channels
  ConsensusMap cm_out;
  ice.extractChannels(exp, cm_out);

  // compare results
  TEST_EQUAL(cm_out.size(), 4)
  ABORT_IF(cm_out.size() != 4)
  for (ConsensusMap::Iterator cf = cm_out.begin(); cf != cm_out.end(); ++cf)
  {
    DoubleReal prec_intensity = cf->getMetaValue("precursor_intensity");
    TEST_EQUAL(prec_intensity > 5300000.0, true)
  }
}

{
  // load test data
  MSExperiment<Peak1D> exp;
  MzMLFile mzmlfile;
  mzmlfile.load(OPENMS_GET_TEST_DATA_PATH("IsobaricChannelExtractor_6.mzML"), exp);

  // add some more information to the quant method
  Param pItraq = q_method->getParameters();
  pItraq.setValue("channel_114_description", "ref");
  pItraq.setValue("channel_115_description", "something");
Example 12: outputTo
ExitCodes outputTo(ostream& os)
{
  //-------------------------------------------------------------
  // Parameter handling
  //-------------------------------------------------------------

  // File names
  String in = getStringOption_("in");

  // File type
  FileHandler fh;
  FileTypes::Type in_type = FileTypes::nameToType(getStringOption_("in_type"));
  if (in_type == FileTypes::UNKNOWN)
  {
    in_type = fh.getType(in);
    writeDebug_(String("Input file type: ") + FileTypes::typeToName(in_type), 2);
  }
  if (in_type == FileTypes::UNKNOWN)
  {
    writeLog_("Error: Could not determine input file type!");
    return PARSE_ERROR;
  }

  MSExperiment<Peak1D> exp;
  FeatureMap feat;
  ConsensusMap cons;
  if (in_type == FileTypes::FEATUREXML) //features
  {
    FeatureXMLFile().load(in, feat);
    feat.updateRanges();
  }
  else if (in_type == FileTypes::CONSENSUSXML) //consensus features
  {
    ConsensusXMLFile().load(in, cons);
    cons.updateRanges();
  }

  //-------------------------------------------------------------
  // meta information
  //-------------------------------------------------------------
  if (getFlag_("m"))
  {
    os << endl
       << "-- General information --" << endl
       << endl
       << "file name: " << in << endl
       << "file type: " << FileTypes::typeToName(in_type) << endl;

    // basic info
    os << endl
       << "-- Meta information --" << endl
       << endl;
    if (in_type == FileTypes::FEATUREXML) //features
    {
      os << "Document id : " << feat.getIdentifier() << endl << endl;
    }
    else if (in_type == FileTypes::CONSENSUSXML) //consensus features
    {
      os << "Document id : " << cons.getIdentifier() << endl << endl;
    }
  }

  //-------------------------------------------------------------
  // data processing
  //-------------------------------------------------------------
  if (getFlag_("p"))
  {
    // basic info
    os << endl
       << "-- Data processing information --" << endl
       << endl;

    // get data processing info
    vector<DataProcessing> dp;
    if (in_type == FileTypes::FEATUREXML) //features
    {
      dp = feat.getDataProcessing();
    }
    else if (in_type == FileTypes::CONSENSUSXML) //consensus features
    {
      dp = cons.getDataProcessing();
    }
    int i = 0;
    for (vector<DataProcessing>::iterator it = dp.begin(); it != dp.end(); ++it)
    {
      os << "Data processing " << i << endl;
      os << "\tcompletion_time: " << (*it).getCompletionTime().getDate() << 'T' << (*it).getCompletionTime().getTime() << endl;
      os << "\tsoftware name: " << (*it).getSoftware().getName() << " version " << (*it).getSoftware().getVersion() << endl;
      for (set<DataProcessing::ProcessingAction>::const_iterator paIt = (*it).getProcessingActions().begin(); paIt != (*it).getProcessingActions().end(); ++paIt)
      {
        os << "\t\tprocessing action: " << DataProcessing::NamesOfProcessingAction[*paIt] << endl;
      }
      ++i; // count each data-processing entry
    }
  }
  //......... part of the code is omitted here .........
Example 13: main_
ExitCodes main_(int, const char**)
{
  //-------------------------------------------------------------
  // parameter handling
  //-------------------------------------------------------------

  // file list
  StringList file_list = getStringList_("in");

  // file type
  FileHandler file_handler;
  FileTypes::Type force_type;
  if (getStringOption_("in_type").size() > 0)
  {
    force_type = FileTypes::nameToType(getStringOption_("in_type"));
  }
  else
  {
    force_type = file_handler.getType(file_list[0]);
  }

  // output file names and types
  String out_file = getStringOption_("out");
  bool annotate_file_origin = getFlag_("annotate_file_origin");
  rt_gap_ = getDoubleOption_("rt_concat:gap");
  vector<String> trafo_out = getStringList_("rt_concat:trafo_out");
  if (trafo_out.empty())
  {
    // resize now so we don't have to worry about indexing out of bounds:
    trafo_out.resize(file_list.size());
  }
  else if (trafo_out.size() != file_list.size())
  {
    writeLog_("Error: Number of transformation output files must equal the number of input files (parameters 'rt_concat:trafo_out'/'in')!");
    return ILLEGAL_PARAMETERS;
  }

  //-------------------------------------------------------------
  // calculations
  //-------------------------------------------------------------
  if (force_type == FileTypes::FEATUREXML)
  {
    FeatureMap out;
    FeatureXMLFile fh;
    for (Size i = 0; i < file_list.size(); ++i)
    {
      FeatureMap map;
      fh.load(file_list[i], map);
      if (annotate_file_origin)
      {
        for (FeatureMap::iterator it = map.begin(); it != map.end(); ++it)
        {
          it->setMetaValue("file_origin", DataValue(file_list[i]));
        }
      }
      if (rt_gap_ > 0.0) // concatenate in RT
      {
        adjustRetentionTimes_(map, trafo_out[i], i == 0);
      }
      out += map;
    }

    //-------------------------------------------------------------
    // writing output
    //-------------------------------------------------------------

    // annotate output with data processing info
    addDataProcessing_(out, getProcessingInfo_(DataProcessing::FORMAT_CONVERSION));
    fh.store(out_file, out);
  }
  else if (force_type == FileTypes::CONSENSUSXML)
  {
    ConsensusMap out;
    ConsensusXMLFile fh;
    fh.load(file_list[0], out);
    // skip first file
    for (Size i = 1; i < file_list.size(); ++i)
    {
      ConsensusMap map;
      fh.load(file_list[i], map);
      if (annotate_file_origin)
      {
        for (ConsensusMap::iterator it = map.begin(); it != map.end(); ++it)
        {
          it->setMetaValue("file_origin", DataValue(file_list[i]));
        }
      }
      if (rt_gap_ > 0.0) // concatenate in RT
      {
        adjustRetentionTimes_(map, trafo_out[i], i == 0);
      }
      //......... part of the code is omitted here .........
Example 14: store
void IBSpectraFile::store(const String& filename, const ConsensusMap& cm)
{
  // typedefs for shorter code
  typedef std::vector<ProteinHit>::iterator ProtHitIt;

  // general settings .. do we need to expose these?
  // ----------------------------------------------------------------------
  /// Allow also non-unique peptides to be exported
  bool allow_non_unique = true;
  /// Intensities below this value will be set to 0.0 to avoid numerical problems when quantifying
  double intensity_threshold = 0.00001;
  // ----------------------------------------------------------------------

  // guess experiment type
  boost::shared_ptr<IsobaricQuantitationMethod> quantMethod = guessExperimentType_(cm);

  // we need the protein identifications to reference the protein names
  ProteinIdentification protIdent;
  bool has_proteinIdentifications = false;
  if (cm.getProteinIdentifications().size() > 0)
  {
    protIdent = cm.getProteinIdentifications()[0];
    has_proteinIdentifications = true;
  }

  // start the file by adding the tsv header
  TextFile textFile;
  textFile.addLine(ListUtils::concatenate(constructHeader_(*quantMethod), "\t"));

  for (ConsensusMap::ConstIterator cm_iter = cm.begin();
       cm_iter != cm.end();
       ++cm_iter)
  {
    const ConsensusFeature& cFeature = *cm_iter;
    std::vector<IdCSV> entries;

    /// 1st we extract the identification information from the consensus feature
    if (cFeature.getPeptideIdentifications().size() == 0 || !has_proteinIdentifications)
    {
      // we store unidentified hits anyway, because the iTRAQ quant is still helpful for normalization
      entries.push_back(IdCSV());
    }
    else
    {
      // protein name:
      const PeptideHit& peptide_hit = cFeature.getPeptideIdentifications()[0].getHits()[0];
      std::set<String> protein_accessions = peptide_hit.extractProteinAccessions();
      if (protein_accessions.size() != 1)
      {
        if (!allow_non_unique) continue; // we only want unique peptides
      }
      for (std::set<String>::const_iterator prot_ac = protein_accessions.begin(); prot_ac != protein_accessions.end(); ++prot_ac)
      {
        IdCSV entry;
        entry.charge = cFeature.getPeptideIdentifications()[0].getHits()[0].getCharge();
        entry.peptide = cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence().toUnmodifiedString();
        entry.theo_mass = cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence().getMonoWeight(Residue::Full, cFeature.getPeptideIdentifications()[0].getHits()[0].getCharge());
        // write modif
        entry.modif = getModifString_(cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence());
        ProtHitIt proteinHit = protIdent.findHit(*prot_ac);
        if (proteinHit == protIdent.getHits().end())
        {
          std::cerr << "Protein referenced in peptide not found...\n";
          continue; // protein not found
        }
        entry.accession = proteinHit->getAccession();
        entries.push_back(entry);
      }
    }

    // 2nd we add the quantitative information of the channels
    // .. skip features with 0 intensity
    if (cFeature.getIntensity() == 0)
    {
      continue;
    }

    for (std::vector<IdCSV>::iterator entry = entries.begin();
         entry != entries.end();
         ++entry)
    {
      // set parent intensity
      entry->parent_intens = cFeature.getIntensity();
      entry->retention_time = cFeature.getRT();
      entry->spectrum = cFeature.getUniqueId();
      entry->exp_mass = cFeature.getMZ();

      // create output line
      StringList currentLine;
      // add entry to currentLine
      entry->toStringList(currentLine);

      // extract channel intensities and positions
      //......... part of the code is omitted here .........
Example 15: main_
ExitCodes main_(int, const char **)
{
  String in = getStringOption_("in"), out = getStringOption_("out"),
    id_out = getStringOption_("id_out");

  if (out.empty() && id_out.empty())
  {
    throw Exception::RequiredParameterNotGiven(__FILE__, __LINE__,
                                               __PRETTY_FUNCTION__,
                                               "out/id_out");
  }

  vector<ProteinIdentification> proteins;
  vector<PeptideIdentification> peptides;

  FileTypes::Type in_type = FileHandler::getType(in);
  if (in_type == FileTypes::MZML)
  {
    MSExperiment<> experiment;
    MzMLFile().load(in, experiment);
    // what about unassigned peptide IDs?
    for (MSExperiment<>::Iterator exp_it = experiment.begin();
         exp_it != experiment.end(); ++exp_it)
    {
      peptides.insert(peptides.end(),
                      exp_it->getPeptideIdentifications().begin(),
                      exp_it->getPeptideIdentifications().end());
      exp_it->getPeptideIdentifications().clear();
    }
    experiment.getProteinIdentifications().swap(proteins);
    if (!out.empty())
    {
      addDataProcessing_(experiment,
                         getProcessingInfo_(DataProcessing::FILTERING));
      MzMLFile().store(out, experiment);
    }
  }
  else if (in_type == FileTypes::FEATUREXML)
  {
    FeatureMap features;
    FeatureXMLFile().load(in, features);
    features.getUnassignedPeptideIdentifications().swap(peptides);
    for (FeatureMap::Iterator feat_it = features.begin();
         feat_it != features.end(); ++feat_it)
    {
      peptides.insert(peptides.end(),
                      feat_it->getPeptideIdentifications().begin(),
                      feat_it->getPeptideIdentifications().end());
      feat_it->getPeptideIdentifications().clear();
    }
    features.getProteinIdentifications().swap(proteins);
    if (!out.empty())
    {
      addDataProcessing_(features,
                         getProcessingInfo_(DataProcessing::FILTERING));
      FeatureXMLFile().store(out, features);
    }
  }
  else // consensusXML
  {
    ConsensusMap consensus;
    ConsensusXMLFile().load(in, consensus);
    consensus.getUnassignedPeptideIdentifications().swap(peptides);
    for (ConsensusMap::Iterator cons_it = consensus.begin();
         cons_it != consensus.end(); ++cons_it)
    {
      peptides.insert(peptides.end(),
                      cons_it->getPeptideIdentifications().begin(),
                      cons_it->getPeptideIdentifications().end());
      cons_it->getPeptideIdentifications().clear();
    }
    consensus.getProteinIdentifications().swap(proteins);
    if (!out.empty())
    {
      addDataProcessing_(consensus,
                         getProcessingInfo_(DataProcessing::FILTERING));
      ConsensusXMLFile().store(out, consensus);
    }
  }

  if (!id_out.empty())
  {
    // IDMapper can match a peptide ID to several overlapping features,
    // resulting in duplicates; this shouldn't be the case for peak data
    if (in_type != FileTypes::MZML) removeDuplicates_(peptides);
    IdXMLFile().store(id_out, proteins, peptides);
  }

  return EXECUTION_OK;
}
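To show how one of these free-standing snippets might be driven end to end, the sketch below loads a consensusXML file, computes per-map normalization ratios with ConsensusMapNormalizerAlgorithmThreshold::computeCorrelation (Example 10), applies them with normalizeMaps (Example 3), and writes the result back. This is only a sketch: the include path for the normalizer class, the assumption that both methods are callable as statics, the ratio threshold of 0.67, the empty filter strings, and the file names are all assumptions, not taken from the examples above.

#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
// assumed include path; it may differ between OpenMS versions:
#include <OpenMS/ANALYSIS/MAPMATCHING/ConsensusMapNormalizerAlgorithmThreshold.h>

#include <vector>

using namespace OpenMS;

int main()
{
  ConsensusMap map;
  ConsensusXMLFile().load("input.consensusXML", map); // placeholder path

  // assumed ratio threshold of 0.67 and no accession/description filters
  std::vector<double> ratios =
    ConsensusMapNormalizerAlgorithmThreshold::computeCorrelation(map, 0.67, "", "");
  ConsensusMapNormalizerAlgorithmThreshold::normalizeMaps(map, ratios);

  ConsensusXMLFile().store("normalized.consensusXML", map); // placeholder path
  return 0;
}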