This page collects typical C++ usage examples of the TextFile::store method. If you are wondering what TextFile::store does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, TextFile.
Six code examples of TextFile::store are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
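All six examples share the same basic pattern: lines are buffered in a TextFile object with addLine() and then written to disk in one call to store(). Here is a minimal, self-contained sketch of that pattern (the file name, header, and numeric values are placeholders for illustration only, and the include path assumes the usual OpenMS header layout):

#include <OpenMS/FORMAT/TextFile.h>

using namespace OpenMS;

int main()
{
  // buffer a small tab-separated table in memory
  TextFile tf;
  tf.addLine("RT\tm/z\tintensity");                         // header row
  tf.addLine(String(12.3) + "\t" + 456.78 + "\t" + 1000.0); // one data row, built via OpenMS String concatenation
  // write all buffered lines to disk in one go
  tf.store("example.tsv");
  return 0;
}

Because TextFile keeps everything in memory until store() is called, it is well suited to the small tabular and report files produced in the examples below (EDTA tables, CSV dumps, ROC curves, HTML reports).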
Example 1: store
void EDTAFile::store(const String& filename, const FeatureMap& map) const
{
  TextFile tf;
  tf.addLine("RT\tm/z\tintensity\tcharge");
  for (Size i = 0; i < map.size(); ++i)
  {
    const Feature& f = map[i];
    tf.addLine(String(f.getRT()) + "\t" + f.getMZ() + "\t" + f.getIntensity() + "\t" + f.getCharge());
  }
  tf.store(filename);
}
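As a hedged usage sketch for calling this method (the file name and the way the FeatureMap is filled are assumptions for illustration, not part of the example above):

FeatureMap features;                          // assume this was populated elsewhere, e.g. by a feature finder
EDTAFile().store("features.edta", features);  // writes the RT / m/z / intensity / charge table built above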
Example 2: main_
// ... (part of the code omitted here) ...
    Peak1D peak;
    peak.setMZ(tic[ic].getRT());
    peak.setIntensity(tic[ic].getIntensity());
    tics.push_back(peak);
  }

  // smooth (no PP_CWT here due to efficiency reasons -- large FWHM take longer!)
  double fwhm = getDoubleOption_("auto_rt:FHWM");
  GaussFilter gf;
  Param p = gf.getParameters();
  p.setValue("gaussian_width", fwhm * 2); // wider than FWHM, just to be sure we have a fully smoothed peak. Merging two peaks is unlikely
  p.setValue("use_ppm_tolerance", "false");
  gf.setParameters(p);
  tic_gf = tics;
  gf.filter(tic_gf);

  // pick peaks
  PeakPickerHiRes pp;
  p = pp.getParameters();
  p.setValue("signal_to_noise", getDoubleOption_("auto_rt:SNThreshold"));
  pp.setParameters(p);
  pp.pick(tic_gf, tics_pp);

  if (tics_pp.size())
  {
    LOG_INFO << "Found " << tics_pp.size() << " auto-rt peaks at: ";
    for (Size ipp = 0; ipp != tics_pp.size(); ++ipp) LOG_INFO << " " << tics_pp[ipp].getMZ();
  }
  else
  {
    LOG_INFO << "Found no auto-rt peaks. Change threshold parameters!";
  }
  LOG_INFO << std::endl;

  if (!out_TIC_debug.empty()) // if debug file was given
  { // store intermediate steps for debug
    MSExperiment<> out_debug;
    out_debug.addChromatogram(toChromatogram(tics));
    out_debug.addChromatogram(toChromatogram(tic_gf));

    SignalToNoiseEstimatorMedian<MSSpectrum<> > snt;
    snt.init(tics);
    for (Size is = 0; is < tics.size(); ++is)
    {
      Peak1D peak;
      peak.setMZ(tic[is].getMZ());
      peak.setIntensity(snt.getSignalToNoise(tics[is]));
      tics_sn.push_back(peak);
    }
    out_debug.addChromatogram(toChromatogram(tics_sn));
    out_debug.addChromatogram(toChromatogram(tics_pp));

    // get rid of "native-id" missing warning
    for (Size id = 0; id < out_debug.size(); ++id) out_debug[id].setNativeID(String("spectrum=") + id);

    mzml_file.store(out_TIC_debug, out_debug);
    LOG_DEBUG << "Storing debug AUTO-RT: " << out_TIC_debug << std::endl;
  }

  // add target EICs: for each m/z with no/negative RT, add all combinations of that m/z with auto-RTs
  // duplicate m/z entries will be ignored!
  // all other lines with positive RT values are copied unaffected
  // do not allow doubles
  std::set<double> mz_doubles;
  for (ConsensusMap::Iterator cit = cm_local.begin(); cit != cm_local.end(); ++cit)
  {
    if (cit->getRT() < 0)
    {
      // ... (the remainder of this excerpt is omitted) ...
Example 3: main_
// ... (part of the code omitted here) ...
  Size one_match = count(features_truth, "matches", "1");
  cout << "one match: " << one_match << percentage(one_match, features_truth.size()) << endl;
  Size charge_match = count(features_truth, "correct_charge", "true");
  cout << " - correct charge: " << charge_match << percentage(charge_match, features_truth.size()) << endl;
  Size centroid_match = count(features_truth, "exact_centroid_match", "true");
  cout << " - exact centroid match: " << centroid_match << percentage(centroid_match, features_truth.size()) << endl;
  Size multi_match = features_truth.size() - count(features_truth, "matches", "0") - count(features_truth, "matches", "1");
  cout << "multiple matches: " << multi_match << percentage(multi_match, features_truth.size()) << endl;
  Size incorrect_match = multi_match + one_match - charge_match;
  cout << "incorrect matches: " << incorrect_match << percentage(incorrect_match, features_truth.size()) << endl;

  if (abort_reasons.size())
  {
    cout << "reasons for unmatched features:" << endl;
    for (Map<String, UInt>::iterator it = abort_strings.begin(); it != abort_strings.end(); ++it)
    {
      cout << " - " << String(it->second).fillLeft(' ', 4) << ": " << it->first << endl;
    }
  }

  //------------------------ intensity ------------------------
  cout << endl;
  cout << "intensity statistics:" << endl;
  cout << "=====================" << endl;
  if (ints_i.empty())
  {
    cout << "correlation of found features: nan" << endl;
  }
  else
  {
    cout << "correlation of found features: " << pearsonCorrelationCoefficient(ints_i.begin(), ints_i.end(), ints_t.begin(), ints_t.end()) << endl;
  }
  if (ints_found.empty())
  {
    cout << "intensity distribution of found: 0.0 0.0 0.0 0.0 0.0" << endl;
  }
  else
  {
    cout << "intensity distribution of found: " << fiveNumbers(ints_found, 1) << endl;
  }
  if (ints_missed.empty())
  {
    cout << "intensity distribution of missed: 0.0 0.0 0.0 0.0 0.0" << endl;
  }
  else
  {
    cout << "intensity distribution of missed: " << fiveNumbers(ints_missed, 1) << endl;
  }

  //------------------------ charges ------------------------
  cout << endl;
  cout << "charge matches statistics:" << endl;
  cout << "===========================" << endl;
  Map<UInt, UInt> present_charges, found_charges;
  for (Size i = 0; i < features_truth.size(); ++i)
  {
    UInt charge = features_truth[i].getCharge();
    present_charges[charge]++;
    if (features_truth[i].getMetaValue("correct_charge").toString() == "true")
    {
      found_charges[charge]++;
    }
  }
  for (Map<UInt, UInt>::const_iterator it = present_charges.begin(); it != present_charges.end(); ++it)
  {
    cout << "charge " << it->first << ": " << found_charges[it->first] << "/" << it->second << percentage(found_charges[it->first], it->second) << endl;
  }

  // write output
  if (getStringOption_("out") != "")
  {
    FeatureXMLFile().store(getStringOption_("out"), features_truth);
  }

  // ROC curve
  if (getStringOption_("out_roc") != "")
  {
    TextFile tf;
    tf.addLine("false\tcorrect\tFDR\tTPR");
    features_in.sortByIntensity(true);
    UInt f_correct = 0;
    UInt f_false = 0;
    double found = features_in.size();
    double correct = features_truth.size();
    for (Size i = 0; i < features_in.size(); ++i)
    {
      if (features_in[i].metaValueExists("correct_hit"))
      {
        ++f_correct;
      }
      else
      {
        ++f_false;
      }
      tf.addLine(String(f_false) + "\t" + f_correct + "\t" + String::number(f_false / found, 3) + "\t" + String::number(f_correct / correct, 3));
    }
    tf.store(getStringOption_("out_roc"));
  }

  return EXECUTION_OK;
}
Example 4: main_
ExitCodes main_(int, const char**)
{
  //----------------------------------------------------------------
  // load data
  //----------------------------------------------------------------
  StringList in_list = getStringList_("in");
  String out = getStringOption_("out");
  String out_csv = getStringOption_("out_csv");
  String format = getStringOption_("out_type");

  if (out.empty() && out_csv.empty())
  {
    LOG_ERROR << "Neither 'out' nor 'out_csv' were provided. Please assign at least one of them." << std::endl;
    return ILLEGAL_PARAMETERS;
  }

  if (!out.empty() && format == "") // get from filename
  {
    try
    {
      format = out.suffix('.');
    }
    catch (Exception::ElementNotFound& /*e*/)
    {
      format = "nosuffix";
    }
    // check if format is valid:
    if (!ListUtils::contains(out_formats_, format.toLower()))
    {
      LOG_ERROR << "No explicit image output format was provided via 'out_type', and the suffix ('" << format << "') does not resemble a valid type. Please fix one of them." << std::endl;
      return ILLEGAL_PARAMETERS;
    }
  }

  double q_min = getDoubleOption_("q_min");
  double q_max = getDoubleOption_("q_max");
  if (q_min >= q_max)
  {
    LOG_ERROR << "The parameter 'q_min' must be smaller than 'q_max'. Quitting..." << std::endl;
    return ILLEGAL_PARAMETERS;
  }

  IDEvaluationBase* mw = new IDEvaluationBase();
  Param alg_param = mw->getParameters();
  alg_param.insert("", getParam_().copy("algorithm:", true));
  mw->setParameters(alg_param);

  if (!mw->loadFiles(in_list))
  {
    LOG_ERROR << "Tool failed. See above." << std::endl;
    return INCOMPATIBLE_INPUT_DATA;
  }
  mw->setVisibleArea(q_min, q_max);

  if (!out.empty()) // save as image and exit
  {
    String error;
    bool r = mw->exportAsImage(out.toQString(), error, format.toQString());
    if (r) return EXECUTION_OK;
    else
    {
      LOG_ERROR << error << std::endl;
      return ILLEGAL_PARAMETERS;
    }
  }

  if (!out_csv.empty())
  {
    TextFile tf;
    for (Size i = 0; i < mw->getPoints().size(); ++i)
    {
      MSSpectrum s = mw->getPoints()[i];
      StringList sl1;
      StringList sl2;
      for (Size j = 0; j < s.size(); ++j)
      {
        sl1.push_back(s[j].getMZ());
        sl2.push_back(s[j].getIntensity());
      }
      tf.addLine(String("# ") + String(s.getMetaValue("search_engine")));
      tf.addLine(ListUtils::concatenate(sl1, ","));
      tf.addLine(ListUtils::concatenate(sl2, ","));
    }
    tf.store(out_csv);
  }

  delete mw;
  return EXECUTION_OK;
}
Example 5: store
void IBSpectraFile::store(const String& filename, const ConsensusMap& cm)
{
  // typedefs for shorter code
  typedef std::vector<ProteinHit>::iterator ProtHitIt;

  // general settings .. do we need to expose these?
  // ----------------------------------------------------------------------
  /// Allow also non-unique peptides to be exported
  bool allow_non_unique = true;
  /// Intensities below this value will be set to 0.0 to avoid numerical problems when quantifying
  double intensity_threshold = 0.00001;
  // ----------------------------------------------------------------------

  // guess experiment type
  boost::shared_ptr<IsobaricQuantitationMethod> quantMethod = guessExperimentType_(cm);

  // we need the protein identifications to reference the protein names
  ProteinIdentification protIdent;
  bool has_proteinIdentifications = false;
  if (cm.getProteinIdentifications().size() > 0)
  {
    protIdent = cm.getProteinIdentifications()[0];
    has_proteinIdentifications = true;
  }

  // start the file by adding the tsv header
  TextFile textFile;
  textFile.addLine(ListUtils::concatenate(constructHeader_(*quantMethod), "\t"));

  for (ConsensusMap::ConstIterator cm_iter = cm.begin();
       cm_iter != cm.end();
       ++cm_iter)
  {
    const ConsensusFeature& cFeature = *cm_iter;
    std::vector<IdCSV> entries;

    /// 1st we extract the identification information from the consensus feature
    if (cFeature.getPeptideIdentifications().size() == 0 || !has_proteinIdentifications)
    {
      // we store unidentified hits anyway, because the iTRAQ quant is still helpful for normalization
      entries.push_back(IdCSV());
    }
    else
    {
      // protein name:
      const PeptideHit& peptide_hit = cFeature.getPeptideIdentifications()[0].getHits()[0];
      std::set<String> protein_accessions = peptide_hit.extractProteinAccessions();
      if (protein_accessions.size() != 1)
      {
        if (!allow_non_unique) continue; // we only want unique peptides
      }

      for (std::set<String>::const_iterator prot_ac = protein_accessions.begin(); prot_ac != protein_accessions.end(); ++prot_ac)
      {
        IdCSV entry;
        entry.charge = cFeature.getPeptideIdentifications()[0].getHits()[0].getCharge();
        entry.peptide = cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence().toUnmodifiedString();
        entry.theo_mass = cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence().getMonoWeight(Residue::Full, cFeature.getPeptideIdentifications()[0].getHits()[0].getCharge());
        // write modif
        entry.modif = getModifString_(cFeature.getPeptideIdentifications()[0].getHits()[0].getSequence());

        ProtHitIt proteinHit = protIdent.findHit(*prot_ac);
        if (proteinHit == protIdent.getHits().end())
        {
          std::cerr << "Protein referenced in peptide not found...\n";
          continue; // protein not found
        }
        entry.accession = proteinHit->getAccession();
        entries.push_back(entry);
      }
    }

    // 2nd we add the quantitative information of the channels
    // .. skip features with 0 intensity
    if (cFeature.getIntensity() == 0)
    {
      continue;
    }

    for (std::vector<IdCSV>::iterator entry = entries.begin();
         entry != entries.end();
         ++entry)
    {
      // set parent intensity
      entry->parent_intens = cFeature.getIntensity();
      entry->retention_time = cFeature.getRT();
      entry->spectrum = cFeature.getUniqueId();
      entry->exp_mass = cFeature.getMZ();

      // create output line
      StringList currentLine;

      // add entry to currentLine
      entry->toStringList(currentLine);

      // extract channel intensities and positions
      // ... (the remainder of this excerpt is omitted) ...
Example 6: main_
// ... (part of the code omitted here) ...
            //BEGIN - THIS IS NEEDED FOR WRITING PARSERS ONLY
            /*
            set<String> allowed_terms;
            cv.getAllChildTerms(allowed_terms, tit->getAccession());
            for (set<String>::const_iterator atit = allowed_terms.begin(); atit != allowed_terms.end(); ++atit)
            {
              const ControlledVocabulary::CVTerm& child_term = cv.getTerm(*atit);
              String parser_string = String("os << \"<cvParam cvRef=\\\"MS\\\" accession=\\\"") + child_term.id + "\\\" name=\\\"" + child_term.name + "\\\"";
              for (Size i = 0; i < child_term.unparsed.size(); ++i)
              {
                //TODO this does not work anymore. The type is now stored as a member
                if (child_term.unparsed[i].hasSubstring("value-type:xsd\\:int") || child_term.unparsed[i].hasSubstring("value-type:xsd\\:float") || child_term.unparsed[i].hasSubstring("value-type:xsd\\:string"))
                {
                  parser_string += " value=\\\"\" << << \"\\\"";
                }
              }
              parser_string += "/>\\n\";<BR>";
              file.push_back(parser_string);
            }
            */
          }
          else
          {
            file.addLine(" - Missing terms, CV not loaded...");
            cerr << "Warning: no child terms for " << tit->getAccession() << " found!" << endl;
          }
          file.addLine(" </div>");
          file.addLine(" </TD></TD></TR>");
        }
      }
    }

    file.addLine(" </TABLE>");
    file.addLine(" </BODY>");
    file.addLine("</HTML>");
    file.store(getStringOption_("html"));
    return EXECUTION_OK;
  }
  // iterate over all mapping rules and store the mentioned terms
  StringList ignore_namespaces = getStringList_("ignore_cv");
  set<String> ignore_cv_list;
  for (StringList::const_iterator it = ignore_namespaces.begin(); it != ignore_namespaces.end(); ++it)
  {
    ignore_cv_list.insert(*it);
  }

  set<String> used_terms;
  for (vector<CVMappingRule>::const_iterator it = mappings.getMappingRules().begin(); it != mappings.getMappingRules().end(); ++it)
  {
    set<String> allowed_terms;
    // iterate over all allowed terms
    for (vector<CVMappingTerm>::const_iterator tit = it->getCVTerms().begin(); tit != it->getCVTerms().end(); ++tit)
    {
      // check whether the term itself is allowed, or only its children
      if (tit->getUseTerm())
      {
        allowed_terms.insert(tit->getAccession());
      }
      // check whether we need the whole tree, or just the term itself
      if (tit->getAllowChildren())
      {
        // check whether we want to ignore this term
        if (!(tit->getAccession().has(':') && ignore_cv_list.find(tit->getAccession().prefix(':')) != ignore_cv_list.end()))
        {
          cv.getAllChildTerms(allowed_terms, tit->getAccession());
}