This page collects typical usage examples of the C++ method ConsensusMap::clear. If you are unsure what ConsensusMap::clear does, how to call it, or what real code using it looks like, the hand-picked examples below may help. You can also browse further usage examples of the enclosing class, ConsensusMap.
Six code examples of the ConsensusMap::clear method are shown below, ordered by popularity by default.
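Before diving into the examples, here is a minimal, self-contained sketch of what ConsensusMap::clear does. It assumes an OpenMS build whose API matches the examples below (older releases expose getFileDescriptions; newer ones rename it to getColumnHeaders). The point is only that clear(false) removes the consensus features but keeps the meta data, while the default clear() wipes both.
#include <OpenMS/KERNEL/ConsensusFeature.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <iostream>
using namespace OpenMS;
int main()
{
  ConsensusMap cm;
  cm.getFileDescriptions()[0].label = "light"; // some meta data
  cm.push_back(ConsensusFeature());            // one dummy element
  cm.clear(false); // drop the features, keep meta data such as the file descriptions
  std::cout << "features: " << cm.size()
            << ", file descriptions: " << cm.getFileDescriptions().size() << std::endl; // 0, 1
  cm.clear();      // default clear(true): the meta data is wiped as well
  std::cout << "file descriptions after full clear: "
            << cm.getFileDescriptions().size() << std::endl;                            // 0
  return 0;
}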
Example 1: main_
ExitCodes main_(int, const char**)
{
//-------------------------------------------------------------
// parameter handling
//-------------------------------------------------------------
StringList in = getStringList_("in");
String edta = getStringOption_("pos");
String out = getStringOption_("out");
String out_sep = getStringOption_("out_separator");
String out_TIC_debug = getStringOption_("auto_rt:out_debug_TIC");
StringList in_header = getStringList_("in_header");
// number of out_debug_TIC files and input files must be identical
/*if (out_TIC_debug.size() > 0 && in.size() != out_TIC_debug.size())
{
LOG_FATAL_ERROR << "Error: number of input file 'in' and auto_rt:out_debug_TIC files must be identical!" << std::endl;
return ILLEGAL_PARAMETERS;
}*/
// number of header files and input files must be identical
if (in_header.size() > 0 && in.size() != in_header.size())
{
LOG_FATAL_ERROR << "Error: number of input file 'in' and 'in_header' files must be identical!" << std::endl;
return ILLEGAL_PARAMETERS;
}
if (!getFlag_("auto_rt:enabled") && !out_TIC_debug.empty())
{
LOG_FATAL_ERROR << "Error: TIC output file requested, but auto_rt is not enabled! Either do not request the file or switch on 'auto_rt:enabled'." << std::endl;
return ILLEGAL_PARAMETERS;
}
double rttol = getDoubleOption_("rt_tol");
double mztol = getDoubleOption_("mz_tol");
Size rt_collect = getIntOption_("rt_collect");
//-------------------------------------------------------------
// loading input
//-------------------------------------------------------------
MzMLFile mzml_file;
mzml_file.setLogType(log_type_);
MSExperiment<Peak1D> exp, exp_pp;
EDTAFile ed;
ConsensusMap cm;
ed.load(edta, cm);
StringList tf_single_header0, tf_single_header1, tf_single_header2; // header content, for each column
std::vector<String> vec_single; // one line for each compound, multiple columns per experiment
vec_single.resize(cm.size());
for (Size fi = 0; fi < in.size(); ++fi)
{
// load raw data
mzml_file.load(in[fi], exp);
exp.sortSpectra(true);
if (exp.empty())
{
LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry." << std::endl;
return INCOMPATIBLE_INPUT_DATA;
}
// try to detect RT peaks (only for the first input file -- all others should align!)
// cm.size() might change in here...
if (getFlag_("auto_rt:enabled") && fi == 0)
{
ConsensusMap cm_local = cm; // we might have different RT peaks for each map if 'auto_rt' is enabled
cm.clear(false); // reset global list (about to be filled)
// compute TIC
MSChromatogram<> tic = exp.getTIC();
MSSpectrum<> tics, tic_gf, tics_pp, tics_sn;
for (Size ic = 0; ic < tic.size(); ++ic)
{ // rewrite Chromatogram to MSSpectrum (GaussFilter requires it)
Peak1D peak;
peak.setMZ(tic[ic].getRT());
peak.setIntensity(tic[ic].getIntensity());
tics.push_back(peak);
}
// smooth (no PP_CWT here due to efficiency reasons -- large FWHM take longer!)
double fwhm = getDoubleOption_("auto_rt:FHWM");
GaussFilter gf;
Param p = gf.getParameters();
p.setValue("gaussian_width", fwhm * 2); // wider than FWHM, just to be sure we have a fully smoothed peak. Merging two peaks is unlikely
p.setValue("use_ppm_tolerance", "false");
gf.setParameters(p);
tic_gf = tics;
gf.filter(tic_gf);
// pick peaks
PeakPickerHiRes pp;
p = pp.getParameters();
p.setValue("signal_to_noise", getDoubleOption_("auto_rt:SNThreshold"));
pp.setParameters(p);
pp.pick(tic_gf, tics_pp);
if (tics_pp.size())
//......... (rest of the code omitted) .........
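The most reusable step in example 1 is the detour over a temporary spectrum: GaussFilter smooths spectra, not chromatograms, so the TIC's RT values are written into the peak positions of an MSSpectrum before smoothing. A hedged, stand-alone sketch of that step, using only the OpenMS classes and parameters that already appear above (the helper name smoothTIC is made up):
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <OpenMS/FILTERING/SMOOTHING/GaussFilter.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/Peak1D.h>
using namespace OpenMS;
// Rewrite a chromatogram as a spectrum (RT becomes the peak position) and smooth it,
// mirroring the auto_rt branch of example 1.
MSSpectrum<> smoothTIC(const MSChromatogram<>& tic, double fwhm)
{
  MSSpectrum<> tics;
  for (Size ic = 0; ic < tic.size(); ++ic)
  {
    Peak1D peak;
    peak.setMZ(tic[ic].getRT());               // RT stored in the position slot
    peak.setIntensity(tic[ic].getIntensity());
    tics.push_back(peak);
  }
  GaussFilter gf;
  Param p = gf.getParameters();
  p.setValue("gaussian_width", fwhm * 2);      // wider than the FWHM, as in the example
  p.setValue("use_ppm_tolerance", "false");
  gf.setParameters(p);
  gf.filter(tics);                             // in-place smoothing
  return tics;
}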
Example 2: extractChannels
void IsobaricChannelExtractor::extractChannels(const MSExperiment<Peak1D>& ms_exp_data, ConsensusMap& consensus_map)
{
if (ms_exp_data.empty())
{
LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry.\n";
throw Exception::MissingInformation(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Experiment has no scans!");
}
// clear the output map
consensus_map.clear(false);
consensus_map.setExperimentType("labeled_MS2");
// create predicate for spectrum checking
LOG_INFO << "Selecting scans with activation mode: " << (selected_activation_ == "" ? "any" : selected_activation_) << "\n";
HasActivationMethod<MSExperiment<Peak1D>::SpectrumType> activation_predicate(StringList::create(selected_activation_));
// now we have picked data
// --> assign peaks to channels
UInt64 element_index(0);
// remember the current precursor spectrum
MSExperiment<Peak1D>::ConstIterator prec_spec = ms_exp_data.end();
for (MSExperiment<Peak1D>::ConstIterator it = ms_exp_data.begin(); it != ms_exp_data.end(); ++it)
{
// remember the last MS1 spectrum, as we assume it to be the precursor spectrum
if (it->getMSLevel() == 1) prec_spec = it;
if (selected_activation_ == "" || activation_predicate(*it))
{
// check if precursor is available
if (it->getPrecursors().empty())
{
throw Exception::MissingInformation(__FILE__, __LINE__, __PRETTY_FUNCTION__, String("No precursor information given for scan native ID ") + it->getNativeID() + " with RT " + String(it->getRT()));
}
// check precursor constraints
if (!isValidPrecursor_(it->getPrecursors()[0]))
{
LOG_DEBUG << "Skip spectrum " << it->getNativeID() << ": Precursor doesn't fulfill all constraints." << std::endl;
continue;
}
// check precursor purity if we have a valid precursor ..
if (prec_spec != ms_exp_data.end())
{
const DoubleReal purity = computePrecursorPurity_(it, prec_spec);
if (purity < min_precursor_purity_)
{
LOG_DEBUG << "Skip spectrum " << it->getNativeID() << ": Precursor purity is below the threshold. [purity = " << purity << "]" << std::endl;
continue;
}
}
else
{
LOG_INFO << "No precursor available for spectrum: " << it->getNativeID() << std::endl;
}
// store RT&MZ of parent ion as centroid of ConsensusFeature
ConsensusFeature cf;
cf.setUniqueId();
cf.setRT(it->getRT());
cf.setMZ(it->getPrecursors()[0].getMZ());
Peak2D channel_value;
channel_value.setRT(it->getRT());
// for each channel
UInt64 map_index = 0;
Peak2D::IntensityType overall_intensity = 0;
for (IsobaricQuantitationMethod::IsobaricChannelList::const_iterator cl_it = quant_method_->getChannelInformation().begin();
cl_it != quant_method_->getChannelInformation().end();
++cl_it)
{
// set mz-position of channel
channel_value.setMZ(cl_it->center);
// reset intensity
channel_value.setIntensity(0);
// as every evaluation requires time, we cache the MZEnd iterator
const MSExperiment<Peak1D>::SpectrumType::ConstIterator mz_end = it->MZEnd(cl_it->center + reporter_mass_shift_);
// add up all signals
for (MSExperiment<Peak1D>::SpectrumType::ConstIterator mz_it = it->MZBegin(cl_it->center - reporter_mass_shift_);
mz_it != mz_end;
++mz_it)
{
channel_value.setIntensity(channel_value.getIntensity() + mz_it->getIntensity());
}
// discard contribution of this channel as it is below the required intensity threshold
if (channel_value.getIntensity() < min_reporter_intensity_)
{
channel_value.setIntensity(0);
}
//......... (rest of the code omitted) .........
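The per-channel inner loop above is small enough to isolate: for each reporter channel, all raw intensity inside a window of plus/minus reporter_mass_shift around the channel center is summed. A hedged sketch as a free function (sumReporterWindow is a hypothetical name; MZBegin/MZEnd are the same iterator helpers used in the example):
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/Peak1D.h>
using namespace OpenMS;
// Sum the intensity of all peaks in [center - shift, center + shift),
// as done per isobaric channel in example 2.
Peak1D::IntensityType sumReporterWindow(const MSSpectrum<Peak1D>& spec,
                                        double center, double shift)
{
  Peak1D::IntensityType sum = 0;
  // cache the end iterator, as every evaluation costs time (same trick as above)
  MSSpectrum<Peak1D>::ConstIterator mz_end = spec.MZEnd(center + shift);
  for (MSSpectrum<Peak1D>::ConstIterator mz_it = spec.MZBegin(center - shift);
       mz_it != mz_end; ++mz_it)
  {
    sum += mz_it->getIntensity();
  }
  return sum;
}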
Example 3: run
/// @brief extracts the iTRAQ channels from the MS data and stores intensity values in a consensus map
///
/// @param ms_exp_data Raw data to read
/// @param consensus_map Output map; each MS² scan becomes one consensus feature
/// @throws Exception::MissingInformation if no scans present or MS² scan has no precursor
void ItraqChannelExtractor::run(const MSExperiment<Peak1D>& ms_exp_data, ConsensusMap& consensus_map)
{
if (ms_exp_data.empty())
{
LOG_WARN << "The given file does not contain any conventional peak data, but might"
" contain chromatograms. This tool currently cannot handle them, sorry.";
throw Exception::MissingInformation(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Experiment has no scans!");
}
MSExperiment<> ms_exp_MS2;
String mode = (String) param_.getValue("select_activation");
std::cout << "Selecting scans with activation mode: " << (mode == "" ? "any" : mode) << "\n";
HasActivationMethod<MSExperiment<Peak1D>::SpectrumType> activation_predicate(ListUtils::create<String>(mode));
for (size_t idx = 0; idx < ms_exp_data.size(); ++idx)
{
if (ms_exp_data[idx].getMSLevel() == 2)
{
if (mode == "" || activation_predicate(ms_exp_data[idx]))
{
// copy only MS² scans
ms_exp_MS2.addSpectrum(ms_exp_data[idx]);
}
else
{
//std::cout << "deleting spectrum # " << idx << " with RT: " << ms_exp_data[idx].getRT() << "\n";
}
}
}
#ifdef ITRAQ_DEBUG
std::cout << "we have " << ms_exp_MS2.size() << " scans left of level " << ms_exp_MS2[0].getMSLevel() << std::endl;
std::cout << "run: channel_map_ has " << channel_map_.size() << " entries!" << std::endl;
#endif
consensus_map.clear(false);
// set <mapList> header
Int index_cnt = 0;
for (ChannelMapType::const_iterator cm_it = channel_map_.begin(); cm_it != channel_map_.end(); ++cm_it)
{
// structure of Map cm_it
// first == channel-name as Int e.g. 114
// second == ChannelInfo struct
ConsensusMap::FileDescription channel_as_map;
// label is the channel + description provided in the Params
if (itraq_type_ != TMT_SIXPLEX)
channel_as_map.label = "iTRAQ_" + String(cm_it->second.name) + "_" + String(cm_it->second.description);
else
channel_as_map.label = "TMT_" + String(cm_it->second.name) + "_" + String(cm_it->second.description);
channel_as_map.size = ms_exp_MS2.size();
//TODO what about .filename? leave empty?
// add some more MetaInfo
channel_as_map.setMetaValue("channel_name", cm_it->second.name);
channel_as_map.setMetaValue("channel_id", cm_it->second.id);
channel_as_map.setMetaValue("channel_description", cm_it->second.description);
channel_as_map.setMetaValue("channel_center", cm_it->second.center);
channel_as_map.setMetaValue("channel_active", String(cm_it->second.active ? "true" : "false"));
consensus_map.getFileDescriptions()[index_cnt++] = channel_as_map;
}
// create consensusElements
Peak2D::CoordinateType allowed_deviation = (Peak2D::CoordinateType) param_.getValue("reporter_mass_shift");
// now we have picked data
// --> assign peaks to channels
UInt element_index(0);
for (MSExperiment<>::ConstIterator it = ms_exp_MS2.begin(); it != ms_exp_MS2.end(); ++it)
{
// store RT&MZ of parent ion as centroid of ConsensusFeature
ConsensusFeature cf;
cf.setUniqueId();
cf.setRT(it->getRT());
if (it->getPrecursors().size() >= 1)
{
cf.setMZ(it->getPrecursors()[0].getMZ());
}
else
{
throw Exception::MissingInformation(__FILE__, __LINE__, __PRETTY_FUNCTION__, String("No precursor information given for scan native ID ") + String(it->getNativeID()) + " with RT " + String(it->getRT()));
}
Peak2D channel_value;
channel_value.setRT(it->getRT());
// for each channel
Int index = 0;
Peak2D::IntensityType overall_intensity = 0;
for (ChannelMapType::const_iterator cm_it = channel_map_.begin(); cm_it != channel_map_.end(); ++cm_it)
{
// set mz-position of channel
channel_value.setMZ(cm_it->second.center);
// reset intensity
channel_value.setIntensity(0);
//......... (rest of the code omitted) .........
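Before any consensus features are created, example 3 builds the <mapList> header: one ConsensusMap::FileDescription per channel, carrying a label, the expected size, and per-channel meta values. A reduced, hedged sketch of just that step; the ChannelInfo struct below is a simplified stand-in for the extractor's internal channel map, not the real type:
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <vector>
using namespace OpenMS;
struct ChannelInfo            // simplified stand-in for the extractor's channel map entries
{
  Int name;                   // e.g. 114
  Int id;
  String description;
  double center;
};
void fillChannelHeader(ConsensusMap& consensus_map,
                       const std::vector<ChannelInfo>& channels, Size n_spectra)
{
  consensus_map.clear(false); // keep meta data, drop any old features
  Int index_cnt = 0;
  for (std::vector<ChannelInfo>::const_iterator it = channels.begin(); it != channels.end(); ++it)
  {
    ConsensusMap::FileDescription channel_as_map;
    channel_as_map.label = "iTRAQ_" + String(it->name) + "_" + it->description;
    channel_as_map.size = n_spectra;
    channel_as_map.setMetaValue("channel_name", it->name);
    channel_as_map.setMetaValue("channel_id", it->id);
    channel_as_map.setMetaValue("channel_center", it->center);
    consensus_map.getFileDescriptions()[index_cnt++] = channel_as_map;
  }
}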
Example 4: run
void StablePairFinder::run(const std::vector<ConsensusMap>& input_maps,
ConsensusMap& result_map)
{
// empty output destination:
result_map.clear(false);
// sanity checks:
if (input_maps.size() != 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__,
"exactly two input maps required");
}
checkIds_(input_maps);
// set up the distance functor:
double max_intensity = max(input_maps[0].getMaxInt(),
input_maps[1].getMaxInt());
Param distance_params = param_.copy("");
distance_params.remove("use_identifications");
distance_params.remove("second_nearest_gap");
FeatureDistance feature_distance(max_intensity, false);
feature_distance.setParameters(distance_params);
// keep track of pairing:
std::vector<bool> is_singleton[2];
is_singleton[0].resize(input_maps[0].size(), true);
is_singleton[1].resize(input_maps[1].size(), true);
typedef pair<double, double> DoublePair;
DoublePair init = make_pair(FeatureDistance::infinity,
FeatureDistance::infinity);
// for every element in map 0:
// - index of nearest neighbor in map 1:
vector<UInt> nn_index_0(input_maps[0].size(), UInt(-1));
// - distances to nearest and second-nearest neighbors in map 1:
vector<DoublePair> nn_distance_0(input_maps[0].size(), init);
// for every element in map 1:
// - index of nearest neighbor in map 0:
vector<UInt> nn_index_1(input_maps[1].size(), UInt(-1));
// - distances to nearest and second-nearest neighbors in map 0:
vector<DoublePair> nn_distance_1(input_maps[1].size(), init);
// iterate over all feature pairs, find nearest neighbors:
// TODO: iterate over SENSIBLE RT (and m/z) window -- sort the maps beforehand
// to save a lot of processing time...
// Once done, remove the warning in the description of the 'use_identifications' parameter
for (UInt fi0 = 0; fi0 < input_maps[0].size(); ++fi0)
{
const ConsensusFeature& feat0 = input_maps[0][fi0];
for (UInt fi1 = 0; fi1 < input_maps[1].size(); ++fi1)
{
const ConsensusFeature& feat1 = input_maps[1][fi1];
if (use_IDs_ && !compatibleIDs_(feat0, feat1)) // check peptide IDs
{
continue; // mismatch
}
pair<bool, double> result = feature_distance(feat0, feat1);
double distance = result.second;
// we only care if distance constraints are satisfied for "best
// matches", not for second-best; this means that second-best distances
// can become smaller than best distances
// (e.g. the RT is larger than allowed (->invalid pair), but m/z is perfect and has the most weight --> better score!)
bool valid = result.first;
// update entries for map 0:
if (distance < nn_distance_0[fi0].second)
{
if (valid && (distance < nn_distance_0[fi0].first))
{
nn_distance_0[fi0].second = nn_distance_0[fi0].first;
nn_distance_0[fi0].first = distance;
nn_index_0[fi0] = fi1;
}
else
{
nn_distance_0[fi0].second = distance;
}
}
// update entries for map 1:
if (distance < nn_distance_1[fi1].second)
{
if (valid && (distance < nn_distance_1[fi1].first))
{
nn_distance_1[fi1].second = nn_distance_1[fi1].first;
nn_distance_1[fi1].first = distance;
nn_index_1[fi1] = fi0;
}
else
{
nn_distance_1[fi1].second = distance;
}
}
}
}
//......... (rest of the code omitted) .........
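The core of example 4 is the per-feature bookkeeping: every feature keeps its nearest and second-nearest distance to the other map, and only "valid" candidates (those passing all distance constraints) may become the best match, while invalid ones can still tighten the second-best distance. A plain C++ sketch of that update rule, independent of the OpenMS distance functor (names are illustrative):
#include <cstddef>
#include <limits>
#include <utility>
// Keep (best, second-best) distances and the index of the best match for one feature.
// Mirrors the update logic in StablePairFinder::run.
struct NearestNeighbor
{
  std::pair<double, double> dist;  // (nearest, second-nearest)
  std::size_t index;               // index of the nearest neighbor
  NearestNeighbor()
    : dist(std::numeric_limits<double>::infinity(),
           std::numeric_limits<double>::infinity()),
      index(static_cast<std::size_t>(-1)) {}
  // 'valid' means the candidate satisfies all distance constraints; invalid candidates
  // may still shrink the second-best distance (and thus weaken the best pair later).
  void update(double distance, bool valid, std::size_t candidate_index)
  {
    if (distance >= dist.second) return;
    if (valid && distance < dist.first)
    {
      dist.second = dist.first;
      dist.first = distance;
      index = candidate_index;
    }
    else
    {
      dist.second = distance;
    }
  }
};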
Example 5: run
void LabeledPairFinder::run(const vector<ConsensusMap>& input_maps, ConsensusMap& result_map)
{
if (input_maps.size() != 1)
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "exactly one input map required");
if (result_map.getFileDescriptions().size() != 2)
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "two file descriptions required");
if (result_map.getFileDescriptions().begin()->second.filename != result_map.getFileDescriptions().rbegin()->second.filename)
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "the two file descriptions have to contain the same file name");
checkIds_(input_maps);
//look up the light and heavy index
Size light_index = numeric_limits<Size>::max();
Size heavy_index = numeric_limits<Size>::max();
for (ConsensusMap::FileDescriptions::const_iterator it = result_map.getFileDescriptions().begin();
it != result_map.getFileDescriptions().end();
++it)
{
if (it->second.label == "heavy")
{
heavy_index = it->first;
}
else if (it->second.label == "light")
{
light_index = it->first;
}
}
if (light_index == numeric_limits<Size>::max() || heavy_index == numeric_limits<Size>::max())
{
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "the input maps have to be labeled 'light' and 'heavy'");
}
result_map.clear(false);
// sort consensus features by RT (and MZ) to speed up searching afterwards
typedef ConstRefVector<ConsensusMap> RefMap;
RefMap model_ref(input_maps[0].begin(), input_maps[0].end());
model_ref.sortByPosition();
//calculate matches
ConsensusMap matches;
//settings
double rt_pair_dist = param_.getValue("rt_pair_dist");
double rt_dev_low = param_.getValue("rt_dev_low");
double rt_dev_high = param_.getValue("rt_dev_high");
double mz_dev = param_.getValue("mz_dev");
DoubleList mz_pair_dists = param_.getValue("mz_pair_dists");
bool mrm = param_.getValue("mrm").toBool();
//estimate RT parameters
if (param_.getValue("rt_estimate") == "true")
{
//find all possible RT distances of features with the same charge and a good m/z distance
vector<double> dists;
dists.reserve(model_ref.size());
for (RefMap::const_iterator it = model_ref.begin(); it != model_ref.end(); ++it)
{
for (RefMap::const_iterator it2 = model_ref.begin(); it2 != model_ref.end(); ++it2)
{
for (DoubleList::const_iterator dist_it = mz_pair_dists.begin(); dist_it != mz_pair_dists.end(); ++dist_it)
{
double mz_pair_dist = *dist_it;
if (it2->getCharge() == it->getCharge()
&& it2->getMZ() >= it->getMZ() + mz_pair_dist / it->getCharge() - mz_dev
&& it2->getMZ() <= it->getMZ() + mz_pair_dist / it->getCharge() + mz_dev)
{
dists.push_back(it2->getRT() - it->getRT());
}
}
}
}
if (dists.empty())
{
cout << "Warning: Could not find pairs for RT distance estimation. The manual settings are used!" << endl;
}
else
{
if (dists.size() < 50)
{
cout << "Warning: Found only " << dists.size() << " pairs. The estimated shift and std deviation are probably not reliable!" << endl;
}
//--------------------------- estimate initial parameters of fit ---------------------------
GaussFitter::GaussFitResult result(-1, -1, -1);
//first estimate of the optimal shift: median of the distances
sort(dists.begin(), dists.end());
Size median_index = dists.size() / 2;
result.x0 = dists[median_index];
//create histogram of distances
//consider only the maximum of pairs, centered around the optimal shift
Size max_pairs = model_ref.size() / 2;
Size start_index = (Size) max((SignedSize)0, (SignedSize)(median_index - max_pairs / 2));
Size end_index = (Size) min((SignedSize)(dists.size() - 1), (SignedSize)(median_index + max_pairs / 2));
double start_value = dists[start_index];
double end_value = dists[end_index];
double bin_step = fabs(end_value - start_value) / 99.999; //ensure that we have 100 bins
Math::Histogram<> hist(start_value, end_value, bin_step);
//std::cout << "HIST from " << start_value << " to " << end_value << " (bin size " << bin_step << ")" << endl;
for (Size i = start_index; i <= end_index; ++i)
{
hist.inc(dists[i]);
}
//......... (rest of the code omitted) .........
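Stripped of the OpenMS types, the RT estimation in example 5 is: sort the candidate RT distances, take the median as a first guess of the shift, and bin the central portion of the distribution into roughly 100 bins for the later Gauss fit. A stand-alone sketch under those assumptions (the Gauss fit itself is omitted; the caller must pass a non-empty vector, as the example guarantees):
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>
// Estimate an initial RT shift (median of candidate distances) and bin the central
// part of the distribution into ~100 bins, as example 5 does before Gauss fitting.
double estimateShiftAndHistogram(std::vector<double> dists, std::size_t max_pairs,
                                 std::vector<int>& histogram)
{
  std::sort(dists.begin(), dists.end());
  const std::size_t median_index = dists.size() / 2;
  const double shift = dists[median_index];   // first estimate of the optimal shift
  // consider only max_pairs distances, centered around the median
  const std::size_t start_index =
      (median_index > max_pairs / 2) ? median_index - max_pairs / 2 : 0;
  const std::size_t end_index =
      std::min(dists.size() - 1, median_index + max_pairs / 2);
  const double start_value = dists[start_index];
  const double end_value = dists[end_index];
  const double bin_step = std::fabs(end_value - start_value) / 99.999; // ~100 bins
  if (bin_step <= 0.0)  // degenerate case: all selected distances are identical
  {
    histogram.assign(1, static_cast<int>(end_index - start_index + 1));
    return shift;
  }
  histogram.assign(100, 0);
  for (std::size_t i = start_index; i <= end_index; ++i)
  {
    std::size_t bin = static_cast<std::size_t>((dists[i] - start_value) / bin_step);
    if (bin >= histogram.size()) bin = histogram.size() - 1; // clamp the last value
    ++histogram[bin];
  }
  return shift;
}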
Example 6: run_
void QTClusterFinder::run_(const vector<MapType> & input_maps,
ConsensusMap & result_map)
{
num_maps_ = input_maps.size();
if (num_maps_ < 2)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__,
"At least two input maps required");
}
// set up the distance functor (and set other parameters):
DoubleReal max_intensity = input_maps[0].getMaxInt();
DoubleReal max_mz = input_maps[0].getMax()[1];
for (Size map_index = 1; map_index < num_maps_; ++map_index)
{
max_intensity = max(max_intensity, input_maps[map_index].getMaxInt());
max_mz = max(max_mz, input_maps[map_index].getMax()[1]); // [1] = m/z dimension, matching the initialization above
}
setParameters_(max_intensity, max_mz);
// create the hash grid and fill it with features:
//cout << "Hashing..." << endl;
list<GridFeature> grid_features;
Grid grid(Grid::ClusterCenter(max_diff_rt_, max_diff_mz_));
for (Size map_index = 0; map_index < num_maps_; ++map_index)
{
for (Size feature_index = 0; feature_index < input_maps[map_index].size();
++feature_index)
{
grid_features.push_back(
GridFeature(input_maps[map_index][feature_index], map_index,
feature_index));
GridFeature & gfeature = grid_features.back();
// sort peptide hits once now, instead of multiple times later:
BaseFeature & feature = const_cast<BaseFeature &>(
grid_features.back().getFeature());
for (vector<PeptideIdentification>::iterator pep_it =
feature.getPeptideIdentifications().begin(); pep_it !=
feature.getPeptideIdentifications().end(); ++pep_it)
{
pep_it->sort();
}
grid.insert(std::make_pair(Grid::ClusterCenter(gfeature.getRT(), gfeature.getMZ()), &gfeature));
}
}
// compute QT clustering:
//cout << "Clustering..." << endl;
list<QTCluster> clustering;
computeClustering_(grid, clustering);
// number of clusters == number of data points:
Size size = clustering.size();
// Create a temporary map where we store which GridFeatures are next to which Clusters
OpenMSBoost::unordered_map<GridFeature *, std::vector< QTCluster * > > element_mapping;
for (list<QTCluster>::iterator it = clustering.begin(); it != clustering.end(); ++it)
{
OpenMSBoost::unordered_map<Size, GridFeature *> elements;
typedef std::multimap<DoubleReal, GridFeature *> InnerNeighborMap;
typedef OpenMSBoost::unordered_map<Size, InnerNeighborMap > NeighborMap;
NeighborMap neigh = it->getNeighbors();
for (NeighborMap::iterator n_it = neigh.begin(); n_it != neigh.end(); ++n_it)
{
for (InnerNeighborMap::iterator i_it = n_it->second.begin(); i_it != n_it->second.end(); ++i_it)
{
element_mapping[i_it->second].push_back( &(*it) );
}
}
}
ProgressLogger logger;
logger.setLogType(ProgressLogger::CMD);
logger.startProgress(0, size, "linking features");
Size progress = 0;
result_map.clear(false);
while (!clustering.empty())
{
// cout << "Clusters: " << clustering.size() << endl;
ConsensusFeature consensus_feature;
makeConsensusFeature_(clustering, consensus_feature, element_mapping);
if (!clustering.empty())
{
result_map.push_back(consensus_feature);
}
logger.setProgress(progress++);
}
logger.endProgress();
}
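Example 6 only compares features that fall into neighboring grid cells, which is what keeps QT clustering tractable; the OpenMS hash grid itself is not shown above. As a rough, hypothetical illustration of the idea (not the OpenMS implementation), features can be bucketed by integer cell coordinates derived from the allowed RT and m/z differences, so that all candidate partners of a feature live in the 3x3 neighborhood of its own cell:
#include <cmath>
#include <cstddef>
#include <map>
#include <utility>
#include <vector>
// Bucket feature indices by (RT cell, m/z cell). Cell sizes equal the allowed
// per-dimension differences, so every candidate partner of a feature lies in its
// own cell or one of the 8 surrounding cells. Illustrative only.
class FeatureGrid
{
public:
  FeatureGrid(double max_diff_rt, double max_diff_mz)
    : rt_cell_(max_diff_rt), mz_cell_(max_diff_mz) {}
  void insert(double rt, double mz, std::size_t feature_index)
  {
    grid_[key(rt, mz)].push_back(feature_index);
  }
  // collect feature indices from the 3x3 neighborhood of (rt, mz)
  std::vector<std::size_t> neighbors(double rt, double mz) const
  {
    std::vector<std::size_t> result;
    const std::pair<long, long> c = key(rt, mz);
    for (long dr = -1; dr <= 1; ++dr)
    {
      for (long dm = -1; dm <= 1; ++dm)
      {
        GridType::const_iterator it = grid_.find(std::make_pair(c.first + dr, c.second + dm));
        if (it != grid_.end())
        {
          result.insert(result.end(), it->second.begin(), it->second.end());
        }
      }
    }
    return result;
  }
private:
  typedef std::map<std::pair<long, long>, std::vector<std::size_t> > GridType;
  std::pair<long, long> key(double rt, double mz) const
  {
    return std::make_pair(static_cast<long>(std::floor(rt / rt_cell_)),
                          static_cast<long>(std::floor(mz / mz_cell_)));
  }
  double rt_cell_, mz_cell_;
  GridType grid_;
};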