This article collects typical usage examples of the C++ method ConsensusMap::getMax. If you are wondering how ConsensusMap::getMax is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, ConsensusMap.
The following shows 2 code examples of the ConsensusMap::getMax method, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better C++ code examples.
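Before the full examples, here is a minimal sketch of the typical call pattern. This is illustrative only, assuming the OpenMS library these examples come from; updateRanges() and the header path are standard OpenMS but do not appear in the examples below:

#include <OpenMS/KERNEL/ConsensusMap.h>
#include <iostream>

using namespace OpenMS;

void printConsensusRanges(ConsensusMap & cons)
{
  cons.updateRanges(); // assumption: ranges must be up to date before getMin()/getMax() are meaningful
  std::cout << "RT:  " << cons.getMin()[Peak2D::RT] << " .. " << cons.getMax()[Peak2D::RT] << "\n"
            << "m/z: " << cons.getMin()[Peak2D::MZ] << " .. " << cons.getMax()[Peak2D::MZ] << "\n";
}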
Example 1: outputTo
//......... part of the code omitted here .........
}
else if (in_type == FileTypes::CONSENSUSXML) // consensus features
{
  dp = cons.getDataProcessing();
}

int i = 0;
for (vector<DataProcessing>::iterator it = dp.begin(); it != dp.end(); ++it)
{
  os << "Data processing " << i << endl;
  os << "\tcompletion_time: " << (*it).getCompletionTime().getDate() << 'T' << (*it).getCompletionTime().getTime() << endl;
  os << "\tsoftware name: " << (*it).getSoftware().getName() << " version " << (*it).getSoftware().getVersion() << endl;
  for (set<DataProcessing::ProcessingAction>::const_iterator paIt = (*it).getProcessingActions().begin(); paIt != (*it).getProcessingActions().end(); ++paIt)
  {
    os << "\t\tprocessing action: " << DataProcessing::NamesOfProcessingAction[*paIt] << endl;
  }
  ++i; // increment inside the loop so each data processing step gets its own index
}
//-------------------------------------------------------------
// statistics
//-------------------------------------------------------------
if (getFlag_("s"))
{
  //-------------------------------------------------------------
  // Content statistics
  //-------------------------------------------------------------
  Map<String, int> meta_names;
  if (in_type == FileTypes::FEATUREXML) // features
  {
    os << "Number of features: " << feat.size() << endl
       << endl
       << "Ranges:" << endl
       << " retention time: " << String::number(feat.getMin()[Peak2D::RT], 2) << " : " << String::number(feat.getMax()[Peak2D::RT], 2) << endl
       << " mass-to-charge: " << String::number(feat.getMin()[Peak2D::MZ], 2) << " : " << String::number(feat.getMax()[Peak2D::MZ], 2) << endl
       << " intensity: " << String::number(feat.getMinInt(), 2) << " : " << String::number(feat.getMaxInt(), 2) << endl
       << endl;

    // Charge distribution
    Map<UInt, UInt> charges;
    for (Size i = 0; i < feat.size(); ++i)
    {
      charges[feat[i].getCharge()]++;
    }

    os << "Charge distribution" << endl;
    for (Map<UInt, UInt>::const_iterator it = charges.begin(); it != charges.end(); ++it)
    {
      os << "charge " << it->first << ": " << it->second << endl;
    }
  }
  else if (in_type == FileTypes::CONSENSUSXML) // consensus features
  {
    map<Size, UInt> num_consfeat_of_size;
    for (ConsensusMap::const_iterator cmit = cons.begin(); cmit != cons.end(); ++cmit)
    {
      ++num_consfeat_of_size[cmit->size()];
    }

    os << endl << "Number of consensus features:" << endl;
    for (map<Size, UInt>::reverse_iterator i = num_consfeat_of_size.rbegin(); i != num_consfeat_of_size.rend(); ++i)
    {
      os << " of size " << setw(2) << i->first << ": " << setw(6) << i->second << endl;
    }
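The example is truncated at this point, before the consensus branch ever reaches a ConsensusMap::getMax call. A hypothetical sketch of how the consensus ranges could be reported, analogous to the feature branch above (illustrative only; this is not the omitted code):

// Hypothetical sketch -- not the omitted code from Example 1.
os << "Ranges:" << endl
   << " retention time: " << String::number(cons.getMin()[Peak2D::RT], 2) << " : " << String::number(cons.getMax()[Peak2D::RT], 2) << endl
   << " mass-to-charge: " << String::number(cons.getMin()[Peak2D::MZ], 2) << " : " << String::number(cons.getMax()[Peak2D::MZ], 2) << endl
   << " intensity: " << String::number(cons.getMinInt(), 2) << " : " << String::number(cons.getMaxInt(), 2) << endl;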
Example 2: run
void PoseClusteringShiftSuperimposer::run(const ConsensusMap & map_model, const ConsensusMap & map_scene, TransformationDescription & transformation)
{
  typedef ConstRefVector<ConsensusMap> PeakPointerArray_;
  typedef Math::LinearInterpolation<double, double> LinearInterpolationType_;

  LinearInterpolationType_ shift_hash_;
  // OLD STUFF
  // LinearInterpolationType_ scaling_hash_1;
  // LinearInterpolationType_ scaling_hash_2;
  // LinearInterpolationType_ shift_hash_;
  // LinearInterpolationType_ rt_high_hash_;

  /// Maximum deviation in mz of two partner points
  const double mz_pair_max_distance = param_.getValue("mz_pair_max_distance");
  /// Size of each shift bucket
  const double shift_bucket_size = param_.getValue("shift_bucket_size");

  const UInt struc_elem_length_datapoints = 21; // MAGIC ALERT: number of data points in structuring element for tophat filter, which removes baseline from histogram
  const double scaling_histogram_crossing_slope = 3.0; // MAGIC ALERT: used when distinguishing noise level and enriched histogram bins
  const double scaling_cutoff_stdev_multiplier = 1.5; // MAGIC ALERT: multiplier for stdev in cutoff for outliers
  const UInt loops_mean_stdev_cutoff = 3; // MAGIC ALERT: number of loops in stdev cutoff for outliers

  startProgress(0, 100, "shift pose clustering");
  UInt actual_progress = 0;
  setProgress(++actual_progress);

  // Optionally, we will write dumps of the hash table buckets.
  bool do_dump_buckets = false;
  String dump_buckets_basename;
  if (param_.getValue("dump_buckets") != "")
  {
    do_dump_buckets = true;
    dump_buckets_basename = param_.getValue("dump_buckets");
  }
  setProgress(++actual_progress);
  // Even more optionally, we will write dumps of the hashed pairs.
  bool do_dump_pairs = false;
  String dump_pairs_basename;
  if (param_.getValue("dump_pairs") != "")
  {
    do_dump_pairs = true;
    dump_pairs_basename = param_.getValue("dump_pairs");
  }
  setProgress(++actual_progress);

  //**************************************************************************
  // Select the most abundant data points only. After that, disallow modifications
  // (we tend to have annoying issues with const_iterator versus iterator).
  PeakPointerArray_ model_map_ini(map_model.begin(), map_model.end());
  const PeakPointerArray_ & model_map(model_map_ini);
  PeakPointerArray_ scene_map_ini(map_scene.begin(), map_scene.end());
  const PeakPointerArray_ & scene_map(scene_map_ini);
  {
    // truncate the data as necessary
    // casting to SignedSize is done on PURPOSE here! (num_used_points will be maximal if -1 is used)
    const Size num_used_points = (SignedSize) param_.getValue("num_used_points");
    if (model_map_ini.size() > num_used_points)
    {
      model_map_ini.sortByIntensity(true);
      model_map_ini.resize(num_used_points);
    }
    model_map_ini.sortByComparator(Peak2D::MZLess());
    setProgress(++actual_progress);

    if (scene_map_ini.size() > num_used_points)
    {
      scene_map_ini.sortByIntensity(true);
      scene_map_ini.resize(num_used_points);
    }
    scene_map_ini.sortByComparator(Peak2D::MZLess());
    setProgress(++actual_progress);

    // Note: model_map_ini and scene_map_ini will not be used further below
  }
  setProgress((actual_progress = 10));
  //**************************************************************************
  // Preprocessing

  // get RT ranges (NOTE: we trust that min and max have been updated in the
  // ConsensusMap::convert() method !)
  const double model_low = map_model.getMin()[ConsensusFeature::RT];
  const double scene_low = map_scene.getMin()[ConsensusFeature::RT];
  const double model_high = map_model.getMax()[ConsensusFeature::RT];
  const double scene_high = map_scene.getMax()[ConsensusFeature::RT];

  // OLD STUFF
  // const double rt_low = (maps[0].getMin()[ConsensusFeature::RT] + maps[1].getMin()[ConsensusFeature::RT]) / 2.;
  // const double rt_high = (maps[0].getMax()[ConsensusFeature::RT] + maps[1].getMax()[ConsensusFeature::RT]) / 2.;

  // Initialize the hash tables: shift_hash_
  // OLD STUFF: was: rt_scaling_hash_, rt_low_hash_, and rt_high_hash_
  {
    // (over)estimate the required number of buckets for shifting
    double max_shift = param_.getValue("max_shift");
    // actually the largest possible shift can be much smaller, depending on the data
    do
    {
      //......... part of the code omitted here .........
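Example 2 is likewise truncated. For orientation, here is a minimal usage sketch of how run() is typically driven from the outside. The getParameters()/setParameters() pattern is the standard OpenMS DefaultParamHandler interface; the header paths and parameter values are assumptions for illustration, not taken from the example:

#include <OpenMS/ANALYSIS/MAPMATCHING/PoseClusteringShiftSuperimposer.h>
#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/KERNEL/ConsensusMap.h>

using namespace OpenMS;

// model and scene are assumed to be filled beforehand, e.g. via ConsensusMap::convert()
void estimateShift(const ConsensusMap & model, const ConsensusMap & scene)
{
  PoseClusteringShiftSuperimposer superimposer;

  Param p = superimposer.getParameters();
  p.setValue("mz_pair_max_distance", 0.5); // illustrative values -- tune for the data at hand
  p.setValue("shift_bucket_size", 3.0);
  superimposer.setParameters(p);

  TransformationDescription transformation;
  superimposer.run(model, scene, transformation); // estimates the RT shift mapping scene onto model
}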