This article collects typical usage examples of the C++ method ConsensusMap::back. If you are wondering what ConsensusMap::back does, how to call it, or simply want to see it used in context, the selected code samples below may help; you can also look further into the containing class, ConsensusMap.
A total of 2 code examples of the ConsensusMap::back method are shown below, sorted by popularity by default.
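Before the full examples, here is a minimal, self-contained sketch of the idiom both of them rely on: append an empty ConsensusFeature with push_back, then fill it in place through the reference returned by ConsensusMap::back. The feature values are made up, and the insert(map_index, element, element_index) overload is an assumption about the OpenMS API; exact signatures may differ between versions.

#include <OpenMS/KERNEL/ConsensusFeature.h>
#include <OpenMS/KERNEL/ConsensusMap.h>
#include <OpenMS/KERNEL/Peak2D.h>

using namespace OpenMS;

int main()
{
  // Two hypothetical signals, as if they came from input map 0 and input map 1:
  Peak2D light, heavy;
  light.setRT(1000.0); light.setMZ(500.25); light.setIntensity(1e5);
  heavy.setRT(1000.5); heavy.setMZ(504.25); heavy.setIntensity(9e4);

  ConsensusMap result;
  result.push_back(ConsensusFeature());  // append an empty consensus feature ...
  ConsensusFeature& f = result.back();   // ... then fill it in place via back()
  f.insert(0, light, 0);                 // (map index, element, element index) -- assumed overload
  f.insert(1, heavy, 0);
  f.computeConsensus();                  // derive consensus RT/m/z/intensity from the handles
  f.setQuality(1.0);
  return 0;
}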
Example 1: run
//......... part of the code is omitted here .........
        {
          nn_distance_0[fi0].second = distance;
        }
      }
      // update entries for map 1:
      if (distance < nn_distance_1[fi1].second)
      {
        if (valid && (distance < nn_distance_1[fi1].first))
        {
          nn_distance_1[fi1].second = nn_distance_1[fi1].first;
          nn_distance_1[fi1].first = distance;
          nn_index_1[fi1] = fi0;
        }
        else
        {
          nn_distance_1[fi1].second = distance;
        }
      }
    }
  }
  // if features from the two maps are nearest neighbors of each other, they
  // can become a pair:
  for (UInt fi0 = 0; fi0 < input_maps[0].size(); ++fi0)
  {
    UInt fi1 = nn_index_0[fi0]; // nearest neighbor of "fi0" in map 1
    // cout << "index: " << fi0 << ", RT: " << input_maps[0][fi0].getRT()
    // << ", MZ: " << input_maps[0][fi0].getMZ() << endl
    // << "neighbor: " << fi1 << ", RT: " << input_maps[1][fi1].getRT()
    // << ", MZ: " << input_maps[1][fi1].getMZ() << endl
    // << "d(i,j): " << nn_distance_0[fi0].first << endl
    // << "d2(i): " << nn_distance_0[fi0].second << endl
    // << "d2(j): " << nn_distance_1[fi1].second << endl;
    // criteria set by the parameters must be fulfilled:
    if ((nn_distance_0[fi0].first < FeatureDistance::infinity) &&
        (nn_distance_0[fi0].first * second_nearest_gap_ <= nn_distance_0[fi0].second))
    {
      // "fi0" satisfies constraints...
      if ((nn_index_1[fi1] == fi0) &&
          (nn_distance_1[fi1].first * second_nearest_gap_ <= nn_distance_1[fi1].second))
      {
        // ...nearest neighbor of "fi0" also satisfies constraints (yay!)
        // cout << "match!" << endl;
        result_map.push_back(ConsensusFeature());
        ConsensusFeature& f = result_map.back();
        f.insert(input_maps[0][fi0]);
        f.getPeptideIdentifications().insert(f.getPeptideIdentifications().end(),
                                             input_maps[0][fi0].getPeptideIdentifications().begin(),
                                             input_maps[0][fi0].getPeptideIdentifications().end());
        f.insert(input_maps[1][fi1]);
        f.getPeptideIdentifications().insert(f.getPeptideIdentifications().end(),
                                             input_maps[1][fi1].getPeptideIdentifications().begin(),
                                             input_maps[1][fi1].getPeptideIdentifications().end());
        f.computeConsensus();
        double quality = 1.0 - nn_distance_0[fi0].first;
        double quality0 = 1.0 - nn_distance_0[fi0].first * second_nearest_gap_ / nn_distance_0[fi0].second;
        double quality1 = 1.0 - nn_distance_1[fi1].first * second_nearest_gap_ / nn_distance_1[fi1].second;
        quality = quality * quality0 * quality1; // TODO other formula?
        // incorporate existing quality values:
        Size size0 = max(input_maps[0][fi0].size(), size_t(1));
        Size size1 = max(input_maps[1][fi1].size(), size_t(1));
        // quality contribution from first map:
        quality0 = input_maps[0][fi0].getQuality() * (size0 - 1);
        // quality contribution from second map:
        quality1 = input_maps[1][fi1].getQuality() * (size1 - 1);
        f.setQuality((quality + quality0 + quality1) / (size0 + size1 - 1));
        is_singleton[0][fi0] = false;
        is_singleton[1][fi1] = false;
      }
    }
  }
  // write out unmatched consensus features
  for (UInt input = 0; input <= 1; ++input)
  {
    for (UInt index = 0; index < input_maps[input].size(); ++index)
    {
      if (is_singleton[input][index])
      {
        result_map.push_back(input_maps[input][index]);
        if (result_map.back().size() < 2) // singleton consensus feature
        {
          result_map.back().setQuality(0.0);
        }
      }
    }
  }
  // canonical ordering for checking the results, and the ids have no real meaning anyway
  result_map.sortByMZ();
  // protein IDs and unassigned peptide IDs are added to the result by the
  // FeatureGroupingAlgorithm!
}
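A note on the scoring in example 1, plus a small illustration: two features are paired only if they are mutual nearest neighbors and, for both of them, the best distance multiplied by second_nearest_gap_ is still no larger than the second-best distance. The helper below is hypothetical (it is not part of OpenMS) and only isolates the quality arithmetic from the example: the pair quality built from the three (1 - ...) factors is averaged with the pre-existing qualities of the two input consensus features, weighted by the number of feature handles each one already holds.

#include <algorithm>
#include <cstddef>

// Hypothetical helper mirroring the quality arithmetic in example 1.
double combinedQuality(double d_best,       // distance of the matched pair
                       double d_second_0,   // second-best distance seen from the map-0 feature
                       double d_second_1,   // second-best distance seen from the map-1 feature
                       double gap,          // the "second_nearest_gap_" parameter
                       double quality_0, std::size_t size_0,  // existing quality / size in map 0
                       double quality_1, std::size_t size_1)  // existing quality / size in map 1
{
  double pair_quality = (1.0 - d_best)
                        * (1.0 - d_best * gap / d_second_0)
                        * (1.0 - d_best * gap / d_second_1);
  size_0 = std::max<std::size_t>(size_0, 1);
  size_1 = std::max<std::size_t>(size_1, 1);
  return (pair_quality + quality_0 * (size_0 - 1) + quality_1 * (size_1 - 1))
         / static_cast<double>(size_0 + size_1 - 1);
}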
Example 2: run
void LabeledPairFinder::run(const vector<ConsensusMap>& input_maps, ConsensusMap& result_map)
{
  if (input_maps.size() != 1)
    throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "exactly one input map required");
  if (result_map.getFileDescriptions().size() != 2)
    throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "two file descriptions required");
  if (result_map.getFileDescriptions().begin()->second.filename != result_map.getFileDescriptions().rbegin()->second.filename)
    throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "the two file descriptions have to contain the same file name");
  checkIds_(input_maps);
  //look up the light and heavy index
  Size light_index = numeric_limits<Size>::max();
  Size heavy_index = numeric_limits<Size>::max();
  for (ConsensusMap::FileDescriptions::const_iterator it = result_map.getFileDescriptions().begin();
       it != result_map.getFileDescriptions().end();
       ++it)
  {
    if (it->second.label == "heavy")
    {
      heavy_index = it->first;
    }
    else if (it->second.label == "light")
    {
      light_index = it->first;
    }
  }
  if (light_index == numeric_limits<Size>::max() || heavy_index == numeric_limits<Size>::max())
  {
    throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "the input maps have to be labeled 'light' and 'heavy'");
  }
  result_map.clear(false);
  // sort consensus features by RT (and MZ) to speed up searching afterwards
  typedef ConstRefVector<ConsensusMap> RefMap;
  RefMap model_ref(input_maps[0].begin(), input_maps[0].end());
  model_ref.sortByPosition();
  //calculate matches
  ConsensusMap matches;
  //settings
  double rt_pair_dist = param_.getValue("rt_pair_dist");
  double rt_dev_low = param_.getValue("rt_dev_low");
  double rt_dev_high = param_.getValue("rt_dev_high");
  double mz_dev = param_.getValue("mz_dev");
  DoubleList mz_pair_dists = param_.getValue("mz_pair_dists");
  bool mrm = param_.getValue("mrm").toBool();
  //estimate RT parameters
  if (param_.getValue("rt_estimate") == "true")
  {
    //find all possible RT distances of features with the same charge and a good m/z distance
    vector<double> dists;
    dists.reserve(model_ref.size());
    for (RefMap::const_iterator it = model_ref.begin(); it != model_ref.end(); ++it)
    {
      for (RefMap::const_iterator it2 = model_ref.begin(); it2 != model_ref.end(); ++it2)
      {
        for (DoubleList::const_iterator dist_it = mz_pair_dists.begin(); dist_it != mz_pair_dists.end(); ++dist_it)
        {
          double mz_pair_dist = *dist_it;
          if (it2->getCharge() == it->getCharge()
              && it2->getMZ() >= it->getMZ() + mz_pair_dist / it->getCharge() - mz_dev
              && it2->getMZ() <= it->getMZ() + mz_pair_dist / it->getCharge() + mz_dev)
          {
            dists.push_back(it2->getRT() - it->getRT());
          }
        }
      }
    }
    if (dists.empty())
    {
      cout << "Warning: Could not find pairs for RT distance estimation. The manual settings are used!" << endl;
    }
    else
    {
      if (dists.size() < 50)
      {
        cout << "Warning: Found only " << dists.size() << " pairs. The estimated shift and std deviation are probably not reliable!" << endl;
      }
      //--------------------------- estimate initial parameters of fit ---------------------------
      GaussFitter::GaussFitResult result(-1, -1, -1);
      //first estimate of the optimal shift: median of the distances
      sort(dists.begin(), dists.end());
      Size median_index = dists.size() / 2;
      result.x0 = dists[median_index];
      //create histogram of distances
      //consider only the maximum of pairs, centered around the optimal shift
      Size max_pairs = model_ref.size() / 2;
      Size start_index = (Size) max((SignedSize)0, (SignedSize)(median_index - max_pairs / 2));
      Size end_index = (Size) min((SignedSize)(dists.size() - 1), (SignedSize)(median_index + max_pairs / 2));
      double start_value = dists[start_index];
      double end_value = dists[end_index];
      double bin_step = fabs(end_value - start_value) / 99.999; //ensure that we have 100 bins
      Math::Histogram<> hist(start_value, end_value, bin_step);
      //std::cout << "HIST from " << start_value << " to " << end_value << " (bin size " << bin_step << ")" << endl;
      for (Size i = start_index; i <= end_index; ++i)
      {
        hist.inc(dists[i]);
      }
//......... part of the code is omitted here .........
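Example 2 breaks off inside the RT-parameter estimation, but the visible part already shows the core idea: collect the RT distances of all feature pairs that have the same charge and an m/z gap of roughly mz_pair_dist / charge, take the median distance as the first guess of the shift, and build a histogram of about 100 bins around it (the GaussFitResult declared above suggests this histogram is then used for a Gaussian fit in the omitted part). The sketch below reproduces only that initial-estimate step with the standard library, under those assumptions; it does not use the OpenMS Math::Histogram or GaussFitter classes.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

struct ShiftEstimate
{
  double shift = 0.0;          // first estimate of the RT shift: median of the distances
  double bin_start = 0.0;
  double bin_step = 0.0;
  std::vector<int> histogram;  // counts per bin, centered around the median
};

// Assumes 'dists' is non-empty; 'max_pairs' limits how many distances around the median are binned.
ShiftEstimate estimateRTShift(std::vector<double> dists, std::size_t max_pairs)
{
  ShiftEstimate est;
  std::sort(dists.begin(), dists.end());
  const std::size_t median_index = dists.size() / 2;
  est.shift = dists[median_index];

  // keep only max_pairs distances centered around the median
  const std::size_t start = (median_index > max_pairs / 2) ? median_index - max_pairs / 2 : 0;
  const std::size_t end = std::min(dists.size() - 1, median_index + max_pairs / 2);

  est.bin_start = dists[start];
  est.bin_step = std::fabs(dists[end] - dists[start]) / 99.999;  // yields roughly 100 bins
  if (est.bin_step <= 0.0) est.bin_step = 1.0;                   // guard against identical distances
  est.histogram.assign(100, 0);
  for (std::size_t i = start; i <= end; ++i)
  {
    std::size_t bin = (std::size_t)((dists[i] - est.bin_start) / est.bin_step);
    ++est.histogram[std::min<std::size_t>(bin, 99)];
  }
  return est;
}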