This page collects typical usage examples of the C++ method TransformationDescription::fitModel. If you are wondering what TransformationDescription::fitModel does, how to use it, or want to see it in context, the curated examples below may help. You can also explore further usage examples of its containing class, TransformationDescription.
Below, 8 code examples of TransformationDescription::fitModel are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
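Before the examples, here is a minimal, self-contained sketch of the basic workflow (the calibration values are invented for illustration; the model names follow the examples below): collect (source RT, target RT) pairs, fit a model by name, then map values through it.

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <iostream>
#include <utility>

using namespace OpenMS;

int main()
{
  TransformationDescription trafo;
  TransformationDescription::DataPoints points;
  points.push_back(std::make_pair(100.0, 110.0)); // (source RT, target RT)
  points.push_back(std::make_pair(900.0, 950.0));
  trafo.setDataPoints(points);
  trafo.fitModel("linear"); // fit a model by name; "identity" needs no points
  std::cout << trafo.apply(500.0) << std::endl; // maps through the fitted line (530 here)
  return 0;
}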
Example 1: adjustRetentionTimes_
void adjustRetentionTimes_(MapType& map, const String& trafo_out,
                           bool first_file)
{
  map.updateRanges();
  TransformationDescription trafo;
  if (first_file) // no transformation necessary
  {
    rt_offset_ = map.getMax()[0] + rt_gap_;
    trafo.fitModel("identity");
  }
  else // subsequent file -> apply transformation
  {
    TransformationDescription::DataPoints points(2);
    double rt_min = map.getMin()[0], rt_max = map.getMax()[0];
    points[0] = make_pair(rt_min, rt_offset_);
    rt_offset_ += rt_max - rt_min;
    points[1] = make_pair(rt_max, rt_offset_);
    trafo.setDataPoints(points);
    trafo.fitModel("linear");
    MapAlignmentTransformer::transformRetentionTimes(map, trafo, true);
    rt_offset_ += rt_gap_;
  }
  if (!trafo_out.empty())
  {
    TransformationXMLFile().store(trafo_out, trafo);
  }
}
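The two-point fit above maps the file's RT span [rt_min, rt_max] onto [rt_offset_, rt_offset_ + (rt_max - rt_min)], so successive files are laid out back to back on the RT axis. A small numeric sketch of that property (values invented):

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <cassert>
#include <cmath>
#include <utility>

void sketch()
{
  using namespace OpenMS;
  // suppose rt_min = 10 s, rt_max = 100 s and the running rt_offset_ is 250 s:
  TransformationDescription trafo;
  TransformationDescription::DataPoints points(2);
  points[0] = std::make_pair(10.0, 250.0);  // rt_min -> current offset
  points[1] = std::make_pair(100.0, 340.0); // rt_max -> offset + (rt_max - rt_min)
  trafo.setDataPoints(points);
  trafo.fitModel("linear");
  // both points lie on a slope-1 line, so the fit is a pure +240 s shift:
  assert(std::fabs(trafo.apply(55.0) - 295.0) < 1e-6);
}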
Example 2: align
void MapAlignmentAlgorithmPoseClustering::align(const ConsensusMap& map, TransformationDescription& trafo)
{
  // TODO: move this to updateMembers_? (if consensusMap prevails)
  // TODO: why does superimposer work on consensus map???
  const ConsensusMap& map_model = reference_;
  ConsensusMap map_scene = map;

  // run superimposer to find the global transformation
  TransformationDescription si_trafo;
  superimposer_.run(map_model, map_scene, si_trafo);

  // apply transformation to consensus features and contained feature handles
  for (Size j = 0; j < map_scene.size(); ++j)
  {
    // calculate new RT
    double rt = map_scene[j].getRT();
    rt = si_trafo.apply(rt);
    // set RT of consensus feature centroid
    map_scene[j].setRT(rt);
    // set RT of consensus feature handles
    map_scene[j].begin()->asMutable().setRT(rt);
  }

  // run pairfinder to find pairs
  ConsensusMap result;
  // TODO: add another 2map interface to pairfinder?
  std::vector<ConsensusMap> input(2);
  input[0] = map_model;
  input[1] = map_scene;
  pairfinder_.run(input, result);

  // calculate the local transformation
  si_trafo.invert(); // to undo the transformation applied above
  TransformationDescription::DataPoints data;
  for (ConsensusMap::Iterator it = result.begin(); it != result.end(); ++it)
  {
    if (it->size() == 2) // two matching features
    {
      ConsensusFeature::iterator feat_it = it->begin();
      double y = feat_it->getRT();
      double x = si_trafo.apply((++feat_it)->getRT());
      // one feature should be from the reference map:
      if (feat_it->getMapIndex() != 0)
      {
        data.push_back(make_pair(x, y));
      }
      else
      {
        data.push_back(make_pair(y, x));
      }
    }
  }
  trafo = TransformationDescription(data);
  trafo.fitModel("linear");
}
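The invert() call in this example relies on the fitted transformation being exactly reversible, so RTs mapped by the superimposer can be mapped back before the local model is fitted. A hedged sketch of that round-trip property, assuming a fitted linear model:

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <cassert>
#include <cmath>
#include <utility>

void sketch()
{
  using namespace OpenMS;
  TransformationDescription trafo;
  TransformationDescription::DataPoints points;
  points.push_back(std::make_pair(0.0, 5.0));
  points.push_back(std::make_pair(10.0, 25.0));
  trafo.setDataPoints(points);
  trafo.fitModel("linear");    // y = 2x + 5
  double y = trafo.apply(3.0); // 11
  trafo.invert();              // now maps y back to x
  assert(std::fabs(trafo.apply(y) - 3.0) < 1e-6);
}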
Example 3: computeTransformations_
//......... (part of the code omitted here) .........
else // compute overall RT median per sequence (median of medians per run)
{
  LOG_DEBUG << "Computing overall RT medians per sequence..." << endl;
  // remove peptides that don't occur in enough runs (at least two):
  LOG_DEBUG << "Removing peptides that occur in too few runs..." << endl;
  SeqToList temp;
  for (SeqToList::iterator med_it = medians_per_seq.begin();
       med_it != medians_per_seq.end(); ++med_it)
  {
    if (med_it->second.size() >= min_run_occur_)
    {
      temp.insert(temp.end(), *med_it);
    }
  }
  LOG_DEBUG << "Removed " << medians_per_seq.size() - temp.size() << " of "
            << medians_per_seq.size() << " peptides." << endl;
  temp.swap(medians_per_seq);
  computeMedians_(medians_per_seq, reference_);
}
if (reference_.empty())
{
  throw Exception::MissingInformation(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
                                      "No reference RT information left after filtering");
}

double max_rt_shift = param_.getValue("max_rt_shift");
if (max_rt_shift <= 1)
{
  // compute max. allowed shift from overall retention time range:
  double rt_min = numeric_limits<double>::infinity(), rt_max = -rt_min;
  for (SeqToValue::iterator it = reference_.begin(); it != reference_.end();
       ++it)
  {
    rt_min = min(rt_min, it->second);
    rt_max = max(rt_max, it->second);
  }
  double rt_range = rt_max - rt_min;
  max_rt_shift *= rt_range;
  // in the degenerate case of only one reference point, "max_rt_shift"
  // should be zero (because "rt_range" is zero) - this is covered below
}
if (max_rt_shift == 0)
{
  max_rt_shift = numeric_limits<double>::max();
}
LOG_DEBUG << "Max. allowed RT shift (in seconds): " << max_rt_shift << endl;

// generate RT transformations:
LOG_DEBUG << "Generating RT transformations..." << endl;
LOG_INFO << "\nAlignment based on:" << endl; // diagnostic output
Size offset = 0; // offset in case of internal reference
for (Int i = 0; i < size + 1; ++i)
{
  if (i == reference_index_)
  {
    // if one of the input maps was used as reference, it has been skipped
    // so far - now we have to consider it again:
    TransformationDescription trafo;
    trafo.fitModel("identity");
    transforms.push_back(trafo);
    LOG_INFO << "- " << reference_.size() << " data points for sample "
             << i + 1 << " (reference)\n";
    offset = 1;
  }
  if (i >= size) break;

  // to be useful for the alignment, a peptide sequence has to occur in the
  // current run ("medians_per_run[i]"), but also in at least one other run
  // ("medians_overall"):
  TransformationDescription::DataPoints data;
  Size n_outliers = 0;
  for (SeqToValue::iterator med_it = medians_per_run[i].begin();
       med_it != medians_per_run[i].end(); ++med_it)
  {
    SeqToValue::const_iterator pos = reference_.find(med_it->first);
    if (pos != reference_.end())
    {
      if (abs(med_it->second - pos->second) <= max_rt_shift)
      { // found, and satisfies "max_rt_shift" condition!
        TransformationDescription::DataPoint point(med_it->second,
                                                   pos->second, pos->first);
        data.push_back(point);
      }
      else
      {
        n_outliers++;
      }
    }
  }
  transforms.push_back(TransformationDescription(data));
  LOG_INFO << "- " << data.size() << " data points for sample "
           << i + offset + 1;
  if (n_outliers) LOG_INFO << " (" << n_outliers << " outliers removed)";
  LOG_INFO << "\n";
}
LOG_INFO << endl;

// delete temporary reference
if (!reference_given) reference_.clear();
}
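Example 3 constructs each DataPoint with three arguments; in OpenMS versions where DataPoints is a vector of DataPoint objects (rather than of plain std::pair as in Example 1), the third argument is a free-text annotation, here the peptide sequence. A minimal sketch with invented values:

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>

void sketch()
{
  using namespace OpenMS;
  TransformationDescription::DataPoints data;
  // (RT in this run, reference RT, note) - the note records which peptide produced the point
  data.push_back(TransformationDescription::DataPoint(1230.5, 1215.0, "PEPTIDER"));
  data.push_back(TransformationDescription::DataPoint(2480.0, 2460.5, "SAMPLEK"));
  TransformationDescription trafo(data);
  trafo.fitModel("linear");
}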
Example 4: main_
ExitCodes main_(int, const char**)
{
  //-------------------------------------------------------------
  // parameter handling
  //-------------------------------------------------------------
  String in = getStringOption_("in");
  String out = getStringOption_("out");
  String trafo_in = getStringOption_("trafo_in");
  String trafo_out = getStringOption_("trafo_out");
  Param model_params = getParam_().copy("model:", true);
  String model_type = model_params.getValue("type");
  model_params = model_params.copy(model_type + ":", true);
  ProgressLogger progresslogger;
  progresslogger.setLogType(log_type_);

  //-------------------------------------------------------------
  // check for valid input
  //-------------------------------------------------------------
  if (out.empty() && trafo_out.empty())
  {
    writeLog_("Error: Either a data or a transformation output file has to be provided (parameters 'out'/'trafo_out')");
    return ILLEGAL_PARAMETERS;
  }
  if (in.empty() != out.empty())
  {
    writeLog_("Error: Data input and output parameters ('in'/'out') must be used together");
    return ILLEGAL_PARAMETERS;
  }

  //-------------------------------------------------------------
  // apply transformation
  //-------------------------------------------------------------
  TransformationXMLFile trafoxml;
  TransformationDescription trafo;
  trafoxml.load(trafo_in, trafo);
  if (model_type != "none")
  {
    trafo.fitModel(model_type, model_params);
  }
  if (getFlag_("invert"))
  {
    trafo.invert();
  }
  if (!trafo_out.empty())
  {
    trafoxml.store(trafo_out, trafo);
  }
  if (!in.empty()) // load input
  {
    FileTypes::Type in_type = FileHandler::getType(in);
    if (in_type == FileTypes::MZML)
    {
      MzMLFile file;
      MSExperiment<> map;
      applyTransformation_(in, out, trafo, file, map);
    }
    else if (in_type == FileTypes::FEATUREXML)
    {
      FeatureXMLFile file;
      FeatureMap map;
      applyTransformation_(in, out, trafo, file, map);
    }
    else if (in_type == FileTypes::CONSENSUSXML)
    {
      ConsensusXMLFile file;
      ConsensusMap map;
      applyTransformation_(in, out, trafo, file, map);
    }
    else if (in_type == FileTypes::IDXML)
    {
      IdXMLFile file;
      vector<ProteinIdentification> proteins;
      vector<PeptideIdentification> peptides;
      file.load(in, proteins, peptides);
      bool store_original_rt = getFlag_("store_original_rt");
      MapAlignmentTransformer::transformRetentionTimes(peptides, trafo,
                                                       store_original_rt);
      // no "data processing" section in idXML
      file.store(out, proteins, peptides);
    }
  }
  return EXECUTION_OK;
}
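The model handling at the top of Example 4 is a recurring pattern: Param::copy("model:", true) extracts the "model:" subtree with the prefix stripped, the "type" entry selects the model, and a second copy keeps only that model's own parameters for fitModel. A sketch of the pattern, assuming a trafo that already has data points loaded:

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/DATASTRUCTURES/Param.h>

void sketch(OpenMS::TransformationDescription& trafo)
{
  using namespace OpenMS;
  Param p;
  p.setValue("model:type", "linear");
  // (model-specific entries would live under "model:linear:...")
  Param model_params = p.copy("model:", true);              // strips the prefix -> "type"
  String model_type = model_params.getValue("type");        // "linear"
  model_params = model_params.copy(model_type + ":", true); // keeps only "linear:*" entries
  trafo.fitModel(model_type, model_params);
}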
Example 5: main_
ExitCodes main_(int, const char**) override
{
  ExitCodes ret = TOPPMapAlignerBase::checkParameters_();
  if (ret != EXECUTION_OK) return ret;

  MapAlignmentAlgorithmPoseClustering algorithm;
  Param algo_params = getParam_().copy("algorithm:", true);
  algorithm.setParameters(algo_params);
  algorithm.setLogType(log_type_);

  StringList in_files = getStringList_("in");
  StringList out_files = getStringList_("out");
  StringList out_trafos = getStringList_("trafo_out");

  Size reference_index = getIntOption_("reference:index");
  String reference_file = getStringOption_("reference:file");
  FileTypes::Type in_type = FileHandler::getType(in_files[0]);
  String file;
  if (!reference_file.empty())
  {
    file = reference_file;
    reference_index = in_files.size(); // points to an invalid index
  }
  else if (reference_index > 0) // normal reference (index was checked before)
  {
    file = in_files[--reference_index]; // ref. index is 1-based in parameters, but should be 0-based here
  }
  else if (reference_index == 0) // no reference given
  {
    LOG_INFO << "Picking a reference (by size) ..." << std::flush;
    // use the map with the highest number of features as reference:
    Size max_count(0);
    FeatureXMLFile f;
    for (Size i = 0; i < in_files.size(); ++i)
    {
      Size s = 0;
      if (in_type == FileTypes::FEATUREXML)
      {
        s = f.loadSize(in_files[i]);
      }
      else if (in_type == FileTypes::MZML) // this is expensive!
      {
        PeakMap exp;
        MzMLFile().load(in_files[i], exp);
        exp.updateRanges(1);
        s = exp.getSize();
      }
      if (s > max_count)
      {
        max_count = s;
        reference_index = i;
      }
    }
    LOG_INFO << " done" << std::endl;
    file = in_files[reference_index];
  }

  FeatureXMLFile f_fxml;
  if (out_files.empty()) // no need to store featureXML, thus we can load only the minimum required information
  {
    f_fxml.getOptions().setLoadConvexHull(false);
    f_fxml.getOptions().setLoadSubordinates(false);
  }
  if (in_type == FileTypes::FEATUREXML)
  {
    FeatureMap map_ref;
    FeatureXMLFile f_fxml_tmp; // for the reference, we never need CH or subordinates
    f_fxml_tmp.getOptions().setLoadConvexHull(false);
    f_fxml_tmp.getOptions().setLoadSubordinates(false);
    f_fxml_tmp.load(file, map_ref);
    algorithm.setReference(map_ref);
  }
  else if (in_type == FileTypes::MZML)
  {
    PeakMap map_ref;
    MzMLFile().load(file, map_ref);
    algorithm.setReference(map_ref);
  }

  ProgressLogger plog;
  plog.setLogType(log_type_);
  plog.startProgress(0, in_files.size(), "Aligning input maps");
  Size progress(0); // thread-safe progress
  // TODO: it should all work on featureXML files, since we might need them for output anyway. Converting to consensusXML is just wasting memory!
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic, 1)
#endif
  for (int i = 0; i < static_cast<int>(in_files.size()); ++i)
  {
    TransformationDescription trafo;
    if (in_type == FileTypes::FEATUREXML)
    {
      FeatureMap map;
      // workaround for loading: use a temporary FeatureXMLFile, since it is not thread-safe
      FeatureXMLFile f_fxml_tmp; // do not use OMP-firstprivate, since FeatureXMLFile has no copy c'tor
      f_fxml_tmp.getOptions() = f_fxml.getOptions();
      f_fxml_tmp.load(in_files[i], map);
      if (i == static_cast<int>(reference_index)) trafo.fitModel("identity");
//......... (remainder of this code omitted) .........
Example 6: main_
ExitCodes main_(int, const char**)
{
  StringList file_list = getStringList_("in");
  String tr_file_str = getStringOption_("tr");
  String out = getStringOption_("out");
  bool is_swath = getFlag_("is_swath");
  bool ppm = getFlag_("ppm");
  bool extract_MS1 = getFlag_("extract_MS1");
  double min_upper_edge_dist = getDoubleOption_("min_upper_edge_dist");
  double mz_extraction_window = getDoubleOption_("mz_window");
  double rt_extraction_window = getDoubleOption_("rt_window");
  String extraction_function = getStringOption_("extraction_function");

  // If we have a transformation file, trafo will transform the RT in the
  // scoring according to the model. If we don't have one, it will apply the
  // null transformation.
  String trafo_in = getStringOption_("rt_norm");
  TransformationDescription trafo;
  if (trafo_in.size() > 0)
  {
    TransformationXMLFile trafoxml;
    String model_type = getStringOption_("model:type");
    Param model_params = getParam_().copy("model:", true);
    trafoxml.load(trafo_in, trafo);
    trafo.fitModel(model_type, model_params);
  }
  TransformationDescription trafo_inverse = trafo;
  trafo_inverse.invert();

  const char* tr_file = tr_file_str.c_str();
  MapType out_exp;
  std::vector<OpenMS::MSChromatogram> chromatograms;
  TraMLFile traml;
  OpenMS::TargetedExperiment targeted_exp;
  std::cout << "Loading TraML file" << std::endl;
  traml.load(tr_file, targeted_exp);
  std::cout << "Loaded TraML file" << std::endl;

  // Do parallelization over the different input files
  // Only in OpenMP 3.0 are unsigned loop variables allowed
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(file_list.size()); ++i)
  {
    boost::shared_ptr<PeakMap> exp(new PeakMap);
    MzMLFile f;
    // Logging and output to the console
    // IF_MASTERTHREAD f.setLogType(log_type_);

    // Find the transitions to extract and extract them
    MapType tmp_out;
    OpenMS::TargetedExperiment transition_exp_used;
    f.load(file_list[i], *exp);
    if (exp->empty()) { continue; } // if empty, go on
    OpenSwath::SpectrumAccessPtr expptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(exp);
    bool do_continue = true;
    if (is_swath)
    {
      do_continue = OpenSwathHelper::checkSwathMapAndSelectTransitions(*exp, targeted_exp, transition_exp_used, min_upper_edge_dist);
    }
    else
    {
      transition_exp_used = targeted_exp;
    }

#ifdef _OPENMP
#pragma omp critical (OpenSwathChromatogramExtractor_metadata)
#endif
    // after loading the first file, copy the meta data from that experiment
    // this may happen *after* chromatograms were already added to the
    // output, thus we do NOT fill the experiment here but rather store all
    // the chromatograms in the "chromatograms" array and store them in
    // out_exp afterwards.
    if (i == 0)
    {
      out_exp = *exp;
      out_exp.clear(false);
    }

    std::cout << "Extracting " << transition_exp_used.getTransitions().size() << " transitions" << std::endl;
    std::vector<OpenSwath::ChromatogramPtr> chromatogram_ptrs;
    std::vector<ChromatogramExtractor::ExtractionCoordinates> coordinates;

    // continue if the map is not empty
    if (do_continue)
    {
      // Prepare the coordinates (with or without rt extraction) and then extract the chromatograms
      ChromatogramExtractor extractor;
      if (rt_extraction_window < 0)
      {
        extractor.prepare_coordinates(chromatogram_ptrs, coordinates, transition_exp_used, rt_extraction_window, extract_MS1);
      }
      else
      {
//......... (remainder of this code omitted) .........
Example 7: computeTransformations_
//......... (part of the code omitted here) .........
  SeqToValue temp;
  SeqToValue::iterator pos = temp.begin(); // to prevent segfault below
  for (SeqToValue::iterator ref_it = reference_.begin();
       ref_it != reference_.end(); ++ref_it)
  {
    SeqToList::iterator med_it = medians_per_seq.find(ref_it->first);
    if ((med_it != medians_per_seq.end()) &&
        (med_it->second.size() + 1 >= min_run_occur_))
    {
      temp.insert(pos, *ref_it);
      pos = --temp.end(); // would cause segfault if "temp" was empty
    }
  }
  temp.swap(reference_);
}
else // compute overall RT median per sequence (median of medians per run)
{
  LOG_DEBUG << "Computing overall RT medians per sequence..." << endl;
  // remove peptides that don't occur in enough runs (at least two):
  LOG_DEBUG << "Removing peptides that occur in too few runs..." << endl;
  SeqToList temp;
  SeqToList::iterator pos = temp.begin(); // to prevent segfault below
  for (SeqToList::iterator med_it = medians_per_seq.begin();
       med_it != medians_per_seq.end(); ++med_it)
  {
    if (med_it->second.size() >= min_run_occur_)
    {
      temp.insert(pos, *med_it);
      pos = --temp.end(); // would cause segfault if "temp" was empty
    }
  }
  temp.swap(medians_per_seq);
  computeMedians_(medians_per_seq, reference_);
}

DoubleReal max_rt_shift = param_.getValue("max_rt_shift");
if (max_rt_shift == 0)
{
  max_rt_shift = numeric_limits<DoubleReal>::max();
}
else if (max_rt_shift <= 1) // compute max. allowed shift from overall retention time range:
{
  DoubleReal rt_range, rt_min = reference_.begin()->second,
             rt_max = rt_min;
  for (SeqToValue::iterator it = ++reference_.begin();
       it != reference_.end(); ++it)
  {
    rt_min = min(rt_min, it->second);
    rt_max = max(rt_max, it->second);
  }
  rt_range = rt_max - rt_min;
  max_rt_shift *= rt_range;
}
LOG_DEBUG << "Max. allowed RT shift (in seconds): " << max_rt_shift << endl;

// generate RT transformations:
LOG_DEBUG << "Generating RT transformations..." << endl;
LOG_INFO << "\nAlignment based on:" << endl; // diagnostic output
for (Size i = 0, offset = 0; i < size + 1; ++i)
{
  if (i == reference_index_ - 1)
  {
    // if one of the input maps was used as reference, it has been skipped
    // so far - now we have to consider it again:
    TransformationDescription trafo;
    trafo.fitModel("identity");
    transforms.push_back(trafo);
    LOG_INFO << "- 0 data points for sample " << i + 1 << " (reference)\n";
    offset = 1;
  }
  if (i >= size) break;

  // to be useful for the alignment, a peptide sequence has to occur in the
  // current run ("medians_per_run[i]"), but also in at least one other run
  // ("medians_overall"):
  TransformationDescription::DataPoints data;
  for (SeqToValue::iterator med_it = medians_per_run[i].begin();
       med_it != medians_per_run[i].end(); ++med_it)
  {
    SeqToValue::const_iterator pos = reference_.find(med_it->first);
    if ((pos != reference_.end()) &&
        (fabs(med_it->second - pos->second) <= max_rt_shift))
    { // found, and satisfies "max_rt_shift" condition!
      data.push_back(make_pair(med_it->second, pos->second));
    }
  }
  transforms.push_back(TransformationDescription(data));
  LOG_INFO << "- " << data.size() << " data points for sample "
           << i + offset + 1 << "\n";
}
LOG_INFO << endl;

// delete temporary reference
if (!reference_given) reference_.clear();
}
Example 8: run
//......... (part of the code omitted here) .........
    }
    while (0);
    setProgress(++actual_progress);

    // apply freq_cutoff, setting smaller values to zero
    for (Size index = 0; index < shift_hash_.getData().size(); ++index)
    {
      if (shift_hash_.getData()[index] < freq_cutoff_low)
      {
        shift_hash_.getData()[index] = 0;
      }
    }
    setProgress(++actual_progress);

    // optionally, dump after noise filtering using freq_cutoff
    if (do_dump_buckets)
    {
      dump_buckets_file << "# after freq_cutoff, which is: " << freq_cutoff_low << '\n';
      for (Size index = 0; index < shift_hash_.getData().size(); ++index)
      {
        const double image = shift_hash_.index2key(index);
        const double height = shift_hash_.getData()[index];
        dump_buckets_file << filtering_stage << '\t' << index << '\t' << image << '\t' << height << '\n';
      }
      dump_buckets_file << '\n';
    }
    setProgress(++actual_progress);

    // iterative cut-off based on mean and stdev - relies upon scaling_cutoff_stdev_multiplier, which is a bit hard to set right.
    {
      Math::BasicStatistics<double> statistics;
      std::vector<double>::const_iterator data_begin = shift_hash_.getData().begin();
      const Size data_size = shift_hash_.getData().size();
      Size data_range_begin = 0;
      Size data_range_end = data_size;
      for (UInt loop = 0; loop < loops_mean_stdev_cutoff; ++loop) // MAGIC ALERT: number of loops
      {
        statistics.update(data_begin + data_range_begin, data_begin + data_range_end);
        double mean = statistics.mean() + data_range_begin;
        double stdev = sqrt(statistics.variance());
        data_range_begin = floor(std::max<double>(mean - scaling_cutoff_stdev_multiplier * stdev, 0));
        data_range_end = ceil(std::min<double>(mean + scaling_cutoff_stdev_multiplier * stdev + 1, data_size));
        const double outside_mean = shift_hash_.index2key(mean);
        const double outside_stdev = stdev * shift_hash_.getScale();
        // shift_low = (outside_mean - outside_stdev);
        shift_centroid = (outside_mean);
        // shift_high = (outside_mean + outside_stdev);
        if (do_dump_buckets)
        {
          dump_buckets_file << "# loop: " << loop << " mean: " << outside_mean
                            << " stdev: " << outside_stdev
                            << " (mean-stdev): " << outside_mean - outside_stdev
                            << " (mean+stdev): " << outside_mean + outside_stdev
                            << " data_range_begin: " << data_range_begin
                            << " data_range_end: " << data_range_end << std::endl;
        }
      }
      setProgress(++actual_progress);
    }
    if (do_dump_buckets)
    {
      dump_buckets_file << "# EOF" << std::endl;
      dump_buckets_file.close();
    }
    setProgress(80);
  }
  while (0);

  //************************************************************************************
  // Estimate transform

  // Compute the shifts at the low and high ends by looking at (around) the fullest bins.
  double intercept;
#if 1 // yes of course, use centroids for images of rt_low and rt_high
  intercept = shift_centroid;
#else // ooh, use maximum bins instead (Note: this is a fossil which would disregard most of the above computations! The code is left here for developers/debugging only.)
  const Size rt_low_max_index = std::distance(shift_hash_.getData().begin(),
                                              std::max_element(shift_hash_.getData().begin(), shift_hash_.getData().end()));
  intercept = shift_hash_.index2key(rt_low_max_index);
#endif
  VV_(intercept);
  setProgress(++actual_progress);

  // set trafo
  {
    Param params;
    params.setValue("slope", 1.0);
    params.setValue("intercept", intercept);
    TransformationDescription trafo;
    trafo.fitModel("linear", params);
    transformation = trafo;
  }
  setProgress(++actual_progress);
  endProgress();
  return;
} // run()
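Note that the final fitModel("linear", params) call in Example 8 is given an explicit slope and intercept and no data points, so no fitting takes place; the parameters define the line directly. A minimal sketch of building such a pure RT shift (the shift value is invented):

#include <OpenMS/ANALYSIS/MAPMATCHING/TransformationDescription.h>
#include <OpenMS/DATASTRUCTURES/Param.h>
#include <cassert>
#include <cmath>

void sketch()
{
  using namespace OpenMS;
  Param params;
  params.setValue("slope", 1.0);
  params.setValue("intercept", 42.0); // hypothetical shift in seconds
  TransformationDescription shift;
  shift.fitModel("linear", params);   // no data points needed when both are given
  assert(std::fabs(shift.apply(100.0) - 142.0) < 1e-9);
}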