

C++ TRandom::Poisson Method Code Examples

This article collects typical usage examples of the C++ method TRandom::Poisson. If you are looking for concrete examples of how TRandom::Poisson is called and used in practice, the hand-picked snippets below should help. You can also explore further usage examples of the containing class, TRandom.


Seven code examples of the TRandom::Poisson method are shown below, sorted by popularity by default.
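
Before the project examples, here is a minimal stand-alone sketch (not taken from any of the projects below) showing the basic call: TRandom::Poisson(mean) draws an integer count from a Poisson distribution with the given mean.

// Minimal illustrative sketch, not from the projects below.
// In real code one usually uses TRandom3, which has the same interface.
#include "TRandom.h"
#include <iostream>

void poissonSketch()
{
  TRandom rng(4357);          // fixed seed for reproducibility
  const double mean = 3.2;    // expected number of events

  // Draw a few Poisson-distributed counts with this mean
  for (int i = 0; i < 5; ++i) {
    std::cout << rng.Poisson(mean) << std::endl;
  }
}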

Example 1: run

// Throw toy experiments to predict observed yields:
// - Throw nPredictions mean values ('prediction') from the background
//   estimates, taking their uncertainties into account
//   - Per prediction, throw nExperiments observed yields from a Poisson
//     distribution with that prediction as its mean
void ToyExperiments::run(unsigned int nPredictions, unsigned int nExperiments) const {
  std::cout << "Predicting event yields in " << Parameters::nBins() << " bins from " << nPredictions*nExperiments << " toy experiments ...  " << std::flush;

  // Minimal value (in standard deviations) allowed for the
  // correlated fluctuation
  const double minCorr = findMinValidRandomNumberForCorrelatedUncertainties();

  // Throw mean values
  for(unsigned int p = 0; p < nPredictions; ++p) {
    // Throw one (normalized) random number for correlated
    // uncertainties that is valid in all bins
    double rCorr = rand_->Gaus(0.,1.);
    while( rCorr <= minCorr ) {
      rCorr = rand_->Gaus(0.,1.);
    }

    // Loop over all bins and get individual predictions
    for(unsigned int bin = 0; bin < Parameters::nBins(); ++bin) {
      double prediction = -1.;
      bool negativePrediction = true;
      while( negativePrediction ) {
	// Throw one (normalized) random number for uncorrelated
	// uncertainties that is valid in this bin only
	double rUncorr = rand_->Gaus(0.,1.);
	
	// Scale the normalized random numbers by the uncertainties' size
	// to obtain variation of yield
	double uncorrVar = rUncorr * uncorrelatedUncerts_.at(bin);
	double corrVar   = rCorr   * correlatedUncerts_.at(bin);
	
	// Add variations to yield
	prediction = meanPredictions_.at(bin) + uncorrVar + corrVar;
	
	// Check if prediction is positive
	if( prediction >= 0. ) {
	  negativePrediction = false;
	}
      }

      // Throw predicted yields from Poisson with
      // this mean
      for(unsigned int e = 0; e < nExperiments; ++e) {
	predictedYields_.at(bin)->Fill(rand_->Poisson(prediction));
      }
    }
  }
  std::cout << "ok" << std::endl;

  for(unsigned int bin = 0; bin < Parameters::nBins(); ++bin) {
    if( predictedYields_.at(bin)->GetBinContent(Parameters::maxYield()+1) > 0 ) {
      std::cerr << "\n\nWARNING: Overflows in yield histograms!" << std::endl;
      std::cerr << "This is probably safe, but better increase Parameters::maxYield.\n\n" << std::endl;
    }
  }
}
Developer: mschrode, Project: UserCode, Lines: 60, Source: computeSignificance.C
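
The pattern used in run() above — smear the expected yield with Gaussian systematic variations, reject negative means, then draw the observed count from a Poisson distribution — can also be illustrated in a stand-alone form. The following sketch uses made-up numbers for the mean prediction and uncertainties and collapses everything into a single bin, so it is illustrative only; note that with a single bin the correlated and uncorrelated variations behave identically, whereas in the multi-bin code above the correlated random number is shared across all bins.

// Stand-alone sketch of the smear-then-Poisson pattern; all numbers are
// illustrative assumptions, not values from the project above.
#include "TRandom3.h"
#include "TH1D.h"

void toySketch(unsigned int nToys = 10000)
{
  TRandom3 rng(0);
  const double meanPrediction = 4.5;   // assumed expected background yield
  const double uncorrUncert   = 0.8;   // assumed absolute uncorrelated uncertainty
  const double corrUncert     = 1.2;   // assumed absolute correlated uncertainty

  TH1D* hYield = new TH1D("hYield", ";predicted yield;toys", 31, -0.5, 30.5);
  for (unsigned int i = 0; i < nToys; ++i) {
    double prediction = -1.;
    while (prediction < 0.) {   // reject unphysical (negative) mean values
      const double rUncorr = rng.Gaus(0., 1.);
      const double rCorr   = rng.Gaus(0., 1.);
      prediction = meanPrediction + rUncorr*uncorrUncert + rCorr*corrUncert;
    }
    hYield->Fill(rng.Poisson(prediction));   // observed count for this toy
  }
  hYield->Draw();
}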

Example 2: printGlobalPValueOfLocalFluctuation

// P-value for finding, in any bin, a local p-value as small as the one
// observed in 'bin', or smaller
void ToyExperiments::printGlobalPValueOfLocalFluctuation(unsigned int bin, unsigned int nExperiments) const {
  std::cout << "Determining global p-value for observed fluctuation in bin " << bin << " from " << nExperiments << " toy experiments ...  " << std::flush;

  // Find the predicted yields that correspond
  // to the local p-value 'localPValue'
  std::vector<unsigned int> limitYields = yields(localPValue(bin,observedYields_.at(bin)));

  TH1* hIsAbovePValue = new TH1D("hIsAbovePValue","",2,0,2);

  const double minCorr = findMinValidRandomNumberForCorrelatedUncertainties();

  for(unsigned int p = 0; p < nExperiments; ++p) {
    bool isAbovePValue = false;
    double rCorr = rand_->Gaus(0.,1.);
    while( rCorr <= minCorr ) {
      rCorr = rand_->Gaus(0.,1.);
    }
    for(unsigned int b = 0; b < Parameters::nBins(); ++b) {
      double prediction = -1.;
      bool negativePrediction = true;
      while( negativePrediction ) {
	double rUncorr = rand_->Gaus(0.,1.);
	double uncorrVar = rUncorr * uncorrelatedUncerts_.at(b);
	double corrVar   = rCorr   * correlatedUncerts_.at(b);
	prediction = meanPredictions_.at(b) + uncorrVar + corrVar;
	if( prediction >= 0. ) {
	  negativePrediction = false;
	}
      }
      double predictedYield = rand_->Poisson(prediction);
      if( predictedYield >= limitYields.at(b) ) {
	isAbovePValue = true;
	break;
      }      
    }
    if( isAbovePValue ) {
      hIsAbovePValue->Fill(1);
    } else {
      hIsAbovePValue->Fill(0);
    }
  }
  std::cout << "ok" << std::endl;

  double lpv      = localPValue(bin,observedYields_.at(bin));
  double gpUncorr = 1. - pow(1.-lpv,Parameters::nBins());
  double gpCorr   = hIsAbovePValue->Integral(2,2)/hIsAbovePValue->Integral(1,2);

  std::cout << "\n\n----- Global p-value for observed fluctuation in bin " << bin << " -----" << std::endl;
  std::cout << "  local p-value                           : " << lpv << " (" << TMath::NormQuantile(1.-lpv) << "sig)" << std::endl;
  std::cout << "  global p-value (without correlations)   : " << gpUncorr  << " (" << TMath::NormQuantile(1.-gpUncorr) << "sig)" << std::endl;
  std::cout << "  global p-value (including correlations) : " << gpCorr  << " (" << TMath::NormQuantile(1.-gpCorr) << "sig)" << std::endl;
}
Developer: mschrode, Project: UserCode, Lines: 53, Source: computeSignificance.C
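
The "without correlations" number printed above is the standard trial-factor (look-elsewhere) correction: for N independent bins, the probability that at least one bin fluctuates to a local p-value of pLocal or smaller is 1 - (1 - pLocal)^N, which is exactly the relation used for gpUncorr in the example. A tiny sketch of that relation, with illustrative values only:

// Trial-factor ("look-elsewhere") correction for N independent bins;
// the numbers below are illustrative only.
#include <cmath>
#include <iostream>

void trialFactorSketch()
{
  const double pLocal = 1.35e-3;   // assumed local p-value (roughly 3 sigma)
  const int    nBins  = 8;         // assumed number of search bins

  // Probability that at least one of nBins independent bins fluctuates
  // to a local p-value this small or smaller
  const double pGlobal = 1. - std::pow(1. - pLocal, nBins);
  std::cout << "global p-value (no correlations): " << pGlobal << std::endl;
}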

Example 3: write

double write(int n) {



  TRandom R;
  TStopwatch timer;


  TFile f1("mathcoreLV.root","RECREATE");

  // create tree
  TTree t1("t1","Tree with new LorentzVector");

  std::vector<ROOT::Math::XYZTVector>  tracks;
  std::vector<ROOT::Math::XYZTVector> * pTracks = &tracks;
  t1.Branch("tracks","std::vector<ROOT::Math::LorentzVector<ROOT::Math::PxPyPzE4D<double> > >",&pTracks);

  double M = 0.13957;  // charged pion (pi+) mass [GeV]

  timer.Start();
  double sum = 0;
  for (int i = 0; i < n; ++i) {
    int nPart = R.Poisson(5);
    pTracks->clear();
    pTracks->reserve(nPart);
    for (int j = 0; j < nPart; ++j) {
      double px = R.Gaus(0,10);
      double py = R.Gaus(0,10);
      double pt = sqrt(px*px +py*py);
      double eta = R.Uniform(-3,3);
      double phi = R.Uniform(0.0 , 2*TMath::Pi() );
      ROOT::Math::RhoEtaPhiVector vcyl( pt, eta, phi);
      // set energy
      double E = sqrt( vcyl.R()*vcyl.R() + M*M);
      ROOT::Math::XYZTVector q( vcyl.X(), vcyl.Y(), vcyl.Z(), E);
      // fill track vector
      pTracks->push_back(q);
      // evaluate sum of components to check
      sum += q.x()+q.y()+q.z()+q.t();
    }
    t1.Fill();
  }

  f1.Write();
  timer.Stop();
  std::cout << " Time for new Vector " << timer.RealTime() << "  " << timer.CpuTime() << std::endl;

  t1.Print();
  return sum;
}
Developer: digideskio, Project: root, Lines: 50, Source: mathcoreVectorCollection.C
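
For completeness, here is a rough sketch of how one might read the tree back, assuming the file "mathcoreLV.root", the tree "t1", and the branch "tracks" created by write() above:

// Rough reading sketch; assumes the file, tree and branch names used by write().
#include "TFile.h"
#include "TTree.h"
#include "Math/Vector4D.h"
#include <vector>
#include <iostream>

double read()
{
  TFile f1("mathcoreLV.root");
  TTree* t1 = (TTree*)f1.Get("t1");
  if (!t1) { std::cerr << "Tree t1 not found" << std::endl; return 0; }

  std::vector<ROOT::Math::XYZTVector>* pTracks = nullptr;
  t1->SetBranchAddress("tracks", &pTracks);

  double sum = 0;
  for (Long64_t i = 0; i < t1->GetEntries(); ++i) {
    t1->GetEntry(i);
    for (const auto& q : *pTracks)
      sum += q.x() + q.y() + q.z() + q.t();   // same check-sum as in write()
  }
  return sum;
}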

Example 4: error

double error(double a, double b) {

  TRandom rndm;
  rndm.SetSeed(12345);

  if (a <= 0.000000001) return 0.;

  // Binomial uncertainty on the efficiency eff = a/b, with the total
  // count fluctuated according to a Poisson distribution
  double eff = a/b;
  int n0 = rndm.Poisson(a+b);
  // int n1 = rndm.Binomial(n0,eff);

  if (n0 == 0) return 0.;  // guard against division by zero

  double err = sqrt(eff*(1-eff)/n0);

  return err;
}
Developer: bluejelibaby, Project: AnalysisV2, Lines: 15, Source: makeEtaPlots_Signal_andW.C

Example 5: FitBias

void FitBias(TString CAT,TString CUT,float SIG,float BKG,int NTOYS)
{
  gROOT->ForceStyle();
  
  RooMsgService::instance().setSilentMode(kTRUE);
  RooMsgService::instance().setStreamStatus(0,kFALSE);
  RooMsgService::instance().setStreamStatus(1,kFALSE);
  
  // -----------------------------------------
  TFile *fTemplates = TFile::Open("templates_"+CUT+"_"+CAT+"_workspace.root");
  RooWorkspace *wTemplates = (RooWorkspace*)fTemplates->Get("w");
  RooRealVar *x            = (RooRealVar*)wTemplates->var("mTop");
  RooAbsPdf *pdf_signal    = (RooAbsPdf*)wTemplates->pdf("ttbar_pdf_Nominal");
  RooAbsPdf *pdf_bkg       = (RooAbsPdf*)wTemplates->pdf("qcdCor_pdf"); 
  TRandom *rnd = new TRandom();
  rnd->SetSeed(0);
  x->setBins(250);   
  RooPlot *frame;

  TFile *outf;

  if (NTOYS > 1) { 
    outf = TFile::Open("FitBiasToys_"+CUT+"_"+CAT+".root","RECREATE");
  }

  float nSigInj,nBkgInj,nSigFit,nBkgFit,eSigFit,eBkgFit,nll;

  TTree *tr = new TTree("toys","toys");
  
  tr->Branch("nSigInj",&nSigInj,"nSigInj/F");
  tr->Branch("nSigFit",&nSigFit,"nSigFit/F");
  tr->Branch("nBkgInj",&nBkgInj,"nBkgInj/F");
  tr->Branch("nBkgFit",&nBkgFit,"nBkgFit/F");
  tr->Branch("eSigFit",&eSigFit,"eSigFit/F");
  tr->Branch("eBkgFit",&eBkgFit,"eBkgFit/F");
  tr->Branch("nll"    ,&nll    ,"nll/F");

  for(int itoy=0;itoy<NTOYS;itoy++) {
    // generate pseudodataset
    nSigInj = rnd->Poisson(SIG);
    nBkgInj = rnd->Poisson(BKG);
    RooRealVar *nSig = new RooRealVar("nSig","nSig",nSigInj);
    RooRealVar *nBkg = new RooRealVar("nBkg","nBkg",nBkgInj);
    RooAddPdf *model = new RooAddPdf("model","model",RooArgList(*pdf_signal,*pdf_bkg),RooArgList(*nSig,*nBkg)); 
    RooDataSet *data = model->generate(*x,nSigInj+nBkgInj);
    
    RooDataHist *roohist = new RooDataHist("roohist","roohist",RooArgList(*x),*data);
    // build fit model
    RooRealVar *nFitSig = new RooRealVar("nFitSig","nFitSig",SIG,0,10*SIG);
    RooRealVar *nFitBkg = new RooRealVar("nFitBkg","nFitBkg",BKG,0,10*BKG);
    RooAddPdf *modelFit = new RooAddPdf("modelFit","modelFit",RooArgList(*pdf_signal,*pdf_bkg),RooArgList(*nFitSig,*nFitBkg)); 
    // fit the pseudo dataset
    RooFitResult *res = modelFit->fitTo(*roohist,RooFit::Save(),RooFit::Extended(kTRUE));
    //res->Print();
    nSigFit = nFitSig->getVal();
    nBkgFit = nFitBkg->getVal();
    eSigFit = nFitSig->getError();
    eBkgFit = nFitBkg->getError();
    nll     = res->minNll();
    tr->Fill();
    if (itoy % 100 == 0) {
      cout<<"Toy #"<<itoy<<": injected = "<<nSigInj<<", fitted = "<<nSigFit<<", error = "<<eSigFit<<endl;
    }
    if (NTOYS == 1) {
      frame = x->frame();
      roohist->plotOn(frame); 
      model->plotOn(frame);
    }
  }
  if (NTOYS == 1) {
    TCanvas *can = new TCanvas("Toy","Toy",900,600);
    frame->Draw();
  }  
  else {
    outf->cd();
    tr->Write();
    outf->Close();
    fTemplates->Close();
  }  
}
Developer: kkousour, Project: UserCode, Lines: 80, Source: FitBias.C
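
As a usage note, the macro might be invoked from the ROOT prompt roughly as follows; the category and cut strings and the yields are placeholder values and must correspond to an existing templates_<CUT>_<CAT>_workspace.root file:

// Hypothetical invocation from the ROOT prompt; all argument values are placeholders.
.L FitBias.C+
FitBias("csvt", "tight", 250., 120., 1000);   // CAT, CUT, SIG, BKG, NTOYS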

Example 6: principal

void principal(Int_t n=10, Int_t m=10000) 
{
  // 
  // Principal Components Analysis (PCA) example
  // 
  // Example of using TPrincipal as a stand alone class. 
  // 
  // We create n-dimensional data points, where the last c = n / 5 + 1
  // (integer division) variables are partial sums of the first n - c
  // randomly distributed variables, and hence correlated with them.
  //
  // (The original tutorial documentation shows a plot of the resulting
  // eigenvalues at this point.)
  //Authors: Rene Brun, Christian Holm Christensen
    
  Int_t c = n / 5 + 1;

  cout << "*************************************************" << endl; 
  cout << "*         Principal Component Analysis          *" << endl;
  cout << "*                                               *" << endl;
  cout << "*  Number of variables:           " << setw(4) << n 
       << "          *" << endl;
  cout << "*  Number of data points:         " << setw(8) << m
       << "      *" << endl;
  cout << "*  Number of dependent variables: " << setw(4) << c
       << "          *" << endl;
  cout << "*                                               *" << endl;
  cout << "*************************************************" << endl; 
  
      
  // Initialise the TPrincipal object. Use the empty string for the
  // final argument if you don't want the covariance
  // matrix. Normalising the covariance matrix is a good idea if your
  // variables have different orders of magnitude.
  TPrincipal* principal = new TPrincipal(n,"ND");
  
  // Use a pseudo-random number generator
  TRandom* random = new TRandom;
  
  // Make the m data-points
  // Make a variable to hold our data
  // Allocate memory for the data point
  Double_t* data = new Double_t[n];
  for (Int_t i = 0; i < m; i++) {

    // First we create the un-correlated, random variables, according
    // to one of three distributions 
    for (Int_t j = 0; j < n - c; j++) {
       if (j % 3 == 0)
          data[j] = random->Gaus(5,1);
       else if (j % 3 == 1)
          data[j] = random->Poisson(8);
       else
          data[j] = random->Exp(2);
    }

    // Then we create the correlated variables
    for (Int_t j = 0 ; j < c; j++) {
       data[n - c + j] = 0;
       for (Int_t k = 0; k < n - c - j; k++)
          data[n - c + j] += data[k];
    }
    
    // Finally we're ready to add this datapoint to the PCA
    principal->AddRow(data);
  }
    
  // We delete the data after use, since TPrincipal got it by now. 
  delete [] data;
  
  // Do the actual analysis
  principal->MakePrincipals();
  
  // Print out the result on
  principal->Print();

  // Test the PCA 
  principal->Test();

  // Make some histograms of the original, principal, residue, etc. data
  principal->MakeHistograms();
  
  // Make two functions to map between feature and pattern space 
  principal->MakeCode();

  // Start a browser, so that we may browse the histograms generated
  // above 
  TBrowser* b = new TBrowser("principalBrowser", principal);
  
}
Developer: chunjie-sam-liu, Project: genome_resequencing_pipeline, Lines: 90, Source: principal.C

Example 7: printGlobalPValueOfObservation

// The probability of finding a value of the test statistic equal to or
// worse than the one obtained with the observed yields
void ToyExperiments::printGlobalPValueOfObservation(unsigned int nExperiments) const {
  std::cout << "Performing " << nExperiments << " toy experiments to compute global p-value of observation ...  " << std::flush;

  // The test statistic
  TH1* hTestStat = new TH1D("hTestStat","",10000*Parameters::nBins(),0,1000);
  std::vector<unsigned int> obs(Parameters::nBins(),0);

  // Minimal value (in standard deviations) allowed for the
  // correlated fluctuation
  const double minCorr = findMinValidRandomNumberForCorrelatedUncertainties();

  // Throw mean values
  for(unsigned int p = 0; p < nExperiments; ++p) {
    // Throw one (normalized) random number for correlated
    // uncertainties that is valid in all bins
    double rCorr = rand_->Gaus(0.,1.);
    while( rCorr <= minCorr ) {
      rCorr = rand_->Gaus(0.,1.);
    }

    // Loop over all bins and get individual predictions
    for(unsigned int bin = 0; bin < Parameters::nBins(); ++bin) {
      double prediction = -1.;
      bool negativePrediction = true;
      while( negativePrediction ) {
	// Throw one (normalized) random number for uncorrelated
	// uncertainties that is valid in this bin only
	double rUncorr = rand_->Gaus(0.,1.);
	
	// Scale the normalized random numbers by the uncertainties' size
	// to obtain variation of yield
	double uncorrVar = rUncorr * uncorrelatedUncerts_.at(bin);
	double corrVar   = rCorr   * correlatedUncerts_.at(bin);
	
	// Add variations to yield
	prediction = meanPredictions_.at(bin) + uncorrVar + corrVar;
	
	// Check if prediction is positive
	if( prediction >= 0. ) {
	  negativePrediction = false;
	}
      }

      // Throw predicted yields from Poisson with
      // this mean
      obs.at(bin) = rand_->Poisson(prediction);
    } // End of loop over bins
    
    hTestStat->Fill(testStatistic(obs));
  }
  std::cout << "ok" << std::endl;
  
  std::cout << "\n\n----- Global p-value of observation -----" << std::endl;
  double tSObs = testStatistic(observedYields_);
  int binTSObs = hTestStat->FindBin(tSObs);
  int binTSTot = hTestStat->GetNbinsX()+1; // Include overflows
  double gpVal = hTestStat->Integral(binTSObs,binTSTot)/hTestStat->Integral(1,binTSTot);
  if( binTSObs == binTSTot ) {
    std::cerr << "  ERROR: observed value of test statistic out of histogram range" << std::endl;
  } else if( hTestStat->GetBinContent(binTSTot) > 0 ) {
    std::cerr << "  WARNING: distribution of test statistic has overflows (N = " << hTestStat->GetBinContent(binTSTot) << ")" << std::endl;
  }
  std::cout << "         observed value of test statistic : " << tSObs << std::endl;
  std::cout << "  global p-value (including correlations) : " << gpVal  << " (" << TMath::NormQuantile(1.-gpVal) << "sig)" << std::endl;

  hTestStat->Draw();
  gPad->SetLogy();
  gPad->SaveAs("TestStatistic.pdf");

  delete hTestStat;
}
Developer: mschrode, Project: UserCode, Lines: 73, Source: computeSignificance.C


Note: The TRandom::Poisson examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.