This page collects typical usage examples of the C++ method CData::PassEdits. If you are wondering how CData::PassEdits is used in C++, how to call it, or what real examples of it look like, the hand-picked code examples below may help. You can also browse further usage examples of the containing class, CData.
Six code examples of the CData::PassEdits method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
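The body of CData::PassEdits itself is not shown on this page; in every example below it acts as a boolean check that a full record x satisfies all edit constraints (range/ratio edits plus balance edits), typically right after Data.update_full_x_for_balance_edit has filled in the balance-edit components. The sketch below only illustrates that role, under the assumption that the edits can be written as linear constraints A*x <= b; PassEditsSketch, edit_A and edit_b are hypothetical names and are not part of the actual CData class.

// Hypothetical sketch only -- not the real CData::PassEdits implementation.
// Edit rules are modeled here as linear constraints edit_A * x <= edit_b,
// using the same newmat-style types as the examples on this page.
bool PassEditsSketch(const Matrix &edit_A, const ColumnVector &edit_b, const ColumnVector &x) {
  ColumnVector lhs = edit_A * x;               // evaluate every edit rule at once
  for (int j = 1; j <= edit_b.nrows(); j++) {  // vectors are 1-indexed, as in the examples
    if (lhs(j) > edit_b(j)) { return false; }  // any violated rule fails the record
  }
  return true;                                 // the record passes all edits
}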
Example 1: count_x_out_fn
int CFeasibilityMap::count_x_out_fn(CData &Data, int i_tau, int i_original, int n_simul, Uniform &randUnif) {
  // double case2_count_out = 0;
  int case2_count_out = 0; // Changed by Hang on 5/16/2015
  ColumnVector s_i = tau_to_s_fn(i_tau, Data.n_var);
  ColumnVector item_by_joint = Data.copy_non_balance_edit(s_i);
  ColumnVector tilde_y_i = Data.log_D_Observed.row(i_original).t();
  for (int i_simul=1; i_simul<=n_simul; i_simul++){
    // Generate from uniform distribution
    ColumnVector y_q = tilde_y_i;
    for (int temp_j=1; temp_j<=Data.n_var; temp_j++){
      if (item_by_joint(temp_j)==1){
        y_q(temp_j) = Data.logB_L(temp_j) + Data.logB_U_L(temp_j)*randUnif.Next();
      }
    }
    ColumnVector x_q = exp_ColumnVector(y_q);
    Data.update_full_x_for_balance_edit(x_q);
    // if (!Data.PassEdits(x_q)) { case2_count_out += 1.0;}
    if (!Data.PassEdits(x_q)) { case2_count_out += 1; } // Changed by Hang on 5/16/2015
  }
  if (case2_count_out == 0) {
    case2_count_out = 1;
  }
  return case2_count_out; // ADDED by Hang on 5/16/2015
}
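A possible call pattern for this helper; Data, i_tau, i_original, fmap and the newran Uniform generator randUnif are assumed to be set up elsewhere and are not shown on this page:

// Hypothetical usage sketch: count how many of 1000 uniform draws over the
// items selected by i_tau violate at least one edit for record i_original.
int n_out = fmap.count_x_out_fn(Data, i_tau, i_original, /*n_simul=*/1000, randUnif);
// n_out / 1000.0 approximates the edit-failing fraction of the proposal box;
// note the function returns at least 1, never 0, and Example 4 turns the same
// count into an area estimate.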
Example 2: CalculateInitProbA
void CParam::CalculateInitProbA(CData &Data) {
  // initial value of Y_out_compact and z_out
  int count_out = 0; int count_in = 0;
  ColumnVector z_out_large(toolarge_nout);
  Matrix Y_out_compact_large(toolarge_nout, n_var_independent);
  while ( (count_in < Data.n_sample) && (count_out < toolarge_nout) ) {
    int k = rdiscrete_fn(pi);
    ColumnVector mu_k = Mu.column(k); // Note that mu_k is Mu.column(k)
    ColumnVector y_compact_i = rMVN_fn(mu_k, LSIGMA[k-1]);
    ColumnVector x_compact_i = exp_ColumnVector(y_compact_i);
    ColumnVector x_i(Data.n_var);
    Data.UpdateFullVector(x_compact_i, x_i);
    Data.update_full_x_for_balance_edit(x_i);
    if (Data.PassEdits(x_i)) {
      count_in++;
    } else {
      count_out++;
      Y_out_compact_large.row(count_out) = y_compact_i.t();
      z_out_large(count_out) = k;
    }
  }
  Matrix Y_out_compact = Y_out_compact_large.rows(1,count_out); // cut extra space
  ColumnVector z_out = z_out_large.rows(1,count_out);           // cut extra space
  // calculate n_z and Sum_groupX
  Matrix Y_aug_compact = Y_in_compact & Y_out_compact;
  ColumnVector z_aug = z_in & z_out;
  int n_out = z_out.nrows();
  Prob_A = (1.0 * Data.n_sample / (Data.n_sample + n_out));
}
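The loop in this example keeps drawing from the mixture until Data.n_sample draws pass the edits (or the cap toolarge_nout is reached). Read that way, Prob_A appears to be a Monte Carlo estimate of the probability that a record generated by the current model satisfies all edits; this is an interpretation of the code, not a statement from its authors:

  Prob_A = n_sample / (n_sample + n_out)  ≈  Pr( PassEdits(x) is true ),  x drawn from the fitted mixture.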
Example 3: y_q
void CParam::S2_add(Uniform &randUnif, CData &Data) {
  int n_needtoupdate = 0;
  for (int i_faulty=1; i_faulty<=Data.n_faulty; i_faulty++){
    int i_original = Data.Faulty2Original[i_faulty-1];
    ColumnVector item_by_bal;
    ColumnVector s_i = S_Mat.column(i_faulty);
    ColumnVector item_by_rnorm = Data.get_item_by_norm_indicator(s_i, item_by_bal);
    // Generate from normal distribution
    if ( item_by_rnorm.sum() >= 1 ) { // if no random draw is needed, the other values set by balance edits remain the same
      n_needtoupdate++;
      ColumnVector mu_z_i = Mu.column(z_in(i_original));
      ColumnVector tilde_y_i = Data.log_D_Observed.row(i_original).t();
      ColumnVector s_1_compact = Data.get_compact_vector(item_by_rnorm);
      ColumnVector Mu_1i = subvector(mu_z_i, s_1_compact);
      LowerTriangularMatrix LSigma_1i_i;
      ColumnVector y_q(n_var);
      double log_cond_norm_q = calculate_log_cond_norm(Data, i_original, item_by_rnorm, tilde_y_i, y_q, true, LSigma_1i_i, s_i); // MODIFIED 2015/02/16
      ColumnVector y_i = (Y_in.row(i_original)).t();
      // ColumnVector y_part_i = subvector(y_i, item_by_rnorm);
      // Put values from balance edits
      ColumnVector x_q = exp_ColumnVector(y_q);
      Data.set_balance_edit_values_for_x_q(s_i, x_q, item_by_bal); // CHANGED by Hang, 2014/12/29
      // double log_cond_norm_i = log_MVN_fn(y_part_i, Mu_1i, LSigma_1i_i);
      double log_cond_norm_i = calculate_log_cond_norm(Data, i_original, item_by_rnorm, tilde_y_i, y_q, false, LSigma_1i_i, s_i); // CHANGED 2015/01/27, MODIFIED 2015/02/16
      // Acceptance/Rejection
      if (Data.PassEdits(x_q)) { // Check constraints
        y_q = log_ColumnVector(x_q);
        ColumnVector y_compact_q = Data.get_compact_vector(y_q);
        ColumnVector y_compact_i = Data.get_compact_vector(y_i);
        double log_full_norm_q = log_MVN_fn(y_compact_q, mu_z_i, LSIGMA_i[z_in(i_original)-1], logdet_and_more(z_in(i_original)));
        double log_full_norm_i = log_MVN_fn(y_compact_i, mu_z_i, LSIGMA_i[z_in(i_original)-1], logdet_and_more(z_in(i_original)));
        // Calculate acceptance ratio
        double logNum = log_full_norm_q - log_cond_norm_q;
        double logDen = log_full_norm_i - log_cond_norm_i;
        accept_rate(2) = exp( logNum - logDen );
        if (randUnif.Next() < accept_rate(2)){
          Y_in.row(i_original) = y_q.t();
          is_accept(2)++;
        }
      }
    }
  }
  is_accept(2) = is_accept(2) / n_needtoupdate;
}
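The quantity accept_rate(2) = exp(logNum - logDen) at the end of this example appears to have the standard Metropolis-Hastings form, with the full component normal for the compact log-scale vector as the target and the conditional normal built by calculate_log_cond_norm as the proposal (an interpretation of the code, not a statement from its authors):

  accept_rate(2) = [ p(y_q) / q(y_q) ] / [ p(y_i) / q(y_i) ]
                 = exp( (log_full_norm_q - log_cond_norm_q) - (log_full_norm_i - log_cond_norm_i) ),

and the proposal y_q replaces the current row of Y_in only when a Uniform(0,1) draw falls below this ratio, which is equivalent to accepting with probability min(1, ratio).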
Example 4: Rprintf
void CFeasibilityMap::Simulate_logUnif_case2(int n_simul, Uniform &randUnif, CData &Data) {
  if (useMap) { return; }
  if (feasibleMap.nrows() == 0 || feasibleMap.maximum() == 0) {
    Rprintf( "Feasibility Map need to be set or computed first\n");
    return;
  }
  Data.logUnif_case2 = Matrix(Data.n_faulty, Data.n_tau); Data.logUnif_case2 = 0;
  for (int i_original=1; i_original<=Data.n_sample; i_original++){
    if (Data.is_case(i_original,2)) {
      int i_faulty = Data.Original2Faulty[i_original-1];
      ColumnVector tilde_y_i = Data.log_D_Observed.row(i_original).t();
      for (int i_tau=1; i_tau<=Data.n_tau; i_tau++){
        if ( feasibleMap(i_tau,i_faulty)==1 ){
          double case2_count_out = 0;
          ColumnVector s_i = tau_to_s_fn(i_tau, Data.n_var);
          ColumnVector item_by_joint = Data.copy_non_balance_edit(s_i);
          for (int i_simul=1; i_simul<=n_simul; i_simul++){
            // Generate from uniform distribution
            ColumnVector y_q = tilde_y_i;
            for (int temp_j=1; temp_j<=Data.n_var; temp_j++){
              if (item_by_joint(temp_j)==1){
                y_q(temp_j) = Data.logB_L(temp_j) + Data.logB_U_L(temp_j)*randUnif.Next();
              }
            }
            ColumnVector x_q = exp_ColumnVector(y_q);
            Data.update_full_x_for_balance_edit(x_q);
            if (!Data.PassEdits(x_q)) { case2_count_out += 1.0; }
          }
          double Area = 1.0;
          for (int temp_j=1; temp_j<=Data.n_var; temp_j++){
            if (item_by_joint(temp_j)==1){
              Area = Area * Data.logB_U_L(temp_j);
            }
          }
          Area = Area * case2_count_out / n_simul;
          Data.set_logUnif_case2(i_original, i_tau, -log(Area));
        }
      }
      if ( ((1.0*i_original/100)==(floor(1.0*i_original/100))) ){
        Rprintf( "logUnif_y_tilde for i_sample= %d\n", i_original);
      }
    }
  }
}
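In the Area calculation above, logB_U_L(temp_j) is used both as the width of the uniform proposal on the log scale (y_q = logB_L + logB_U_L * U) and as a side length of the box, so it appears to denote logB_U(j) - logB_L(j); that is an inference from the code, not something stated on this page. Under that reading, the stored value is the negative log of a Monte Carlo volume estimate:

  Area ≈ [ product over selected items j of (logB_U(j) - logB_L(j)) ] * (# draws failing PassEdits) / n_simul
  logUnif_case2(i_original, i_tau) = -log(Area)

i.e. minus the log volume of the edit-failing part of the proposal box, which is the log density of a uniform distribution restricted to that region.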
Example 5: initilize_D_and_S
void CFeasibilityMap::initilize_D_and_S(CData &Data) {
  ColumnVector order_to_test = get_order_to_test(Data.n_tau, Data.n_var);
  ColumnVector list_feasible_type2 = get_feasible_tau(Data);
  for (int i_faulty = 1; i_faulty <= Data.n_faulty; i_faulty++) {
    bool is_pass = false;
    int i_original = Data.Faulty2Original[i_faulty-1];
    ColumnVector x_tilde_i = (Data.D_Observed.row(i_original)).t();
    for (int i_order = 1; i_order <= order_to_test.nrows() && !is_pass; i_order++) {
      int i_tau = order_to_test(i_order);
      ColumnVector s_i = tau_to_s_fn(i_tau, Data.n_var);
      bool skip_for_type2 = false;
      if (Data.is_case(i_original,2) && list_feasible_type2(i_tau) == 0) { skip_for_type2 = true; }
      if (!skip_for_type2){
        ColumnVector x_mean;
        int is_feasible = feasible_test_fn(Data, x_tilde_i, s_i, i_original, true, Data.epsilon, x_mean);
        if (is_feasible > 0) {
          // copy solution to a temp vector
          ColumnVector temp = x_tilde_i;
          for (int index = 1, count = 0; index <= Data.n_var; index++){
            if (s_i(index) == 1) {
              count++;
              temp(index) = x_mean(count);
            }
          }
          Data.Debug = Debug;
          if (Data.PassEdits(temp)) { // then check whether it satisfies the edits
            is_pass = true;
            Data.initial_S_Mat.row(i_faulty) = s_i.t();
            Data.D_initial.row(i_original) = temp.t();
          }
        } // is_feasible
        Debug = false;
      }
    }
  }
}
Example 6: z_out_large
void CParam::S4_Z_out(CData &Data) {
  int count_out = 0, count_in = 0;
  ColumnVector z_out_large(toolarge_nout);
  Matrix Y_out_compact_large(toolarge_nout, n_var_independent);
  Matrix X_aux(n_sample, n_var);
  while ( (count_in < n_sample) && (count_out < toolarge_nout) ) {
    int k = rdiscrete_fn(pi);
    ColumnVector mu_k = Mu.column(k); // Note that mu_k is Mu.column(k)
    ColumnVector y_compact_i = rMVN_fn(mu_k, LSIGMA[k-1]);
    // ADDED by HANG to check for infinite values of x_full_i
    int check_infinity = 0;
    if (y_compact_i.maximum() > 700){
      check_infinity = 1;
      if ( msg_level >= 1 ) {
        Rprintf( " Warning: x_out from N(Mu_k,Sigma_k) > exp(700). There is no harm for convergence and inference, but the computation may get slower if you see this warning too often, e.g. every iteration\n");
      }
    }
    if (check_infinity==0){
      ColumnVector x_compact_i = exp_ColumnVector(y_compact_i);
      ColumnVector x_full_i(n_var);
      Data.UpdateFullVector(x_compact_i, x_full_i);
      Data.update_full_x_for_balance_edit(x_full_i);
      // no need to consider min(x_full_i) > 0 since x_compact_i = exp(y_compact_i)
      if (Data.PassEdits(x_full_i)) {
        X_aux.row(++count_in) = x_full_i.t();
      } else {
        Y_out_compact_large.row(++count_out) = y_compact_i.t();
        z_out_large(count_out) = k;
      }
    } // if (check_infinity==0) : ADDED by HANG
  } // while ( (count_in < n_sample) && (count_out < toolarge_nout) )
  Matrix Y_out_compact = Y_out_compact_large.rows(1,count_out); // cut extra space
  ColumnVector z_out = z_out_large.rows(1,count_out);           // cut extra space
  Prob_A = 1.0 * n_sample / (n_sample + count_out);
  // calculate n_z and Sum_groupX
  Data.UpdateCompactMatrix(Y_in_compact, Y_in);
  Y_aug_compact = Y_in_compact & Y_out_compact;
  z_aug = z_in & z_out;
  int n_aug = z_aug.nrows();
  n_z = 0;
  X_bar = Matrix(n_var_independent, K); X_bar = 0.0;
  for (int i_aug=1; i_aug<=n_aug; i_aug++) {
    int k = z_aug(i_aug);
    n_z(k) = n_z(k) + 1;
    RowVector y_compact_t = Y_aug_compact.row(i_aug);
    ColumnVector y_i_compact = y_compact_t.t();
    X_bar.column(k) += y_i_compact;
  }
  for (int k = 1; k <= K; k++) {
    if (n_z(k) > 0) {
      X_bar.column(k) *= (1.0/n_z(k));
    }
  }
}