This page collects typical usage examples of the C++ method DVector::save. If you have been wondering what exactly DVector::save does, how to call it, or what real code using it looks like, the curated examples here may help. You can also explore further usage examples for the DVector class that the method belongs to.
Five code examples of DVector::save are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ samples.
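All five examples below share the same three-step pattern: size a DVector<double>, fill it with model predictions, and write it to disk with save. The following is a minimal sketch of just that pattern. It assumes libFM's util/matrix.h is on the include path (that header defines DVector along with the setSize, dim, operator(), and save members used below); the file name and fill values are purely illustrative, and the one-value-per-line output format is an assumption rather than something the examples guarantee.

#include "util/matrix.h" // libFM header assumed to define DVector<T>

int main() {
    DVector<double> pred;
    pred.setSize(100);                  // allocate room for 100 values
    for (uint i = 0; i < pred.dim; i++) {
        pred(i) = 0.5;                  // placeholder; the real examples fill this via fml->predict(test, pred)
    }
    pred.save("predictions.txt");       // illustrative path; writes the vector as plain text (assumed: one value per line)
    return 0;
}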
Example 1: main
//......... part of the code omitted here .........
fm.regv = reg[0];
((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
} else if (reg.size() == 3) {
fm.reg0 = reg[0];
fm.regw = reg[1];
fm.regv = reg[2];
((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
} else {
fm.reg0 = reg[0];
fm.regw = 0.0;
fm.regv = 0.0;
int j = 1;
for (uint g = 0; g < meta.num_attr_groups; g++) {
((fm_learn_mcmc*)fml)->w_lambda(g) = reg[j];
j++;
}
for (uint g = 0; g < meta.num_attr_groups; g++) {
for (int f = 0; f < fm.num_factor; f++) {
((fm_learn_mcmc*)fml)->v_lambda(g,f) = reg[j];
}
j++;
}
}
}
} else {
// set the regularization; for standard SGD, groups are not supported
{
vector<double> reg = cmdline.getDblValues(param_regular);
assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3));
if (reg.size() == 0) {
fm.reg0 = 0.0;
fm.regw = 0.0;
fm.regv = 0.0;
} else if (reg.size() == 1) {
fm.reg0 = reg[0];
fm.regw = reg[0];
fm.regv = reg[0];
} else {
fm.reg0 = reg[0];
fm.regw = reg[1];
fm.regv = reg[2];
}
}
}
{
fm_learn_sgd* fmlsgd= dynamic_cast<fm_learn_sgd*>(fml);
if (fmlsgd) {
// set the learning rates (individual per layer)
{
vector<double> lr = cmdline.getDblValues(param_learn_rate);
assert((lr.size() == 1) || (lr.size() == 3));
if (lr.size() == 1) {
fmlsgd->learn_rate = lr[0];
fmlsgd->learn_rates.init(lr[0]);
} else {
fmlsgd->learn_rate = 0;
fmlsgd->learn_rates(0) = lr[0];
fmlsgd->learn_rates(1) = lr[1];
fmlsgd->learn_rates(2) = lr[2];
}
}
}
}
if (rlog != NULL) {
rlog->init();
}
if (cmdline.getValue(param_verbosity, 0) > 0) {
fm.debug();
fml->debug();
}
// () learn
fml->learn(train, test);
// () Prediction at the end (skipped for mcmc, which evaluates during sampling)
if (cmdline.getValue(param_method).compare("mcmc")) { // compare() != 0, i.e. the method is not "mcmc"
cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << endl;
}
// () Save prediction
if (cmdline.hasParameter(param_out)) {
DVector<double> pred;
pred.setSize(test.num_cases);
fml->predict(test, pred);
pred.save(cmdline.getValue(param_out));
}
} catch (string &e) {
cerr << endl << "ERROR: " << e << endl;
} catch (char const* &e) {
cerr << endl << "ERROR: " << e << endl;
}
}
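The file written by pred.save at the end of Example 1 is plain text, so it needs nothing beyond the standard library to consume. A small reader sketch, under the same assumed one-value-per-line format (the file name is again illustrative):

#include <fstream>
#include <iostream>
#include <vector>

int main() {
    std::ifstream in("predictions.txt"); // file produced by pred.save(...) above (illustrative name)
    std::vector<double> pred;
    double value;
    while (in >> value) {                // stream extraction skips the whitespace/newlines between values
        pred.push_back(value);
    }
    std::cout << "read " << pred.size() << " predictions" << std::endl;
    return 0;
}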
Example 2: main
//......... part of the code omitted here .........
// (2) Setup the factorization machine
fm_model fm;
{
fm.num_attribute = max(train.num_feature, test.num_feature);
fm.init_stdev = cmdline.getValue(param_init_stdev, 0.01);
// set the number of dimensions in the factorization
{
vector<int> dim = cmdline.getIntValues(param_dim);
assert(dim.size() == 3);
fm.k0 = dim[0] != 0;
fm.k1 = dim[1] != 0;
fm.num_factor = dim[2];
}
fm.init();
// set the regularization
{
vector<double> reg = cmdline.getDblValues(param_regular);
assert(reg.size() == 3);
fm.reg0 = reg[0];
fm.regw.init(reg[1]);
fm.regv.init(reg[2]);
}
}
// (3) Setup the learning method:
fm_learn* fml;
if (! cmdline.getValue(param_method, "sgd").compare("sgd")) { // compare() is case-sensitive, so the default must be lowercase to match
fml = new fm_learn_sgd_element();
((fm_learn_sgd*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
((fm_learn_sgd*)fml)->learn_rate = cmdline.getValue(param_learn_rate, 0.1);
} else if (! cmdline.getValue(param_method).compare("als")) {
fml = new fm_learn_als_simultaneous();
((fm_learn_als*)fml)->num_iter = cmdline.getValue(param_num_iter, 100);
if (cmdline.getValue("task").compare("r") ) {
throw "ALS can only solve regression tasks.";
}
} else {
throw "unknown method";
}
fml->fm = &fm;
fml->max_target = train.max_target;
fml->min_target = train.min_target;
if (! cmdline.getValue("task").compare("r") ) {
fml->task = 0;
} else if (! cmdline.getValue("task").compare("c") ) {
fml->task = 1;
// recode targets for classification: values <= 0 become -1, everything else +1
for (uint i = 0; i < train.target.dim; i++) {
train.target(i) = (train.target(i) <= 0.0) ? -1.0 : 1.0;
}
for (uint i = 0; i < test.target.dim; i++) {
test.target(i) = (test.target(i) <= 0.0) ? -1.0 : 1.0;
}
} else {
throw "unknown task";
}
// (4) init the logging
RLog* rlog = NULL;
if (cmdline.hasParameter(param_r_log)) {
ofstream* out_rlog = NULL;
std::string r_log_str = cmdline.getValue(param_r_log);
out_rlog = new ofstream(r_log_str.c_str());
if (! out_rlog->is_open()) {
throw "Unable to open file " + r_log_str;
}
std::cout << "logging to " << r_log_str.c_str() << std::endl;
rlog = new RLog(out_rlog);
}
fml->log = rlog;
fml->init();
if (rlog != NULL) {
rlog->init();
}
if (cmdline.getValue(param_verbosity, 0) > 0) {
fm.debug();
fml->debug();
}
// () learn
fml->learn(train, test);
// () Prediction
std::cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << std::endl;
// () Save prediction
if (cmdline.hasParameter(param_out)) {
DVector<double> pred;
pred.setSize(test.data.dim);
fml->predict(test, pred);
pred.save(cmdline.getValue(param_out));
}
} catch (std::string &e) {
std::cerr << e << std::endl;
} catch (char const* &e) {
std::cerr << e << std::endl;
}
}
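Example 2 recodes the targets in place when the task is classification ("c"): any target <= 0.0 becomes -1.0 and everything else becomes 1.0, presumably the label encoding the learner expects. A standalone sketch of that mapping with made-up targets:

#include <iostream>
#include <vector>

int main() {
    std::vector<double> target = {0.0, 2.5, -1.3, 0.7}; // illustrative raw targets
    for (std::size_t i = 0; i < target.size(); i++) {
        target[i] = (target[i] <= 0.0) ? -1.0 : 1.0;    // same rule as Example 2
    }
    for (double t : target) {
        std::cout << t << " ";                          // prints: -1 1 -1 1
    }
    std::cout << std::endl;
    return 0;
}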
Example 3: main
//......... part of the code omitted here .........
// set the learning rates (individual per layer)
{
vector<double> lr = cmdline.getDblValues(param_learn_rate);
assert((lr.size() == 1) || (lr.size() == 3));
if (lr.size() == 1) {
fmlsgd->learn_rate = lr[0];
fmlsgd->learn_rates.init(lr[0]);
} else {
fmlsgd->learn_rate = 0;
fmlsgd->learn_rates(0) = lr[0];
fmlsgd->learn_rates(1) = lr[1];
fmlsgd->learn_rates(2) = lr[2];
}
}
}
}
if (rlog != NULL) {
rlog->init();
}
if (cmdline.getValue(param_verbosity, 0) > 0) {
fm.debug();
fml->debug();
}
// () learn
fml->learn(train, test);
// () Prediction at the end (skipped for mcmc, which evaluates during sampling)
if (cmdline.getValue(param_method).compare("mcmc")) { // compare() != 0, i.e. the method is not "mcmc"
std::cout << "Final\t" << "Train=" << fml->evaluate(train) << "\tTest=" << fml->evaluate(test) << std::endl;
}
//compute output ranked list for target ids
if (cmdline.hasParameter(param_out_ranked_list_dir)) {
std::cout << "Compute and store output ranked list for target ids...\t" << std::endl;
int TOP_K = cmdline.getValue(param_top_k, 100);
string out_ranked_list_dir = cmdline.getValue(param_out_ranked_list_dir);
vector<int> target_ids;
if (cmdline.hasParameter(param_list_id_output)) {
target_ids = cmdline.getIntValues(param_list_id_output);
} else {
int n_fixed_cases = train.relation(FIXED_BLOCK).data->num_cases;
target_ids.resize(n_fixed_cases);
for (int i = 0; i < n_fixed_cases; i++) {
target_ids[i] = i;
}
}
Recommendation rec;
rec.fm = &fm;
rec.target_ids = target_ids;
rec.MAX_THREADS = NUM_THREADS;
rec.TOP_K = TOP_K;
rec.OUT_DIR = out_ranked_list_dir;
rec.evaluate(train);
}
// () Save prediction
if (cmdline.hasParameter(param_out)) {
DVector<double> pred;
pred.setSize(test.num_cases);
if (! cmdline.getValue(param_method).compare("bpr") && cmdline.hasParameter(param_relation)) { // BLOCK BPR: compare() == 0 means the method is "bpr"
if (NUM_THREADS>1){
//PARALLEL BLOCK BPR
((fm_learn_sgd_element_BPR_blocks_parallel*)fml)->predict(test, pred);
}
else{
//BLOCK BPR
((fm_learn_sgd_element_BPR_blocks*)fml)->predict(test, pred);
}
}
else if (! cmdline.getValue(param_method).compare("bpra")) { // BLOCK BPR with adaptive regularization
if (NUM_THREADS>1){
//PARALLEL BLOCK BPRA
((fm_learn_sgd_element_BPR_blocks_adapt_reg_parallel*)fml)->predict(test, pred);
}
else{
//BLOCK BPRA
((fm_learn_sgd_element_BPR_blocks_adapt_reg*)fml)->predict(test, pred);
}
}
else{
fml->predict(test, pred);
}
pred.save(cmdline.getValue(param_out));
}
// () write down the latent vectors (unary and pairwise interactions)
if (cmdline.hasParameter(param_out_vectors)) {
fm.printOutState(cmdline.getValue(param_out_vectors));
}
} catch (std::string &e) {
std::cerr << std::endl << "ERROR: " << e << std::endl;
} catch (char const* &e) {
std::cerr << std::endl << "ERROR: " << e << std::endl;
}
}
Example 4: fm_train_test
//......... part of the code omitted here .........
}
else
{
// set the regularization; for standard SGD, groups are not supported
{
const Value& regValue = config["regular"];
vector<double> reg;
for (SizeType i = 0; i < regValue.Size(); i++) // Size() returns an unsigned SizeType
reg.push_back(regValue[i].GetDouble());
assert(
(reg.size() == 0) || (reg.size() == 1)
|| (reg.size() == 3));
if (reg.size() == 0)
{
fm.reg0 = 0.0;
fm.regw = 0.0;
fm.regv = 0.0;
}
else if (reg.size() == 1)
{
fm.reg0 = reg[0];
fm.regw = reg[0];
fm.regv = reg[0];
}
else
{
fm.reg0 = reg[0];
fm.regw = reg[1];
fm.regv = reg[2];
}
}
}
{
fm_learn_sgd* fmlsgd = dynamic_cast<fm_learn_sgd*>(fml);
if (fmlsgd)
{
// set the learning rates (individual per layer)
{
const Value& lrValue = config["learn_rate"];
vector<double> lr;
for (SizeType i = 0; i < lrValue.Size(); i++)
lr.push_back(lrValue[i].GetDouble());
assert((lr.size() == 1) || (lr.size() == 3));
if (lr.size() == 1)
{
fmlsgd->learn_rate = lr[0];
fmlsgd->learn_rates.init(lr[0]);
}
else
{
fmlsgd->learn_rate = 0;
fmlsgd->learn_rates(0) = lr[0];
fmlsgd->learn_rates(1) = lr[1];
fmlsgd->learn_rates(2) = lr[2];
}
}
}
}
// () learn
fml->learn(train, test);
// () Prediction at the end (not for mcmc and als)
if (string(config["method"].GetString()) != "mcmc")
{
std::cout << "Final\t" << "Train=" << fml->evaluate(train)
<< "\tTest=" << fml->evaluate(test) << std::endl;
}
// () Save prediction
DVector<double> pred;
pred.setSize(test.num_cases);
fml->predict(test, pred);
for (uint i = 0; i < test.num_cases; i++)
prediction.push_back(pred(i));
if (config["pred_output"].GetBool())
pred.save(config["pred"].GetString());
if (string(config["method"].GetString()) == "sgd")
{
fm_learn_sgd_element* fml_sgd = dynamic_cast<fm_learn_sgd_element*>(fml);
delete fml_sgd;
}
else if (string(config["method"].GetString()) == "mcmc")
{
fm_learn_mcmc_simultaneous* fml_mcmc = dynamic_cast<fm_learn_mcmc_simultaneous*>(fml);
delete fml_mcmc;
}
} catch (std::string &e)
{
std::cerr << std::endl << "ERROR: " << e << std::endl;
} catch (char const* &e)
{
std::cerr << std::endl << "ERROR: " << e << std::endl;
}
return 0;
}
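Example 4 pulls its hyperparameters from a parsed JSON config instead of the command line, using what appears to be RapidJSON (Value, Size(), GetDouble(), GetString(), GetBool()). A self-contained sketch of that parsing pattern; the JSON literal is illustrative and mirrors the three-element "regular" array the example accepts:

#include <cassert>
#include <iostream>
#include <vector>
#include "rapidjson/document.h"

int main() {
    const char* json = "{\"regular\": [0.0, 0.01, 0.02]}"; // illustrative config
    rapidjson::Document config;
    config.Parse(json);

    const rapidjson::Value& regValue = config["regular"];
    std::vector<double> reg;
    for (rapidjson::SizeType i = 0; i < regValue.Size(); i++) {
        reg.push_back(regValue[i].GetDouble());
    }
    assert(reg.size() == 3); // reg0, regw, regv, as in Example 4
    std::cout << "reg0=" << reg[0] << " regw=" << reg[1] << " regv=" << reg[2] << std::endl;
    return 0;
}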
Example 5: executeFM
//......... part of the code omitted here .........
fm.num_attribute = num_all_attribute;
fm.init_stdev = 0.1;
// set the number of dimensions in the factorization
{
vector<int> dim = { 1, 1, k };
assert(dim.size() == 3);
fm.k0 = dim[0] != 0;
fm.k1 = dim[1] != 0;
fm.num_factor = dim[2];
}
fm.init();
}
// Setup the learning method:
fm_learn* fml;
fm.w.init_normal(fm.init_mean, fm.init_stdev);
fml = new fm_learn_mcmc_simultaneous();
fml->validation = validation;
((fm_learn_mcmc*)fml)->num_iter = learn_iter;
((fm_learn_mcmc*)fml)->num_eval_cases = test.num_cases;
((fm_learn_mcmc*)fml)->do_sample = true;
((fm_learn_mcmc*)fml)->do_multilevel = true;
fml->fm = &fm;
fml->max_target = train.max_target;
fml->min_target = train.min_target;
fml->task = 0;
fml->meta = &meta;
// std::cout << "Opening output file" << endl;
RLog* rlog = NULL;
ofstream* out_rlog = NULL;
out_rlog = new ofstream(stats_filename);
if (!out_rlog->is_open()) {
throw "Unable to open file " + stats_filename;
}
// std::cout << "logging to " << r_log_str.c_str() << endl;
rlog = new RLog(out_rlog);
//
fml->log = rlog;
fml->init();
// set the regularization; for als and mcmc this can be individual per group
vector<double> reg = {}; // hard-coded empty here, so the all-zero default branch below is taken
// valid layouts: empty, one shared value, (reg0, regw, regv), or reg0 followed by one w-lambda and one v-lambda per attribute group
assert((reg.size() == 0) || (reg.size() == 1) || (reg.size() == 3) || (reg.size() == (1 + meta.num_attr_groups * 2)));
if (reg.size() == 0) {
fm.reg0 = 0.0;
fm.regw = 0.0;
fm.regv = 0.0;
((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
}
else if (reg.size() == 1) {
fm.reg0 = reg[0];
fm.regw = reg[0];
fm.regv = reg[0];
((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
}
else if (reg.size() == 3) {
fm.reg0 = reg[0];
fm.regw = reg[1];
fm.regv = reg[2];
((fm_learn_mcmc*)fml)->w_lambda.init(fm.regw);
((fm_learn_mcmc*)fml)->v_lambda.init(fm.regv);
}
else {
fm.reg0 = reg[0];
fm.regw = 0.0;
fm.regv = 0.0;
int j = 1;
for (uint g = 0; g < meta.num_attr_groups; g++) {
((fm_learn_mcmc*)fml)->w_lambda(g) = reg[j];
j++;
}
for (uint g = 0; g < meta.num_attr_groups; g++) {
for (int f = 0; f < fm.num_factor; f++) {
((fm_learn_mcmc*)fml)->v_lambda(g, f) = reg[j];
}
j++;
}
}
if (rlog != NULL) {
rlog->init();
}
fm.debug();
fml->debug();
// () learn
fml->learn(train, test);
std::cout << "Save prediction" << endl;
DVector<double> pred;
pred.setSize(test.num_cases);
fml->predict(test, pred);
pred.save(results_filename);
}
catch (string &e) {
cerr << endl << "ERROR: " << e << endl;
}
catch (char const* &e) {
cerr << endl << "ERROR: " << e << endl;
}
return 0;
}