本文整理汇总了C++中save_weights函数的典型用法代码示例。如果您正苦于以下问题:C++ save_weights函数的具体用法?C++ save_weights怎么用?C++ save_weights使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了save_weights函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: train_mnist_distill
/*
** Train an MNIST classifier by distillation: blend the hard labels in
** train.y with soft targets from an ensemble (results/ensemble.csv),
** then run SGD, checkpointing to ./backup every epoch and every 100
** batches.
**
** cfgfile    - network configuration file
** weightfile - optional pretrained weights (may be NULL)
*/
void train_mnist_distill(char *cfgfile, char *weightfile)
{
    data_seed = time(0);
    srand(time(0));
    float avg_loss = -1;
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    char *backup_directory = "backup";
    int N = 50000;
    int epoch = (*net.seen)/N;
    /* BUG FIX: `train` was declared but the load was commented out, so
     * train.y below was read uninitialized (undefined behavior). */
    data train = load_all_mnist10();
    matrix soft = csv_to_matrix("results/ensemble.csv");
    /* soft/hard blend: y <- weight*soft + (1-weight)*y */
    float weight = .9;
    scale_matrix(soft, weight);
    scale_matrix(train.y, 1. - weight);
    matrix_add_matrix(soft, train.y);
    while(get_current_batch(net) < net.max_batches || net.max_batches == 0){
        clock_t time=clock();
        float loss = train_network_sgd(net, train, 1);
        if(avg_loss == -1) avg_loss = loss;
        avg_loss = avg_loss*.95 + loss*.05;
        if(get_current_batch(net)%100 == 0)
        {
            printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
        }
        /* epoch boundary: save a numbered snapshot */
        if(*net.seen/N > epoch){
            epoch = *net.seen/N;
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
            save_weights(net, buff);
        }
        /* rolling backup every 100 batches */
        if(get_current_batch(net)%100 == 0){
            char buff[256];
            sprintf(buff, "%s/%s.backup",backup_directory,base);
            save_weights(net, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s.weights", backup_directory, base);
    save_weights(net, buff);
    free_matrix(soft);   /* was leaked: no longer needed after the blend */
    free_network(net);
    free(base);
    free_data(train);
}
示例2: train_char_rnn
/*
** Train a character-level RNN on the raw text of `filename`, sampling
** random windows each iteration and checkpointing every 10/100 batches.
**
** cfgfile    - network configuration file
** weightfile - optional pretrained weights (may be NULL)
** filename   - plain-text training corpus
*/
void train_char_rnn(char *cfgfile, char *weightfile, char *filename)
{
    FILE *fp = fopen(filename, "r");
    /* BUG FIX: fopen was unchecked; a missing corpus crashed on fseek */
    if(!fp){
        fprintf(stderr, "Couldn't open file: %s\n", filename);
        return;
    }
    fseek(fp, 0, SEEK_END);
    size_t size = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    char *text = calloc(size + 1, sizeof(char)); /* +1 keeps it NUL-terminated */
    if(!text){
        fclose(fp);
        return;
    }
    /* BUG FIX: fread return was ignored; on a short read, only train on
     * the bytes actually received */
    size_t nread = fread(text, 1, size, fp);
    if(nread != size) size = nread;
    fclose(fp);
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    data_seed = time(0);
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    int batch = net.batch;
    int steps = net.time_steps;
    int i = (*net.seen)/net.batch;
    clock_t time;
    while(get_current_batch(net) < net.max_batches){
        i += 1;
        time=clock();
        /* sample batch/steps sequences of `steps` characters each */
        float_pair p = get_rnn_data(text, size, batch/steps, steps);
        float loss = train_network_datum(net, p.x, p.y) / (batch);
        free(p.x);
        free(p.y);
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;
        printf("%d: %f, %f avg, %f rate, %lf seconds\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time));
        if(i%100==0){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        if(i%10==0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
    /* BUG FIX: text, base and the network itself were leaked */
    free(text);
    free(base);
    free_network(net);
}
示例3: train_go
/*
** Train a Go move-prediction network on randomly sampled positions from
** /home/pjreddie/go.train, checkpointing every epoch, every 100 batches
** (rolling), and every 10000 batches (numbered).
**
** cfgfile    - network configuration file
** weightfile - optional pretrained weights (may be NULL)
*/
void train_go(char *cfgfile, char *weightfile)
{
    data_seed = time(0);
    srand(time(0));
    float avg_loss = -1;
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    char *backup_directory = "/home/pjreddie/backup/";
    char buff[256];
    /* one 19x19 board plane per batch entry, for inputs and move targets */
    float *board = calloc(19*19*net.batch, sizeof(float));
    float *move = calloc(19*19*net.batch, sizeof(float));
    moves m = load_go_moves("/home/pjreddie/go.train");
    int N = m.n;
    int epoch = (*net.seen)/N;
    while(get_current_batch(net) < net.max_batches || net.max_batches == 0){
        clock_t time=clock();
        random_go_moves(m, board, move, net.batch);
        float loss = train_network_datum(net, board, move) / net.batch;
        if(avg_loss == -1) avg_loss = loss;
        avg_loss = avg_loss*.95 + loss*.05;
        printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
        if(*net.seen/N > epoch){
            epoch = *net.seen/N;
            /* local `path` avoids shadowing the outer buff */
            char path[256];
            sprintf(path, "%s/%s_%d.weights", backup_directory,base, epoch);
            save_weights(net, path);
        }
        if(get_current_batch(net)%100 == 0){
            char path[256];
            sprintf(path, "%s/%s.backup",backup_directory,base);
            save_weights(net, path);
        }
        if(get_current_batch(net)%10000 == 0){
            char path[256];
            sprintf(path, "%s/%s_%d.backup",backup_directory,base,get_current_batch(net));
            save_weights(net, path);
        }
    }
    sprintf(buff, "%s/%s.weights", backup_directory, base);
    save_weights(net, buff);
    /* BUG FIX: board and move buffers were leaked */
    free(board);
    free(move);
    free_network(net);
    free(base);
}
示例4: normalize_net
/*
** Add batch normalization to every CONVOLUTIONAL or CONNECTED layer that
** does not already have it; for GRU layers flagged batch_normalize, the
** six internal connected sub-layers are normalized instead.  The result
** is written to `outfile`.
*/
void normalize_net(char *cfgfile, char *weightfile, char *outfile)
{
    gpu_index = -1; /* weight surgery only; no GPU needed */
    network *net = load_network(cfgfile, weightfile, 0);
    int j;
    for(j = 0; j < net->n; ++j){
        layer cur = net->layers[j];
        if(cur.type == CONVOLUTIONAL && !cur.batch_normalize){
            net->layers[j] = normalize_layer(cur, cur.n);
        } else if(cur.type == CONNECTED && !cur.batch_normalize){
            net->layers[j] = normalize_layer(cur, cur.outputs);
        } else if(cur.type == GRU && cur.batch_normalize){
            layer *parts[6] = {
                cur.input_z_layer, cur.input_r_layer, cur.input_h_layer,
                cur.state_z_layer, cur.state_r_layer, cur.state_h_layer
            };
            int p;
            for(p = 0; p < 6; ++p){
                *parts[p] = normalize_layer(*parts[p], parts[p]->outputs);
            }
            net->layers[j].batch_normalize = 1;
        }
    }
    save_weights(net, outfile);
    free_network(net);
}
示例5: oneoff
/*
** One-off weight-surgery tool.  Temporarily reshapes the second-to-last
** layer (11921 outputs, bias/weight pointers advanced by 5 slots) so an
** incompatible weight file can be loaded, restores the original layout,
** replicates the first third of that layer's biases/weights into the
** other two thirds, resets the image counter, and saves the result.
** The exact constants (11921, offset 5) are specific to the model this
** was written for.
*/
void oneoff(char *cfgfile, char *weightfile, char *outfile)
{
gpu_index = -1;
network *net = parse_network_cfg(cfgfile);
/* remember the real output count of the second-to-last layer */
int oldn = net->layers[net->n - 2].n;
int c = net->layers[net->n - 2].c;
/* damp existing weights and zero biases before loading */
scal_cpu(oldn*c, .1, net->layers[net->n - 2].weights, 1);
scal_cpu(oldn, 0, net->layers[net->n - 2].biases, 1);
/* pretend the layer has 11921 outputs starting 5 slots in, so
 * load_weights fills the right region of the arrays */
net->layers[net->n - 2].n = 11921;
net->layers[net->n - 2].biases += 5;
net->layers[net->n - 2].weights += 5*c;
if(weightfile){
load_weights(net, weightfile);
}
/* undo the pointer/size shim; order matters, these must mirror the
 * three adjustments above exactly */
net->layers[net->n - 2].biases -= 5;
net->layers[net->n - 2].weights -= 5*c;
net->layers[net->n - 2].n = oldn;
printf("%d\n", oldn);
layer l = net->layers[net->n - 2];
/* copy the first third of biases/weights into thirds two and three
 * (presumably to seed anchor/class replicas -- specific to this model) */
copy_cpu(l.n/3, l.biases, 1, l.biases + l.n/3, 1);
copy_cpu(l.n/3, l.biases, 1, l.biases + 2*l.n/3, 1);
copy_cpu(l.n/3*l.c, l.weights, 1, l.weights + l.n/3*l.c, 1);
copy_cpu(l.n/3*l.c, l.weights, 1, l.weights + 2*l.n/3*l.c, 1);
*net->seen = 0; /* restart the training-image counter */
save_weights(net, outfile);
free_network(net);
}
示例6: denormalize_net
/*
** Fold batch-normalization parameters into the raw weights of every
** (de)convolutional and connected layer (and the six connected
** sub-layers of each GRU), clear the batch_normalize flags, and write
** the simplified network to `outfile`.
*/
void denormalize_net(char *cfgfile, char *weightfile, char *outfile)
{
    gpu_index = -1; /* CPU-only weight transform */
    network *net = load_network(cfgfile, weightfile, 0);
    int j;
    for (j = 0; j < net->n; ++j) {
        layer cur = net->layers[j];
        if (!cur.batch_normalize) continue; /* nothing to fold */
        if (cur.type == DECONVOLUTIONAL || cur.type == CONVOLUTIONAL) {
            denormalize_convolutional_layer(cur);
            net->layers[j].batch_normalize = 0;
        } else if (cur.type == CONNECTED) {
            denormalize_connected_layer(cur);
            net->layers[j].batch_normalize = 0;
        } else if (cur.type == GRU) {
            layer *parts[6] = {
                cur.input_z_layer, cur.input_r_layer, cur.input_h_layer,
                cur.state_z_layer, cur.state_r_layer, cur.state_h_layer
            };
            int p;
            for (p = 0; p < 6; ++p) {
                denormalize_connected_layer(*parts[p]);
                parts[p]->batch_normalize = 0;
            }
            net->layers[j].batch_normalize = 0;
        }
    }
    save_weights(net, outfile);
    free_network(net);
}
示例7: train_cifar
/*
** Train a CIFAR-10 classifier on the full 50k training set, saving a
** numbered snapshot each epoch, a rolling .backup every 100 batches,
** and a final .weights file when max_batches is reached.
*/
void train_cifar(char *cfgfile, char *weightfile)
{
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    char *backup_directory = "/home/pjreddie/backup/";
    int classes = 10;
    int N = 50000; /* CIFAR-10 training-set size */
    char **labels = get_labels("data/cifar/labels.txt");
    data train = load_all_cifar10();
    int epoch = (*net.seen)/N;
    float avg_loss = -1;
    while(net.max_batches == 0 || get_current_batch(net) < net.max_batches){
        clock_t start = clock();
        float loss = train_network_sgd(net, train, 1);
        /* exponential moving average of the loss, seeded on first pass */
        avg_loss = (avg_loss == -1) ? loss : avg_loss*.95 + loss*.05;
        printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-start), *net.seen);
        if(*net.seen/N > epoch){
            epoch = *net.seen/N;
            char path[256];
            sprintf(path, "%s/%s_%d.weights",backup_directory,base, epoch);
            save_weights(net, path);
        }
        if(get_current_batch(net)%100 == 0){
            char path[256];
            sprintf(path, "%s/%s.backup",backup_directory,base);
            save_weights(net, path);
        }
    }
    char path[256];
    sprintf(path, "%s/%s.weights", backup_directory, base);
    save_weights(net, path);
    free_network(net);
    free_ptrs((void**)labels, classes);
    free(base);
    free_data(train);
}
示例8: partial
/*
** Load at most `max` layers' worth of weights into the cfg-defined
** network and save the result -- used to truncate a pretrained model.
*/
void partial(char *cfgfile, char *weightfile, char *outfile, int max)
{
network net = parse_network_cfg(cfgfile);
if(weightfile){
load_weights_upto(&net, weightfile, max);
}
/* NOTE(review): other examples in this file dereference net.seen as a
 * pointer ((*net.seen)); if this darknet version does too, the line
 * below nulls the pointer rather than zeroing the counter
 * (*net.seen = 0).  Verify against this version's network struct. */
net.seen = 0;
save_weights(net, outfile);
}
示例9: average
/*
** darknet "average" tool: element-wise averages the parameters of
** several models that share one cfg.
**   argv[2] = cfg, argv[3] = output weights, argv[4..] = input weights.
** The first weight file seeds `sum`; each additional file is added in,
** then everything is divided by the total model count.
*/
void average(int argc, char *argv[])
{
    char *cfgfile = argv[2];
    char *outfile = argv[3];
    gpu_index = -1; /* pure CPU array arithmetic */
    network *net = parse_network_cfg(cfgfile);
    network *sum = parse_network_cfg(cfgfile);
    load_weights(sum, argv[4]);
    int extras = argc - 5;
    int k, j;
    /* accumulate each remaining model into sum */
    for(k = 0; k < extras; ++k){
        load_weights(net, argv[k+5]);
        for(j = 0; j < net->n; ++j){
            layer src = net->layers[j];
            layer dst = sum->layers[j];
            if(src.type == CONVOLUTIONAL){
                int nweights = src.n*src.c*src.size*src.size;
                axpy_cpu(src.n, 1, src.biases, 1, dst.biases, 1);
                axpy_cpu(nweights, 1, src.weights, 1, dst.weights, 1);
                if(src.batch_normalize){
                    axpy_cpu(src.n, 1, src.scales, 1, dst.scales, 1);
                    axpy_cpu(src.n, 1, src.rolling_mean, 1, dst.rolling_mean, 1);
                    axpy_cpu(src.n, 1, src.rolling_variance, 1, dst.rolling_variance, 1);
                }
            } else if(src.type == CONNECTED){
                axpy_cpu(src.outputs, 1, src.biases, 1, dst.biases, 1);
                axpy_cpu(src.outputs*src.inputs, 1, src.weights, 1, dst.weights, 1);
            }
        }
    }
    /* scale by 1/(seed model + extras) */
    int total = extras + 1;
    for(j = 0; j < net->n; ++j){
        layer acc = sum->layers[j];
        if(acc.type == CONVOLUTIONAL){
            int nweights = acc.n*acc.c*acc.size*acc.size;
            scal_cpu(acc.n, 1./total, acc.biases, 1);
            scal_cpu(nweights, 1./total, acc.weights, 1);
            if(acc.batch_normalize){
                scal_cpu(acc.n, 1./total, acc.scales, 1);
                scal_cpu(acc.n, 1./total, acc.rolling_mean, 1);
                scal_cpu(acc.n, 1./total, acc.rolling_variance, 1);
            }
        } else if(acc.type == CONNECTED){
            scal_cpu(acc.outputs, 1./total, acc.biases, 1);
            scal_cpu(acc.outputs*acc.inputs, 1./total, acc.weights, 1);
        }
    }
    save_weights(sum, outfile);
}
示例10: train_imagenet
/*
** Train an ImageNet classifier with a double-buffered loader: one
** pthread prefetches the next 1024-image batch while the current one
** trains.  Runs forever (while(1)); snapshots are written every 1000
** iterations and the learning rate is cut 10x every 20000 iterations.
** NOTE(review): this version treats net.seen as a plain integer, unlike
** the (*net.seen) pointer usage in other examples here -- different
** darknet vintages; confirm against the struct before reusing.
*/
void train_imagenet(char *cfgfile, char *weightfile)
{
data_seed = time(0);
srand(time(0));
float avg_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
network net = parse_network_cfg(cfgfile);
if(weightfile){
load_weights(&net, weightfile);
}
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
//net.seen=0;
int imgs = 1024;
int i = net.seen/imgs; /* resume iteration count from images already seen */
char **labels = get_labels("data/inet.labels.list");
list *plist = get_paths("/data/imagenet/cls.train.list");
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
clock_t time;
pthread_t load_thread;
data train;
data buffer;
/* kick off the first asynchronous load into `buffer` */
load_thread = load_data_thread(paths, imgs, plist->size, labels, 1000, 256, 256, &buffer);
while(1){
++i;
time=clock();
/* order is critical: wait for the loader, take ownership of the
 * batch, then immediately relaunch the loader on the spare buffer */
pthread_join(load_thread, 0);
train = buffer;
/*
image im = float_to_image(256, 256, 3, train.X.vals[114]);
show_image(im, "training");
cvWaitKey(0);
*/
load_thread = load_data_thread(paths, imgs, plist->size, labels, 1000, 256, 256, &buffer);
printf("Loaded: %lf seconds\n", sec(clock()-time));
time=clock();
float loss = train_network(net, train);
net.seen += imgs;
if(avg_loss == -1) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1; /* EMA of the loss for logging */
printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), net.seen);
free_data(train);
/* step learning-rate schedule: 10x drop every 20000 iterations */
if((i % 20000) == 0) net.learning_rate *= .1;
//if(i%100 == 0 && net.learning_rate > .00001) net.learning_rate *= .97;
if(i%1000==0){
char buff[256];
sprintf(buff, "/home/pjreddie/imagenet_backup/%s_%d.weights",base, i);
save_weights(net, buff);
}
}
}
示例11: rgbgr_net
/*
** Swap the R and B input channels of the first convolutional layer's
** weights (RGB <-> BGR) and save the network to `outfile`.  Only the
** first convolutional layer is touched.
*/
void rgbgr_net(char *cfgfile, char *weightfile, char *outfile)
{
    gpu_index = -1;
    network *net = load_network(cfgfile, weightfile, 0);
    int j;
    for(j = 0; j < net->n; ++j){
        if(net->layers[j].type != CONVOLUTIONAL) continue;
        rgbgr_weights(net->layers[j]);
        break; /* only the input-facing conv layer needs the swap */
    }
    save_weights(net, outfile);
    free_network(net);
}
示例12: rgbgr_net
/*
** Swap the R and B input channels of the first convolutional layer's
** filters (RGB <-> BGR) and save the network to `outfile`.
** (Older darknet API: value `network`, rgbgr_filters.)
*/
void rgbgr_net(char *cfgfile, char *weightfile, char *outfile)
{
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    int j;
    for(j = 0; j < net.n; ++j){
        if(net.layers[j].type != CONVOLUTIONAL) continue;
        rgbgr_filters(net.layers[j]);
        break; /* only the input-facing conv layer needs the swap */
    }
    save_weights(net, outfile);
}
示例13: denormalize_net
/*
** Fold batch-normalization parameters into the raw weights of every
** convolutional layer, clear the batch_normalize flags, and save the
** simplified network.  (Older darknet API: value `network`, layer_t.)
*/
static void denormalize_net(char *cfgfile, char *weightfile, char *outfile)
{
    gpu_index = -1; /* CPU-only weight transform */
    network net = parse_network_cfg(cfgfile);
    if (weightfile) {
        load_weights(&net, weightfile);
    }
    int j;
    for (j = 0; j < net.n; ++j) {
        layer_t cur = net.layers[j];
        if (cur.type != CONVOLUTIONAL || !cur.batch_normalize) continue;
        denormalize_convolutional_layer(cur);
        net.layers[j].batch_normalize = 0;
    }
    save_weights(net, outfile);
}
示例14: rescale_net
/*
** Rescale the first convolutional layer's filters by a factor of 2 with
** a -0.5 offset (remapping the expected input range) and save the
** network to `outfile`.  Only the first conv layer is modified.
*/
static void rescale_net(char *cfgfile, char *weightfile, char *outfile)
{
    gpu_index = -1;
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    int j;
    for(j = 0; j < net.n; ++j){
        if(net.layers[j].type != CONVOLUTIONAL) continue;
        rescale_filters(net.layers[j], 2, -.5);
        break; /* only the input-facing conv layer is rescaled */
    }
    save_weights(net, outfile);
}
示例15: average
static void average(int argc, char *argv[])
{
char *cfgfile = argv[2];
char *outfile = argv[3];
gpu_index = -1;
network net = parse_network_cfg(cfgfile);
network sum = parse_network_cfg(cfgfile);
char *weightfile = argv[4];
load_weights(&sum, weightfile);
int i, j;
int n = argc - 5;
for(i = 0; i < n; ++i){
weightfile = argv[i+5];
load_weights(&net, weightfile);
for(j = 0; j < net.n; ++j){
layer_t l = net.layers[j];
layer_t out = sum.layers[j];
if(l.type == CONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
fltadd(out.biases, l.biases, l.n);
fltadd(out.filters, l.filters, num);
}
if(l.type == CONNECTED){
fltadd(out.biases, l.biases, l.outputs);
fltadd(out.weights, l.weights, l.outputs * l.inputs);
}
}
}
n = n+1;
for(j = 0; j < net.n; ++j){
layer_t l = sum.layers[j];
if(l.type == CONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
scal_cpu(l.n, 1./n, l.biases, 1);
scal_cpu(num, 1./n, l.filters, 1);
}
if(l.type == CONNECTED){
scal_cpu(l.outputs, 1./n, l.biases, 1);
scal_cpu(l.outputs*l.inputs, 1./n, l.weights, 1);
}
}
save_weights(sum, outfile);
}