This article collects typical usage examples of the C++ method Symbol::SimpleBind. If you are wondering what Symbol::SimpleBind does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, Symbol.
The following shows 2 code examples of the Symbol::SimpleBind method, sorted by popularity by default.
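Both examples are excerpts from programs built on MXNet's C++ package (mxnet-cpp). For orientation, here is a minimal sketch of the two SimpleBind call shapes the examples use; the function name Bind and its parameters are placeholders, not taken from either example:

#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

// A minimal sketch of both SimpleBind call shapes; `net`, `args_map`, and
// `aux_map` are placeholders supplied by the caller.
Executor *Bind(Symbol net, const Context &ctx,
               const std::map<std::string, NDArray> &args_map,
               const std::map<std::string, NDArray> &aux_map,
               bool pass_aux) {
  if (pass_aux) {
    // Long form (used by Example 1): explicitly pass the gradient buffers,
    // per-argument gradient request types, and auxiliary states; empty maps
    // simply fall back to the defaults.
    return net.SimpleBind(ctx, args_map,
                          std::map<std::string, NDArray>(),    // arg grad store
                          std::map<std::string, OpReqType>(),  // grad req types
                          aux_map);                            // aux states
  }
  // Short form (used by Example 2): the three trailing maps default to empty.
  return net.SimpleBind(ctx, args_map);
}

In both forms, the caller owns the returned Executor and must delete it when done.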
Example 1: Extract
void Extract(NDArray data) {
  /* Normalize the two pictures by subtracting the mean image. */
  data.Slice(0, 1) -= mean_img;
  data.Slice(1, 2) -= mean_img;
  args_map["data"] = data;
  /* Bind the executor; empty maps for the gradient stores and gradient
     request types are fine because no gradients are needed for feature
     extraction, while aux_map carries the auxiliary states. */
  executor = net.SimpleBind(global_ctx, args_map, map<string, NDArray>(),
                            map<string, OpReqType>(), aux_map);
  executor->Forward(false);
  /* Copy the output back to the CPU and print out the features. */
  auto array = executor->outputs[0].Copy(Context(kCPU, 0));
  NDArray::WaitAll();
  for (int i = 0; i < 1024; ++i) {
    cout << array.At(0, i) << ",";
  }
  cout << endl;
}
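This example relies on member state (net, global_ctx, mean_img, args_map, aux_map, executor) set up elsewhere, presumably by loading a pretrained model. A hedged sketch of how such a function might be driven; the image shape here is an assumption for illustration, not taken from the original:

void RunExtract() {
  // Two RGB 224x224 images; the shape is a hypothetical placeholder.
  std::vector<float> pixels(2 * 3 * 224 * 224, 0.0f);
  // ... fill `pixels` with preprocessed image data ...
  NDArray batch(Shape(2, 3, 224, 224), global_ctx, false);
  batch.SyncCopyFromCPU(pixels.data(), pixels.size());
  NDArray::WaitAll();
  Extract(batch);  // prints the first image's 1024-dim feature vector
}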
Example 2: Run
//......... part of the code is omitted here .........
                           Shape(2, 2), 500);
  Symbol tanh3 = Activation("tanh3", conv3, ActivationActType::tanh);
  Symbol pool3 = Pooling("pool3", tanh3, Shape(2, 2), PoolingPoolType::max,
                         false, false, PoolingPoolingConvention::valid,
                         Shape(1, 1));
  Symbol flatten = Flatten("flatten", pool3);
  Symbol fc1 = FullyConnected("fc1", flatten, fc1_w, fc1_b, 500);
  Symbol tanh4 = Activation("tanh4", fc1, ActivationActType::tanh);
  Symbol fc2 = FullyConnected("fc2", tanh4, fc2_w, fc2_b, 10);
  Symbol lenet = SoftmaxOutput("softmax", fc2, data_label);
  for (auto s : lenet.ListArguments()) {
    LG << s;
  }
  /*setup basic configs*/
  int val_fold = 1;
  int W = 28;
  int H = 28;
  int batch_size = 42;
  int max_epoch = 100000;
  float learning_rate = 1e-4;
  float weight_decay = 1e-4;
  /*prepare the data*/
  vector<float> data_vec, label_vec;
  size_t data_count = GetData(&data_vec, &label_vec);
  const float *dptr = data_vec.data();
  const float *lptr = label_vec.data();
  // Keep the full dataset in main memory and copy batches to device
  // memory while training.
  NDArray data_array = NDArray(Shape(data_count, 1, W, H), ctx_cpu, false);
  // Storing everything in device memory would also work.
  NDArray label_array = NDArray(Shape(data_count), ctx_cpu, false);
  data_array.SyncCopyFromCPU(dptr, data_count * W * H);
  label_array.SyncCopyFromCPU(lptr, data_count);
  data_array.WaitToRead();
  label_array.WaitToRead();
  // Hold out val_fold tenths of the data for validation.
  size_t train_num = data_count * (1 - val_fold / 10.0);
  train_data = data_array.Slice(0, train_num);
  train_label = label_array.Slice(0, train_num);
  val_data = data_array.Slice(train_num, data_count);
  val_label = label_array.Slice(train_num, data_count);
  LG << "here read fin";
  /*initialize some of the args*/
  // map<string, NDArray> args_map;
  args_map["data"] = data_array.Slice(0, batch_size).Copy(ctx_dev);
  args_map["data_label"] = label_array.Slice(0, batch_size).Copy(ctx_dev);
  NDArray::WaitAll();
  LG << "here slice fin";
  /*
   * We could also feed in some of the arguments other than the inputs
   * ourselves; fc2_w and fc1_b, for example:
   */
  // args_map["fc2_w"] =
  //     NDArray(mshadow::Shape2(500, 4 * 4 * 50), ctx_dev, false);
  // NDArray::SampleGaussian(0, 1, &args_map["fc2_w"]);
  // args_map["fc1_b"] = NDArray(mshadow::Shape1(10), ctx_dev, false);
  // args_map["fc1_b"] = 0;
  lenet.InferArgsMap(ctx_dev, &args_map, args_map);
  Optimizer *opt = OptimizerRegistry::Find("ccsgd");
  opt->SetParam("momentum", 0.9)
      ->SetParam("rescale_grad", 1.0)
      ->SetParam("clip_gradient", 10);
  for (int ITER = 0; ITER < max_epoch; ++ITER) {
    size_t start_index = 0;
    while (start_index < train_num) {
      if (start_index + batch_size > train_num) {
        // The last batch would run past the end; shift it back so it
        // still contains batch_size samples.
        start_index = train_num - batch_size;
      }
      args_map["data"] =
          train_data.Slice(start_index, start_index + batch_size)
              .Copy(ctx_dev);
      args_map["data_label"] =
          train_label.Slice(start_index, start_index + batch_size)
              .Copy(ctx_dev);
      start_index += batch_size;
      NDArray::WaitAll();
      // Rebind on every batch: SimpleBind picks up the fresh "data" and
      // "data_label" arrays from args_map.
      Executor *exe = lenet.SimpleBind(ctx_dev, args_map);
      exe->Forward(true);
      exe->Backward();
      exe->UpdateAll(opt, learning_rate, weight_decay);
      delete exe;
    }
    LG << "Iter " << ITER
       << ", accuracy: " << ValAccuracy(batch_size * 10, lenet);
  }
}
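One design note on Example 2: SimpleBind is called inside the inner loop, so a fresh Executor is created and destroyed for every batch, repeating shape inference and memory allocation each time. Because mxnet-cpp NDArray handles are shallow copies that share the underlying array, a common alternative is to bind once and copy each batch into the executor's bound argument arrays. A hedged sketch under those assumptions, reusing the names from the example:

// A minimal sketch, assuming the setup from Example 2 (lenet, args_map,
// ctx_dev, train_data, train_label, opt, batch_size, train_num, ...).
Executor *exe = lenet.SimpleBind(ctx_dev, args_map);  // bind once
auto args = exe->arg_dict();  // name -> NDArray handles bound to the executor
for (int iter = 0; iter < max_epoch; ++iter) {
  for (size_t start = 0; start + batch_size <= train_num;
       start += batch_size) {
    // Copy the current batch into the arrays the executor was bound with;
    // CopyTo writes into the existing device arrays in place.
    train_data.Slice(start, start + batch_size).CopyTo(&args["data"]);
    train_label.Slice(start, start + batch_size).CopyTo(&args["data_label"]);
    NDArray::WaitAll();
    exe->Forward(true);
    exe->Backward();
    exe->UpdateAll(opt, learning_rate, weight_decay);
  }
}
delete exe;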