本文整理汇总了C++中Net::ForwardPrefilled方法的典型用法代码示例。如果您正苦于以下问题:C++ Net::ForwardPrefilled方法的具体用法?C++ Net::ForwardPrefilled怎么用?C++ Net::ForwardPrefilled使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Net的用法示例。
在下文中一共展示了Net::ForwardPrefilled方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: solver_teststep_multigpu
// Usage: caffe_('solver_teststep_multigpu')
//
// Runs one prefilled forward pass on the net of every solver in the
// multi-GPU P2P sync group. For each solver the active CUDA device is
// switched to that solver's device before the forward pass and restored
// to the caller's original device afterwards, so the caller's CUDA
// context is left unchanged.
static void solver_teststep_multigpu(MEX_ARGS) {
mxCHECK(nrhs == 0,
"Usage: caffe_('solver_teststep_multigpu')");
vector<shared_ptr<P2PSync<float>>>* sync_vec = sync_ptr->get_syncs();
int initial_device;
CUDA_CHECK(cudaGetDevice(&initial_device));
// size_t index avoids the signed/unsigned comparison with vector::size().
for (size_t i = 0; i < sync_vec->size(); i++)
{
// Slot 0 is the root solver held by sync_ptr itself; the remaining
// slots hold the worker solvers. (NOTE(review): assumes get_syncs()
// mirrors that layout — confirm against the P2PSync implementation.)
Solver<float> *solver;
if (i == 0)
{
solver = sync_ptr->solver().get();
}
else
{
solver = (*sync_vec)[i]->solver().get();
}
Net<float> *net = solver->net().get();
CUDA_CHECK(cudaSetDevice(solver->param().device_id()));
net->ForwardPrefilled();
// Restore the original device before the next iteration / on exit.
CUDA_CHECK(cudaSetDevice(initial_device));
}
}
示例2: computeFeatures
// Extracts features from the blob named LAYER for every image in imgs,
// running the net in batches of BATCH_SIZE through its MemoryDataLayer
// (layer 0). One feature vector of length count()/num() per input image
// is appended to `output`, in input order.
//
// The final partial batch is padded with copies of imgs[0] so the net
// always sees a full batch; padded slots are not copied into `output`.
// Assumes BATCH_SIZE > 0 and, when imgs is non-empty, that layer 0 is a
// MemoryDataLayer — TODO confirm at the call sites.
void computeFeatures(Net<Dtype>& caffe_test_net,
    const vector<Mat>& imgs,
    string LAYER,
    int BATCH_SIZE,
    vector<vector<Dtype>>& output) {
  int nImgs = imgs.size();
  int nBatches = ceil(nImgs * 1.0f / BATCH_SIZE);
  for (int batch = 0; batch < nBatches; batch++) {
    int actBatchSize = min(nImgs - batch * BATCH_SIZE, BATCH_SIZE);
    vector<Mat> imgs_b;
    imgs_b.reserve(BATCH_SIZE);  // one allocation per batch
    if (actBatchSize >= BATCH_SIZE) {
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE,
          imgs.begin() + (batch + 1) * BATCH_SIZE);
    } else {
      // Last (partial) batch: pad with imgs[0] up to a full batch.
      imgs_b.insert(imgs_b.end(), imgs.begin() + batch * BATCH_SIZE, imgs.end());
      for (int j = actBatchSize; j < BATCH_SIZE; j++)
        imgs_b.push_back(imgs[0]);
    }
    vector<int> dvl(BATCH_SIZE, 0);  // dummy labels for the data layer
    boost::dynamic_pointer_cast<caffe::MemoryDataLayer<Dtype>>(
        caffe_test_net.layers()[0])->AddMatVector(imgs_b, dvl);
    vector<Blob<Dtype>*> dummy_bottom_vec;
    Dtype loss = 0.0f;
    caffe_test_net.ForwardPrefilled(&loss);
    const boost::shared_ptr<Blob<Dtype>> feat = caffe_test_net.blob_by_name(LAYER);
    const int featDim = feat->count() / feat->num();  // per-image feature length
    for (int i = 0; i < actBatchSize; i++) {
      // Read-only access: cpu_data() does not mark the blob's CPU copy
      // dirty (mutable_cpu_data() would, forcing a needless GPU resync).
      const Dtype* feat_data = feat->cpu_data() + feat->offset(i);
      output.push_back(vector<Dtype>(feat_data, feat_data + featDim));
    }
    LOG(INFO) << "Batch " << batch << "/" << nBatches << " (" << actBatchSize << " images) done";
  }
}
示例3: net_forward
// Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)
//
// Forwards the net identified by the MATLAB handle hNet. With no extra
// arguments the whole (prefilled) net is run; a second argument runs
// from that layer index to the end; a third runs the inclusive layer
// range [from_layer, to_layer].
static void net_forward(MEX_ARGS) {
// BUGFIX: the original check (nrhs <= 3) accepted nrhs == 0, in which
// case prhs[0] was read out of bounds. At least one argument (the net
// handle) is required.
mxCHECK(nrhs >= 1 && nrhs <= 3 && mxIsStruct(prhs[0]),
"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
if (nrhs == 1)
net->ForwardPrefilled();
else if (nrhs == 2)
{
// Layer indices arrive as MATLAB doubles; validate before converting.
mxCHECK(mxIsDouble(prhs[1]),
"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
net->ForwardFrom((int)mxGetScalar(prhs[1]));
}
else if (nrhs == 3)
{
mxCHECK(mxIsDouble(prhs[1]) && mxIsDouble(prhs[2]),
"Usage: caffe_('net_forward', hNet, from_layer=0, to_layer=end)");
net->ForwardFromTo((int)mxGetScalar(prhs[1]), (int)mxGetScalar(prhs[2]));
}
}
示例4: net_forward
// Usage: caffe_('net_forward', hNet)
//
// Runs one full forward pass over the already-filled input blobs of the
// net identified by the MATLAB handle hNet.
static void net_forward(MEX_ARGS) {
// Exactly one argument — the net handle struct — is accepted.
mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
"Usage: caffe_('net_forward', hNet)");
handle_to_ptr<Net<float> >(prhs[0])->ForwardPrefilled();
}