This page collects typical usage examples of the C++ method Blob::cpu_data. If you are unsure what Blob::cpu_data does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore the containing class Blob for more context.
The following shows 15 code examples of Blob::cpu_data, drawn from open-source projects and listed roughly by popularity.
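Before the examples, a minimal sketch of the read/write convention that all of them rely on: cpu_data() returns a read-only pointer to the blob's contents in host memory, while mutable_cpu_data() returns a writable pointer and marks the data as modified on the CPU. The values below are made up for illustration.

#include "caffe/blob.hpp"
using caffe::Blob;

void cpu_data_demo() {
  Blob<float> blob(1, 3, 4, 5);            // N x C x H x W
  float* out = blob.mutable_cpu_data();    // writable host pointer
  for (int i = 0; i < blob.count(); ++i) {
    out[i] = 0.5f * i;                     // fill with arbitrary values
  }
  const float* in = blob.cpu_data();       // read-only host pointer;
  float first = in[0];                     // safe to read, never write through it
  (void)first;
}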
Example 1:
void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
  if (!force_nd_im2col && num_spatial_axes == 2) {
    col2im_gpu(col_buff, conv_in_channels,
               conv_input_shape.cpu_data()[1], conv_input_shape.cpu_data()[2],
               kernel_shape.cpu_data()[0], kernel_shape.cpu_data()[1],
               pad.cpu_data()[0], pad.cpu_data()[1],
               stride.cpu_data()[0], stride.cpu_data()[1], data);
  }
  // Note: this pre-dilation variant silently does nothing in the N-D case;
  // compare Example 2, which handles it with col2im_nd_gpu.
}
Example 2:
inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
  if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
    col2im_gpu(col_buff, conv_in_channels_,
               conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
               kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
               pad_.cpu_data()[0], pad_.cpu_data()[1],
               stride_.cpu_data()[0], stride_.cpu_data()[1],
               dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);
  } else {
    col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_,
                  conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
                  kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(),
                  dilation_.gpu_data(), data);
  }
}
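In Examples 1 and 2, kernel_shape_, pad_, stride_, and dilation_ are small integer blobs whose elements are read through cpu_data(). A minimal sketch of how such a parameter blob might be populated, modelled on Caffe's convolution layer setup (the extents 3 and 5 are made up for illustration):

#include <vector>
#include "caffe/blob.hpp"

void shape_blob_demo() {
  caffe::Blob<int> kernel_shape;
  std::vector<int> shape(1, 2);                    // one axis of extent 2
  kernel_shape.Reshape(shape);
  int* kernel_shape_data = kernel_shape.mutable_cpu_data();
  kernel_shape_data[0] = 3;                        // kernel height (made up)
  kernel_shape_data[1] = 5;                        // kernel width (made up)
  // Read-only access, as in conv_col2im_gpu above:
  // kernel_shape.cpu_data()[0] == 3, kernel_shape.cpu_data()[1] == 5
}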
Example 3: min
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
                    blob_bottom.height(), blob_bottom.width());
  const Dtype* bottom_data = blob_bottom.cpu_data();
  Dtype* top_data = blob_top->mutable_cpu_data();
  Dtype alpha = layer_param.alpha();
  Dtype beta = layer_param.beta();
  int size = layer_param.local_size();
  for (int n = 0; n < blob_bottom.num(); ++n) {
    for (int c = 0; c < blob_bottom.channels(); ++c) {
      for (int h = 0; h < blob_bottom.height(); ++h) {
        for (int w = 0; w < blob_bottom.width(); ++w) {
          // Across-channel LRN: top = bottom / (1 + alpha/size * sum(x^2))^beta,
          // summing x^2 over a window of `size` channels centered at c.
          int c_start = c - (size - 1) / 2;
          int c_end = min(c_start + size, blob_bottom.channels());
          c_start = max(c_start, 0);
          Dtype scale = 1.;
          for (int i = c_start; i < c_end; ++i) {
            Dtype value = blob_bottom.data_at(n, i, h, w);
            scale += value * value * alpha / size;
          }
          *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
        }
      }
    }
  }
}
Example 4: Reshape
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
    case Caffe::GPU:
      if (copy_diff) {
        caffe_copy(count_, source.gpu_diff(),
                   static_cast<Dtype*>(diff_->mutable_gpu_data()));
      } else {
        caffe_copy(count_, source.gpu_data(),
                   static_cast<Dtype*>(data_->mutable_gpu_data()));
      }
      break;
    case Caffe::CPU:
      if (copy_diff) {
        caffe_copy(count_, source.cpu_diff(),
                   static_cast<Dtype*>(diff_->mutable_cpu_data()));
      } else {
        caffe_copy(count_, source.cpu_data(),
                   static_cast<Dtype*>(data_->mutable_cpu_data()));
      }
      break;
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
}
Example 5: FillDatum
TYPED_TEST(DataTransformTest, TestCropSize) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels equal to the label
  const int_tp label = 0;
  const int_tp channels = 3;
  const int_tp height = 4;
  const int_tp width = 5;
  const int_tp crop_size = 2;
  transform_param.set_crop_size(crop_size);
  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST,
                                     Caffe::GetDefaultDevice());
  transformer->InitRand();
  Blob<TypeParam>* blob =
      new Blob<TypeParam>(1, channels, crop_size, crop_size);
  for (int_tp iter = 0; iter < this->num_iter_; ++iter) {
    transformer->Transform(datum, blob);
    EXPECT_EQ(blob->num(), 1);
    EXPECT_EQ(blob->channels(), datum.channels());
    EXPECT_EQ(blob->height(), crop_size);
    EXPECT_EQ(blob->width(), crop_size);
    // Cropping never changes pixel values, so every element equals the label.
    for (int_tp j = 0; j < blob->count(); ++j) {
      EXPECT_EQ(blob->cpu_data()[j], label);
    }
  }
}
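FillDatum is a helper from Caffe's data-transformer tests whose body is not shown here. A plausible reconstruction from the call site (an assumption, not the verbatim source) is:

// Hypothetical reconstruction of the FillDatum test helper: fills a Datum
// with constant pixels equal to `label` (or the index j when unique_pixels).
#include "caffe/proto/caffe.pb.h"

void FillDatum(const int_tp label, const int_tp channels, const int_tp height,
               const int_tp width, const bool unique_pixels, Datum* datum) {
  datum->set_label(label);
  datum->set_channels(channels);
  datum->set_height(height);
  datum->set_width(width);
  std::string* data = datum->mutable_data();
  data->clear();
  for (int_tp j = 0; j < channels * height * width; ++j) {
    data->push_back(static_cast<char>(unique_pixels ? j : label));
  }
}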
Example 6: ReshapeLike
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
    case Caffe::GPU:
      if (copy_diff) {
        caffe_copy(count_, source.gpu_diff(),
                   static_cast<Dtype*>(diff_->mutable_gpu_data()));
      } else {
        caffe_copy(count_, source.gpu_data(),
                   static_cast<Dtype*>(data_->mutable_gpu_data()));
      }
      break;
    case Caffe::CPU:
      if (copy_diff) {
        caffe_copy(count_, source.cpu_diff(),
                   static_cast<Dtype*>(diff_->mutable_cpu_data()));
      } else {
        caffe_copy(count_, source.cpu_data(),
                   static_cast<Dtype*>(data_->mutable_cpu_data()));
      }
      break;
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
}
Example 7: Reshape
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
#if 0
    case Caffe::GPU:
      if (copy_diff) {
        CUDA_CHECK(cudaMemcpy(diff_->mutable_gpu_data(), source.gpu_diff(),
                              sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
      } else {
        CUDA_CHECK(cudaMemcpy(data_->mutable_gpu_data(), source.gpu_data(),
                              sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
      }
      break;
#endif
    case Caffe::CPU:
      if (copy_diff) {
        memcpy(diff_->mutable_cpu_data(), source.cpu_diff(),
               sizeof(Dtype) * count_);
      } else {
        memcpy(data_->mutable_cpu_data(), source.cpu_data(),
               sizeof(Dtype) * count_);
      }
      break;
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
}
Example 8: TestForward
void TestForward() {
  // Get the loss without a specified objective weight -- should be
  // equivalent to explicitly specifying a weight of 1.
  LayerParameter layer_param;
  layer_param.mutable_multi_t_loss_param()->set_num_center(N_);
  EntropyTLossLayer<Dtype> layer_weight_1(layer_param);
  layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler2(filler_param);
  filler2.Fill(layer_weight_1.blobs()[0].get());
  caffe_rng_uniform(layer_weight_1.blobs()[1]->count(), Dtype(0.9), Dtype(1.1),
                    layer_weight_1.blobs()[1]->mutable_cpu_data());
  layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype loss_weight_1 =
      layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  // Get the loss again with a different objective weight; check that it is
  // scaled appropriately.
  const Dtype kLossWeight = 3.7;
  LayerParameter layer_param2;
  layer_param2.mutable_multi_t_loss_param()->set_num_center(N_);
  layer_param2.add_loss_weight(kLossWeight);
  EntropyTLossLayer<Dtype> layer_weight_2(layer_param2);
  layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  caffe_copy(layer_weight_2.blobs()[0]->count(),
             layer_weight_1.blobs()[0]->cpu_data(),
             layer_weight_2.blobs()[0]->mutable_cpu_data());
  caffe_copy(layer_weight_2.blobs()[1]->count(),
             layer_weight_1.blobs()[1]->cpu_data(),
             layer_weight_2.blobs()[1]->mutable_cpu_data());
  layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype loss_weight_2 =
      layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  const Dtype kErrorMargin = 1e-3;
  EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin);
  // Make sure the loss is non-trivial.
  const Dtype kNonTrivialAbsThresh = 1e-1;
  EXPECT_GE(fabs(loss_weight_1), kNonTrivialAbsThresh);
  // Recompute the per-center weighted squared distances on the CPU and
  // compare them against the layer's distance() blob.
  int m = M_, n = layer_param.multi_t_loss_param().num_center(), p = K_;
  Blob<Dtype>* distance = layer_weight_1.distance();
  const Dtype* cpu_data = blob_bottom_data_->cpu_data();
  const Dtype* cpu_dist = distance->cpu_data();
  const Dtype* cpu_center = layer_weight_1.blobs()[0]->cpu_data();
  const Dtype* cpu_sigma = layer_weight_1.blobs()[1]->cpu_data();
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      Dtype acc = Dtype(0);
      for (int k = 0; k < p; ++k) {
        acc += (cpu_data[i*p + k] - cpu_center[k*n + j])
             * (cpu_data[i*p + k] - cpu_center[k*n + j])
             * cpu_sigma[k*n + j];
      }
      EXPECT_NEAR(acc, cpu_dist[i*n + j], kErrorMargin) << i << " " << j;
    }
  }
}
Example 9:
void hdf5_save_nd_dataset<double>(
    const hid_t file_id, const string& dataset_name, const Blob<double>& blob) {
  hsize_t dims[HDF5_NUM_DIMS];
  dims[0] = blob.num();
  dims[1] = blob.channels();
  dims[2] = blob.height();
  dims[3] = blob.width();
  herr_t status = H5LTmake_dataset_double(
      file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data());
  CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name;
}
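A hedged usage sketch of this function (the file path and dataset name are made up; H5Fcreate and H5Fclose are standard HDF5 C API calls, and the appropriate Caffe and HDF5 headers are assumed to be included):

// Hypothetical caller: dump a blob to an HDF5 file under the dataset "data".
void save_blob_demo(const Blob<double>& blob) {
  hid_t file_id = H5Fcreate("/tmp/blob.h5", H5F_ACC_TRUNC,
                            H5P_DEFAULT, H5P_DEFAULT);
  CHECK_GE(file_id, 0) << "Failed to create HDF5 file";
  hdf5_save_nd_dataset<double>(file_id, "data", blob);
  herr_t status = H5Fclose(file_id);
  CHECK_GE(status, 0) << "Failed to close HDF5 file";
}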
Example 10: NumSequenceMatches
int NumSequenceMatches(const TransformationParameter transform_param,
                       const Datum& datum, Phase phase) {
  // Get crop sequence with Caffe seed 1701.
  DataTransformer<Dtype>* transformer =
      new DataTransformer<Dtype>(transform_param, phase);
  const int crop_size = transform_param.crop_size();
  int crop_h = transform_param.crop_h();
  int crop_w = transform_param.crop_w();
  if (crop_size > 0) {
    crop_h = crop_w = crop_size;
  }
  Caffe::set_random_seed(seed_);
  transformer->InitRand();
  Blob<Dtype>* blob =
      new Blob<Dtype>(1, datum.channels(), datum.height(), datum.width());
  if (crop_h > 0 || crop_w > 0) {
    blob->Reshape(1, datum.channels(), crop_h, crop_w);
  }
  vector<vector<Dtype> > crop_sequence;
  for (int iter = 0; iter < this->num_iter_; ++iter) {
    vector<Dtype> iter_crop_sequence;
    transformer->Transform(datum, blob);
    for (int j = 0; j < blob->count(); ++j) {
      iter_crop_sequence.push_back(blob->cpu_data()[j]);
    }
    crop_sequence.push_back(iter_crop_sequence);
  }
  // Transform the datum again and count how many elements match the
  // sequence stored during the first pass.
  int num_sequence_matches = 0;
  for (int iter = 0; iter < this->num_iter_; ++iter) {
    vector<Dtype> iter_crop_sequence = crop_sequence[iter];
    transformer->Transform(datum, blob);
    for (int j = 0; j < blob->count(); ++j) {
      num_sequence_matches +=
          (crop_sequence[iter][j] == blob->cpu_data()[j]);
    }
  }
  return num_sequence_matches;
}
Example 11:
inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
  if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
    im2col_gpu(data, conv_in_channels_,
               conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
               kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
               pad_.cpu_data()[0], pad_.cpu_data()[1],
               stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff);
  } else {
    im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_,
                  conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
                  kernel_shape_.gpu_data(), pad_.gpu_data(),
                  stride_.gpu_data(), col_buff);
  }
}
Example 12: filler
void LocalLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype* x_data = col_buffer_.mutable_cpu_data();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  // E is a 1 x K_ all-ones vector; multiplying it against the K_ x N_
  // `intermediate` matrix below sums each column, i.e. computes the
  // per-location dot product of the local weights with the column buffer.
  Blob<Dtype> E;
  E.Reshape(1, 1, 1, K_);
  FillerParameter filler_param;
  filler_param.set_value(1);
  ConstantFiller<Dtype> filler(filler_param);
  filler.Fill(&E);
  Blob<Dtype> intermediate;
  intermediate.Reshape(1, 1, K_, N_);
  for (int n = 0; n < num_; n++) {
    im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_,
               width_, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_,
               x_data);
    for (int m = 0; m < num_output_; m++) {
      caffe_mul(K_*N_, x_data, weight + this->blobs_[0]->offset(m),
                intermediate.mutable_cpu_data());
      caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
                            (Dtype)1., E.cpu_data(),
                            intermediate.cpu_data(),
                            (Dtype)0., top_data + top[0]->offset(n, m));
    }
    if (bias_term_) {
      caffe_add(M_ * N_, this->blobs_[1]->cpu_data(),
                top_data + top[0]->offset(n),
                top_data + top[0]->offset(n));
    }
  }
}
Example 13: write_blob_to_file
static void write_blob_to_file(const std::string& file_name,
                               const Blob<Dtype>& blob) {
  std::ofstream file(file_name.c_str(), std::ios::out | std::ios::binary);
  if (file.fail()) {
    ASSERT_FALSE(true);
    return;
  }
  // Header: the four shape dimensions (this assumes a 4-D blob), followed
  // by the raw data read through cpu_data().
  file.write(reinterpret_cast<const char*>(&blob.shape()[0]), 4 * sizeof(int));
  ASSERT_FALSE(file.fail());
  file.write(reinterpret_cast<const char*>(blob.cpu_data()),
             blob.count() * sizeof(Dtype));
  ASSERT_FALSE(file.fail());
  file.close();
}
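A matching reader, shown as a sketch rather than as part of the original test, assuming the same 4-int header layout and using mutable_cpu_data() as the write-side counterpart of cpu_data():

// Hypothetical counterpart: read the 4-int shape header, reshape the blob,
// then fill its CPU memory via mutable_cpu_data().
template <typename Dtype>
static bool read_blob_from_file(const std::string& file_name,
                                Blob<Dtype>* blob) {
  std::ifstream file(file_name.c_str(), std::ios::in | std::ios::binary);
  if (file.fail()) {
    return false;
  }
  int shape[4];
  file.read(reinterpret_cast<char*>(shape), 4 * sizeof(int));
  if (file.fail()) {
    return false;
  }
  blob->Reshape(shape[0], shape[1], shape[2], shape[3]);
  file.read(reinterpret_cast<char*>(blob->mutable_cpu_data()),
            blob->count() * sizeof(Dtype));
  return !file.fail();
}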
Example 14: LOG
void BatchTripletLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Blob<Dtype>* feat = bottom[0];
    const Dtype* feat_data = feat->cpu_data();
    Dtype* feat_diff = feat->mutable_cpu_diff();
    int count = feat->count();
    int num = feat->num();
    int dim = count / num;
    int agg_step = num * sizeof(Dtype);
    Dtype* agg_data = (Dtype*)aggregator_->mutable_cpu_data();
    caffe_memset(num * agg_step, 0, agg_data);
    // For each sample, accumulate the +/- coefficients of every sample's
    // feature vector in its gradient; the final gemm then computes
    // feat_diff = agg_data * feat_data for the whole batch in one shot.
    Dtype scale1 = Dtype(2) / triplets_.size() * mu_;
    for (int i = 0; i < triplets_.size(); ++i) {
      int qry_id = triplets_[i].first_;
      int pos_id = triplets_[i].second_;
      int neg_id = triplets_[i].third_;
      agg_data[qry_id * num + neg_id] += scale1;
      agg_data[qry_id * num + pos_id] -= scale1;
      agg_data[pos_id * num + pos_id] += scale1;
      agg_data[pos_id * num + qry_id] -= scale1;
      agg_data[neg_id * num + qry_id] += scale1;
      agg_data[neg_id * num + neg_id] -= scale1;
    }
    Dtype scale2 = Dtype(2) / pos_pairs_.size() * (Dtype(1) - mu_);
    for (int i = 0; i < pos_pairs_.size(); ++i) {
      int qry_id = pos_pairs_[i].first;
      int pos_id = pos_pairs_[i].second;
      agg_data[qry_id * num + qry_id] += scale2;
      agg_data[qry_id * num + pos_id] -= scale2;
      agg_data[pos_id * num + pos_id] += scale2;
      agg_data[pos_id * num + qry_id] -= scale2;
    }
    caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, num,
                   Dtype(1), agg_data, feat_data, Dtype(0), feat_diff);
  }
}
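To see why the coefficient matrix reproduces the gradient, assume (as the structure of the code suggests; the loss definition itself is not shown in this excerpt) that the triplet term uses squared Euclidean distances d(a,b) = ||f_a - f_b||^2. Then for a triplet (q, p, n):

\[
\frac{\partial}{\partial f_q}\bigl(d(q,p)-d(q,n)\bigr) = 2(f_n - f_p),\qquad
\frac{\partial}{\partial f_p}\bigl(d(q,p)-d(q,n)\bigr) = 2(f_p - f_q),\qquad
\frac{\partial}{\partial f_n}\bigl(d(q,p)-d(q,n)\bigr) = 2(f_q - f_n),
\]

which, scaled by mu_ / triplets_.size(), is exactly the +scale1/-scale1 pattern accumulated into agg_data above; the positive-pair loop applies the same idea to d(q,p) alone with weight (1 - mu_). The single gemm then forms all num row gradients at once as agg_data times the feature matrix.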
Example 15: ReshapeLike
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
    case Caffe::GPU: {
      if (device_->backend() == BACKEND_CUDA) {
        if (copy_diff) {
          caffe_copy(count_, source.gpu_diff(),
                     static_cast<Dtype*>(diff_->mutable_gpu_data()));
        } else {
          caffe_copy(count_, source.gpu_data(),
                     static_cast<Dtype*>(data_->mutable_gpu_data()));
        }
      } else {
#ifdef USE_GREENTEA
        if (copy_diff) {
          greentea_copy<Dtype>(
              count_, (cl_mem) (source.gpu_diff()), 0,
              (cl_mem) (diff_->mutable_gpu_data()), 0,
              &viennacl::ocl::get_context(device_->id()));
        } else {
          greentea_copy<Dtype>(
              count_, (cl_mem) (source.gpu_data()), 0,
              (cl_mem) (data_->mutable_gpu_data()), 0,
              &viennacl::ocl::get_context(device_->id()));
        }
#endif
      }
      break;
    }
    case Caffe::CPU: {
      if (copy_diff) {
        caffe_cpu_copy(count_, source.cpu_diff(),
                       static_cast<Dtype*>(diff_->mutable_cpu_data()));
      } else {
        caffe_cpu_copy(count_, source.cpu_data(),
                       static_cast<Dtype*>(data_->mutable_cpu_data()));
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
}