This article collects typical usage examples of the C++ method Blob::count. If you have been wondering what Blob::count does in C++, how to call it, or where to find working examples, the curated snippets below may help. You can also explore the enclosing Blob class for further context.
The 15 code examples below are sorted by popularity by default. Upvote the ones you like or find useful; your ratings help the system recommend better C++ examples.
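Before diving in, here is a minimal sketch of the count() overloads the examples below rely on (a sketch assuming a standard BVLC Caffe checkout on the include path; the shape is illustrative):
#include "caffe/blob.hpp"

int main() {
  // A legacy 4-axis blob with shape (2, 3, 4, 5).
  caffe::Blob<float> blob(2, 3, 4, 5);
  int total = blob.count();      // product of all axes: 2*3*4*5 = 120
  int tail  = blob.count(1);     // axes 1..end: 3*4*5 = 60
  int slice = blob.count(1, 3);  // half-open axis range [1, 3): 3*4 = 12
  return (total == 120 && tail == 60 && slice == 12) ? 0 : 1;
}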
Example 1: ScalarLayer::Reshape
template <typename Dtype>
void ScalarLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const ScalarParameter& param = this->layer_param_.scalar_param();
Blob<Dtype>* scalar = (bottom.size() > 1) ? bottom[1] : this->blobs_[0].get();
// Always set axis_ == 0 in special case where scalar is an actual scalar
// (num_axes == 0). Mathematically equivalent for any choice of axis_, so the
// actual setting can be safely ignored; and computation is most efficient
// with axis_ == 0 and (therefore) outer_dim_ == 1. (Setting axis_ to
// bottom[0]->num_axes() - 1, giving inner_dim_ == 1, would be equally
// performant.)
axis_ = (scalar->num_axes() == 0) ?
0 : bottom[0]->CanonicalAxisIndex(param.axis());
CHECK_GE(bottom[0]->num_axes(), axis_ + scalar->num_axes())
<< "scalar blob's shape extends past bottom[0]'s shape when applied "
<< "starting with bottom[0] axis = " << axis_;
for (int i = 0; i < scalar->num_axes(); ++i) {
CHECK_EQ(bottom[0]->shape(axis_ + i), scalar->shape(i))
<< "dimension mismatch between bottom[0]->shape(" << axis_ + i
<< ") and scalar->shape(" << i << ")";
}
outer_dim_ = bottom[0]->count(0, axis_);
scalar_dim_ = scalar->count();
inner_dim_ = bottom[0]->count(axis_ + scalar->num_axes());
if (bottom[0] == top[0]) { // in-place computation
temp_.ReshapeLike(*bottom[0]);
} else {
top[0]->ReshapeLike(*bottom[0]);
}
sum_result_.Reshape(vector<int>(1, outer_dim_ * scalar_dim_));
const int sum_mult_size = std::max(outer_dim_, inner_dim_);
sum_multiplier_.Reshape(vector<int>(1, sum_mult_size));
if (sum_multiplier_.cpu_data()[sum_mult_size - 1] != Dtype(1)) {
caffe_set(sum_mult_size, Dtype(1), sum_multiplier_.mutable_cpu_data());
}
}
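The three count() calls above factor bottom[0] into outer × scalar × inner blocks. A worked sketch with hypothetical shapes (a (2, 3, 4, 5) bottom scaled by a (3, 4) blob at axis 1):
caffe::Blob<float> bottom(std::vector<int>{2, 3, 4, 5});
caffe::Blob<float> scalar(std::vector<int>{3, 4});
const int axis = 1;                                       // param.axis()
int outer_dim  = bottom.count(0, axis);                   // 2
int scalar_dim = scalar.count();                          // 3*4 = 12
int inner_dim  = bottom.count(axis + scalar.num_axes());  // 5
// outer_dim * scalar_dim * inner_dim == bottom.count() == 120, so every
// element bottom(o, s, i) is multiplied by scalar(s).
BiasLayer::Reshape in Example 10 below performs the identical factorization for its bias blob.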
Example 2: DataTransformTest::TestCropSize
TYPED_TEST(DataTransformTest, TestCropSize) {
TransformationParameter transform_param;
const bool unique_pixels = false;  // every pixel takes the same value, equal to label
const int label = 0;
const int channels = 3;
const int height = 4;
const int width = 5;
const int crop_size = 2;
transform_param.set_crop_size(crop_size);
Datum datum;
FillDatum(label, channels, height, width, unique_pixels, &datum);
DataTransformer<TypeParam>* transformer =
new DataTransformer<TypeParam>(transform_param, TEST,
Caffe::GetDefaultDeviceContext());
transformer->InitRand();
Blob<TypeParam>* blob =
new Blob<TypeParam>(1, channels, crop_size, crop_size);
for (int iter = 0; iter < this->num_iter_; ++iter) {
transformer->Transform(datum, blob);
EXPECT_EQ(blob->num(), 1);
EXPECT_EQ(blob->channels(), datum.channels());
EXPECT_EQ(blob->height(), crop_size);
EXPECT_EQ(blob->width(), crop_size);
for (int j = 0; j < blob->count(); ++j) {
EXPECT_EQ(blob->cpu_data()[j], label);
}
  }
  delete blob;
  delete transformer;
}
Example 3: Blob copy constructor
Blob::Blob(const Blob &source) {
  // Shallow copy: share the source's shape, element count, and data buffer.
  shape_ = source.shape();
  count_ = source.count();
  data_ = source.data();
}
Example 4: Col2ImgMaskLayerTest::TestForward_2
TYPED_TEST(Col2ImgMaskLayerTest, TestForward_2) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->add_kernel_size(3);
  convolution_param->add_stride(2);
  caffe_set(this->blob_bottom_->count(), (Dtype)2,
      this->blob_bottom_->mutable_cpu_data());
  Blob<Dtype> mask;
  mask.ReshapeLike(*this->blob_bottom_);
  caffe_set(mask.count(), (Dtype)1, mask.mutable_cpu_data());
  vector<Blob<Dtype>*> blob_bottom_vec_2_;
  blob_bottom_vec_2_.push_back(this->blob_bottom_);
  blob_bottom_vec_2_.push_back(&mask);
  Col2imgMaskLayer<Dtype> layer(layer_param);
  layer.SetUp(blob_bottom_vec_2_, this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_->num(), 2);
  EXPECT_EQ(this->blob_top_->channels(), 2);
  EXPECT_EQ(this->blob_top_->height(), 5);
  EXPECT_EQ(this->blob_top_->width(), 5);
  // Forward must see the same two-blob bottom vector used in SetUp.
  layer.Forward(blob_bottom_vec_2_, this->blob_top_vec_);
  const Dtype min_precision = 1e-5;
  for (int i = 0; i < this->blob_top_->count(); i++)
    EXPECT_NEAR(this->blob_top_->cpu_data()[i], 2, min_precision);
}
Example 5: ConcatLayer::Backward_cpu
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
if (concat_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < bottom.size(); ++i) {
Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_cpu_diff();
caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num),
bottom_diff);
}
offset_num += blob->num();
}
} else if (concat_dim_ == 1) {
int offset_channel = 0;
for (int i = 0; i < bottom.size(); ++i) {
Blob<Dtype>* blob = bottom[i];
if (propagate_down[i]) {
Dtype* bottom_diff = blob->mutable_cpu_diff();
int num_elem = blob->channels()*blob->height()*blob->width();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel),
bottom_diff + blob->offset(n));
}
}
offset_channel += blob->channels();
}
} // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp.
}
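The caffe_copy calls above lean on Blob::offset, which linearizes legacy (n, c, h, w) indices. A small sketch of the arithmetic (the shape is illustrative):
// offset(n, c, h, w) = ((n * channels() + c) * height() + h) * width() + w
caffe::Blob<float> b(2, 3, 4, 5);
int off = b.offset(1, 2);  // ((1*3 + 2) * 4 + 0) * 5 + 0 = 100
// so top_diff + top[0]->offset(n, offset_channel) points at the first
// element of channel block offset_channel within image n.
The SliceLayer forward and backward passes in Examples 6 and 8 use the same offset arithmetic in the opposite direction.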
Example 6: SliceLayer::Forward_cpu
template <typename Dtype>
Dtype SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();  // read-only access
if (slice_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < top->size(); ++i) {
Blob<Dtype>* blob = (*top)[i];
Dtype* top_data = blob->mutable_cpu_data();
caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
top_data);
offset_num += blob->num();
}
} else if (slice_dim_ == 1) {
int offset_channel = 0;
for (int i = 0; i < top->size(); ++i) {
Blob<Dtype>* blob = (*top)[i];
Dtype* top_data = blob->mutable_cpu_data();
const int num_elem = blob->channels() * blob->height() * blob->width() * blob->depth();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel),
top_data + blob->offset(n));
}
offset_channel += blob->channels();
}
} // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
return Dtype(0.);
}
Example 7: InfogainLossLayer::Reshape
template<typename Dtype, typename MItype, typename MOtype>
void InfogainLossLayer<Dtype, MItype, MOtype>::Reshape(
const vector<Blob<MItype>*>& bottom,
const vector<Blob<MOtype>*>& top) {
LossLayer<Dtype, MItype, MOtype>::Reshape(bottom, top);
softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
infogain_axis_ =
bottom[0]->CanonicalAxisIndex(
this->layer_param_.infogain_loss_param().axis());
outer_num_ = bottom[0]->count(0, infogain_axis_);
inner_num_ = bottom[0]->count(infogain_axis_ + 1);
CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
<< "Number of labels must match number of predictions; "
<< "e.g., if infogain axis == 1 and prediction shape is (n, c, H, W), "
<< "label count (number of labels) must be n*H*W, "
<< "with integer values in {0, 1, ..., c-1}.";
num_labels_ = bottom[0]->shape(infogain_axis_);
Blob<Dtype>* infogain = NULL;
if (bottom.size() < 3) {
infogain = &infogain_;
} else {
infogain = bottom[2];
}
CHECK_EQ(infogain->count(), num_labels_*num_labels_);
sum_rows_H_.Reshape(vector<int_tp>(1, num_labels_));
if (bottom.size() == 2) {
// H is provided as a parameter and will not change; sum its rows once.
sum_rows_of_H(infogain);
}
if (top.size() >= 2) {
// softmax output
top[1]->ReshapeLike(*bottom[0]);
}
}
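A quick numeric check of the outer/inner split above, using a hypothetical prediction shape (10, 5, 4, 4) with infogain axis == 1:
caffe::Blob<float> pred(10, 5, 4, 4);        // (n, c, H, W)
const int axis = 1;
int outer_num  = pred.count(0, axis);        // n = 10
int num_labels = pred.shape(axis);           // c = 5
int inner_num  = pred.count(axis + 1);       // H*W = 16
// bottom[1] must then hold outer_num * inner_num = 160 integer labels
// in {0, ..., 4}, and the infogain matrix H must have 5*5 = 25 entries.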
Example 8: SliceLayer::Backward_cpu
template <typename Dtype>
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down[0]) { return; }
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
if (slice_dim_ == 0) {
int offset_num = 0;
for (int i = 0; i < top.size(); ++i) {
Blob<Dtype>* blob = top[i];
const Dtype* top_diff = blob->cpu_diff();
caffe_copy(blob->count(), top_diff,
bottom_diff + (*bottom)[0]->offset(offset_num));
offset_num += blob->num();
}
} else if (slice_dim_ == 1) {
int offset_channel = 0;
for (int i = 0; i < top.size(); ++i) {
Blob<Dtype>* blob = top[i];
const Dtype* top_diff = blob->cpu_diff();
const int num_elem = blob->channels() * blob->height() * blob->width() * blob->depth();
for (int n = 0; n < num_; ++n) {
caffe_copy(num_elem, top_diff + blob->offset(n),
bottom_diff + (*bottom)[0]->offset(n, offset_channel));
}
offset_channel += blob->channels();
}
} // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Example 9: Blob::CopyFrom
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
if (source.count() != count_ || source.shape() != shape_) {
if (reshape) {
ReshapeLike(source);
} else {
LOG(FATAL) << "Trying to copy blobs of different sizes.";
}
}
switch (Caffe::mode()) {
case Caffe::GPU:
if (copy_diff) {
caffe_copy(count_, source.gpu_diff(),
static_cast<Dtype*>(diff_->mutable_gpu_data()));
} else {
caffe_copy(count_, source.gpu_data(),
static_cast<Dtype*>(data_->mutable_gpu_data()));
}
break;
case Caffe::CPU:
if (copy_diff) {
caffe_copy(count_, source.cpu_diff(),
static_cast<Dtype*>(diff_->mutable_cpu_data()));
} else {
caffe_copy(count_, source.cpu_data(),
static_cast<Dtype*>(data_->mutable_cpu_data()));
}
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}
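Typical call sites look like the following sketch (the blob names are illustrative); with reshape == true a shape mismatch reshapes the destination instead of aborting:
caffe::Blob<float> src(std::vector<int>{2, 3});
caffe::Blob<float> dst;                                     // starts empty
dst.CopyFrom(src, /*copy_diff=*/false, /*reshape=*/true);   // copy data, reshape dst
dst.CopyFrom(src, /*copy_diff=*/true,  /*reshape=*/false);  // shapes now match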
Example 10: BiasLayer::Reshape
template <typename Dtype>
void BiasLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const BiasParameter& param = this->layer_param_.bias_param();
Blob<Dtype>* bias = (bottom.size() > 1) ? bottom[1] : this->blobs_[0].get();
// Always set axis == 0 in special case where bias is a scalar
// (num_axes == 0). Mathematically equivalent for any choice of axis, so the
// actual setting can be safely ignored; and computation is most efficient
// with axis == 0 and (therefore) outer_dim_ == 1.
const int axis = (bias->num_axes() == 0) ?
0 : bottom[0]->CanonicalAxisIndex(param.axis());
CHECK_GE(bottom[0]->num_axes(), axis + bias->num_axes())
<< "bias blob's shape extends past bottom[0]'s shape when applied "
<< "starting with bottom[0] axis = " << axis;
for (int i = 0; i < bias->num_axes(); ++i) {
CHECK_EQ(bottom[0]->shape(axis + i), bias->shape(i))
<< "dimension mismatch between bottom[0]->shape(" << axis + i
<< ") and bias->shape(" << i << ")";
}
outer_dim_ = bottom[0]->count(0, axis);
bias_dim_ = bias->count();
inner_dim_ = bottom[0]->count(axis + bias->num_axes());
dim_ = bias_dim_ * inner_dim_;
if (bottom[0] != top[0]) {
top[0]->ReshapeLike(*bottom[0]);
}
bias_multiplier_.Reshape(vector<int>(1, inner_dim_));
if (bias_multiplier_.cpu_data()[inner_dim_ - 1] != Dtype(1)) {
caffe_set(inner_dim_, Dtype(1), bias_multiplier_.mutable_cpu_data());
}
}
Example 11: Blob::ShareDiff
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  // SID MEMORY COMPACT LIGHT WEIGHT CAFFE <BEGIN>
  // If the diff buffer was lazily skipped, allocate it once; note that the
  // buffer is immediately re-pointed at other's diff below, so only the
  // flag update persists.
  if (_is_diff_initialized == 0) {
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    _is_diff_initialized = 1;
  }
  // SID MEMORY COMPACT LIGHT WEIGHT CAFFE <END>
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}
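Usage sketch: after ShareDiff the two blobs alias one gradient buffer, so gradients accumulated through either are visible in both (shapes are illustrative; the _is_diff_initialized guard is specific to this modified fork):
caffe::Blob<float> a(std::vector<int>{4, 4});
caffe::Blob<float> b(std::vector<int>{4, 4});
b.ShareDiff(a);
// b.mutable_cpu_diff() and a.mutable_cpu_diff() now return the same buffer,
// while the two data buffers remain independent.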
Example 12: NumSequenceMatches
int NumSequenceMatches(const TransformationParameter transform_param,
const Datum& datum, Phase phase) {
// Get crop sequence with Caffe seed 1701.
DataTransformer<Dtype>* transformer =
new DataTransformer<Dtype>(transform_param, phase);
const int crop_size = transform_param.crop_size();
int crop_h = transform_param.crop_h();
int crop_w = transform_param.crop_w();
if (crop_size > 0) {
crop_h = crop_w = crop_size;
}
Caffe::set_random_seed(seed_);
transformer->InitRand();
Blob<Dtype>* blob =
new Blob<Dtype>(1, datum.channels(), datum.height(), datum.width());
if (crop_h > 0 || crop_w > 0) {
blob->Reshape(1, datum.channels(), crop_h, crop_w);
}
vector<vector<Dtype> > crop_sequence;
for (int iter = 0; iter < this->num_iter_; ++iter) {
vector<Dtype> iter_crop_sequence;
transformer->Transform(datum, blob);
for (int j = 0; j < blob->count(); ++j) {
iter_crop_sequence.push_back(blob->cpu_data()[j]);
}
crop_sequence.push_back(iter_crop_sequence);
}
// Count how many elements of a fresh sequence match the stored one.
int num_sequence_matches = 0;
for (int iter = 0; iter < this->num_iter_; ++iter) {
vector<Dtype> iter_crop_sequence = crop_sequence[iter];
transformer->Transform(datum, blob);
for (int j = 0; j < blob->count(); ++j) {
num_sequence_matches +=
(crop_sequence[iter][j] == blob->cpu_data()[j]);
}
}
  delete blob;
  delete transformer;
  return num_sequence_matches;
}
Example 13: write_blob_to_file
static void write_blob_to_file(const std::string& file_name,
const Blob<Dtype>& blob) {
std::ofstream file(file_name.c_str(), std::ios::out | std::ios::binary);
if (file.fail()) {
ASSERT_FALSE(true);
return;
}
// Assumes a legacy 4-axis blob: write the shape as four ints, then the data.
file.write(reinterpret_cast<const char*>(&blob.shape()[0]), 4 * sizeof(int));
ASSERT_FALSE(file.fail());
file.write(reinterpret_cast<const char*>(blob.cpu_data()),
blob.count() * sizeof(Dtype));
ASSERT_FALSE(file.fail());
file.close();
}
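For completeness, a matching reader would invert the two writes. The following is a hypothetical sketch (read_blob_from_file is not part of the original test; it reuses the writer's 4-axis assumption and fixture context):
static void read_blob_from_file(const std::string& file_name,
    Blob<Dtype>* blob) {
  std::ifstream file(file_name.c_str(), std::ios::in | std::ios::binary);
  ASSERT_FALSE(file.fail());
  std::vector<int> shape(4);  // same 4-axis assumption as the writer
  file.read(reinterpret_cast<char*>(&shape[0]), 4 * sizeof(int));
  blob->Reshape(shape);
  file.read(reinterpret_cast<char*>(blob->mutable_cpu_data()),
      blob->count() * sizeof(Dtype));
  ASSERT_FALSE(file.fail());
}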
Example 14: AsyncParamServer::ProcessUpdateTask
template <typename Dtype>
void AsyncParamServer<Dtype>::ProcessUpdateTask() {
const vector<Blob<Dtype> *> &net_params = solver_->net()->learnable_params();
std::deque<TaskRequest> to_update;
update_queue_mutex_.lock();
to_update.swap(update_tasks_);
update_queue_mutex_.unlock();
while (!to_update.empty() ) {
TaskRequest task = to_update.front();
to_update.pop_front();
// copy to diff in solver
int root_rank = world_rank_to_root_rank(task.part_root_rank_);
Blob<Dtype>* blob = net_params[task.param_id_];
Dtype* solver_diff = blob->mutable_cpu_diff();
Dtype* mpi_buf =
recv_buf_[make_pair(root_rank, task.param_id_)].first;
int64_t count =
recv_buf_[make_pair(root_rank, task.param_id_)].second;
CHECK(count == blob->count());
// copy MPI buffer to solver_diff
int64_t part_offset = task.part_id_ * count / task.num_parts_;
caffe_copy(count / task.num_parts_,
mpi_buf + part_offset, solver_diff + part_offset);
// apply update
int blob_wise_iter = async_iter_[make_pair(task.param_id_, task.part_id_) ];
solver_->set_iter(blob_wise_iter);
// TODO: support partial param updates per model part
solver_->ApplyUpdate(task.param_id_);
DLOG(INFO) << "PS (iter " << blob_wise_iter << "): param id=" << task.param_id_ << " weight=" << net_params[task.param_id_]->sumsq_diff();
DLOG(INFO) << "PS (iter " << blob_wise_iter << "): param id=" << task.param_id_ << " data=" << net_params[task.param_id_]->sumsq_data();
// clean up
solver_->net()->ClearParamDiffs(task.param_id_);
async_iter_[ make_pair(task.param_id_, task.part_id_) ] += 1;
update_cnt_ += 1;
// copy model(data) in solver to mpi buffer
mpi_buf = send_buf_[make_pair(root_rank, task.param_id_)].first;
caffe_copy(count / task.num_parts_,
blob->cpu_data() + part_offset, mpi_buf + part_offset);
// ship off
send_queue_mutex_.lock();
send_tasks_.push_back(task);
send_queue_mutex_.unlock();
}
}
Example 15: Blob::CopyFrom (OpenCL branch)
template<typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
if (source.count() != count_ || source.shape() != shape_) {
if (reshape) {
ReshapeLike(source);
} else {
LOG(FATAL)<< "Trying to copy blobs of different sizes.";
}
}
switch (Caffe::mode()) {
case Caffe::GPU: {
if (device_->backend() == BACKEND_CUDA) {
if (copy_diff) {
caffe_copy(count_, source.gpu_diff(),
static_cast<Dtype*>(diff_->mutable_gpu_data()));
} else {
caffe_copy(count_, source.gpu_data(),
static_cast<Dtype*>(data_->mutable_gpu_data()));
}
} else {
#ifdef USE_GREENTEA
if (copy_diff) {
greentea_copy<Dtype>(
count_, (cl_mem) (source.gpu_diff()), 0,
(cl_mem) (diff_->mutable_gpu_data()), 0,
&viennacl::ocl::get_context(device_->id()));
} else {
greentea_copy<Dtype>(
count_, (cl_mem) (source.gpu_data()), 0,
(cl_mem) (data_->mutable_gpu_data()), 0,
&viennacl::ocl::get_context(device_->id()));
}
#endif
}
break;
}
case Caffe::CPU: {
if (copy_diff) {
caffe_cpu_copy(count_, source.cpu_diff(),
static_cast<Dtype*>(diff_->mutable_cpu_data()));
} else {
caffe_cpu_copy(count_, source.cpu_data(),
static_cast<Dtype*>(data_->mutable_cpu_data()));
}
break;
}
default:
LOG(FATAL)<< "Unknown caffe mode.";
}
}