This article collects typical usage examples of the C++ method Blob::channels. If you have been wondering how Blob::channels is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class Blob.
The following shows 15 code examples of the Blob::channels method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.
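Before diving into the examples: a Caffe Blob is a 4-D array laid out as (num, channels, height, width), and Blob::channels() returns the size of the second axis. Below is a minimal sketch of the access pattern the examples rely on; the helper name SumBlob is illustrative, not part of Caffe.

#include "caffe/blob.hpp"

using caffe::Blob;

// Illustrative helper: sum every element of a blob by walking the four
// axes with the shape accessors used throughout the examples below.
template <typename Dtype>
Dtype SumBlob(const Blob<Dtype>& blob) {
  Dtype sum = 0;
  for (int n = 0; n < blob.num(); ++n) {
    for (int c = 0; c < blob.channels(); ++c) {   // Blob::channels()
      for (int h = 0; h < blob.height(); ++h) {
        for (int w = 0; w < blob.width(); ++w) {
          sum += blob.data_at(n, c, h, w);        // CPU-side element read
        }
      }
    }
  }
  return sum;
}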
Example 1: min
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  const Dtype* bottom_data = blob_bottom.cpu_data();
  Dtype* top_data = blob_top->mutable_cpu_data();
  Dtype alpha = layer_param.alpha();
  Dtype beta = layer_param.beta();
  int size = layer_param.local_size();
  for (int n = 0; n < blob_bottom.num(); ++n) {
    for (int c = 0; c < blob_bottom.channels(); ++c) {
      for (int h = 0; h < blob_bottom.height(); ++h) {
        for (int w = 0; w < blob_bottom.width(); ++w) {
          int c_start = c - (size - 1) / 2;
          int c_end = min(c_start + size, blob_bottom.channels());
          c_start = max(c_start, 0);
          Dtype scale = 1.;
          for (int i = c_start; i < c_end; ++i) {
            Dtype value = blob_bottom.data_at(n, i, h, w);
            scale += value * value * alpha / size;
          }
          *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
        }
      }
    }
  }
}
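For reference, the cross-channel normalization this test reproduces is, with a window $\mathcal{N}(c)$ of at most `size` channels centered on $c$:

$$b_{n,c,h,w} = a_{n,c,h,w} \Big/ \Bigl(1 + \tfrac{\alpha}{\mathrm{size}} \sum_{c' \in \mathcal{N}(c)} a_{n,c',h,w}^{2}\Bigr)^{\beta}$$

which matches the `scale` accumulation and the final division by `pow(scale, beta)` above.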
Example 2: Reshape
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
#if 0
  case Caffe::GPU:
    if (copy_diff) {
      CUDA_CHECK(cudaMemcpy(diff_->mutable_gpu_data(), source.gpu_diff(),
          sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
    } else {
      CUDA_CHECK(cudaMemcpy(data_->mutable_gpu_data(), source.gpu_data(),
          sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
    }
    break;
#endif
  case Caffe::CPU:
    if (copy_diff) {
      memcpy(diff_->mutable_cpu_data(), source.cpu_diff(),
          sizeof(Dtype) * count_);
    } else {
      memcpy(data_->mutable_cpu_data(), source.cpu_data(),
          sizeof(Dtype) * count_);
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
Example 3: if
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  if (concat_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      if (propagate_down[i]) {
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num),
                   bottom_diff);
      }
      offset_num += blob->num();
    }
  } else if (concat_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      if (propagate_down[i]) {
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        int num_elem = blob->channels() * blob->height() * blob->width();
        for (int n = 0; n < num_; ++n) {
          caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel),
                     bottom_diff + blob->offset(n));
        }
      }
      offset_channel += blob->channels();
    }
  }  // concat_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Example 4: Reshape
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
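A minimal usage sketch for CopyFrom, assuming CPU mode and the Caffe headers; the blob names and the CopyFromExample function are illustrative, not part of Caffe.

#include <cassert>
#include "caffe/blob.hpp"

using caffe::Blob;

// Sketch: copy one blob into another, letting CopyFrom reshape the target.
void CopyFromExample() {
  Blob<float> source(2, 3, 4, 5);        // (num, channels, height, width)
  Blob<float> target;                    // starts empty
  target.CopyFrom(source, false, true);  // copy data_ (not diff_), reshape
  assert(target.channels() == source.channels());
}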
Example 5: if
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      caffe_copy(blob->count(), top_diff,
                 bottom_diff + (*bottom)[0]->offset(offset_num));
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff + blob->offset(n),
                   bottom_diff + (*bottom)[0]->offset(n, offset_channel));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Example 6: Dtype
Dtype SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->mutable_cpu_data();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
                 top_data);
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel),
                   top_data + blob->offset(n));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
  return Dtype(0.);
}
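Both slice examples lean on the row-major layout behind Blob::offset: for a blob of shape $(N, C, H, W)$,

$$\mathrm{offset}(n, c, h, w) = ((n \cdot C + c) \cdot H + h) \cdot W + w,$$

so `offset(n, offset_channel)` addresses the first element of channel `offset_channel` in image `n`, and the `num_elem = channels() * height() * width()` elements copied per image of the smaller blob are contiguous.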
Example 7: switch
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  LRNParameter lrn_param = layer_param.lrn_param();
  Dtype alpha = lrn_param.alpha();
  Dtype beta = lrn_param.beta();
  int size = lrn_param.local_size();
  switch (lrn_param.norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          for (int w = 0; w < blob_bottom.width(); ++w) {
            int c_start = c - (size - 1) / 2;
            int c_end = min(c_start + size, blob_bottom.channels());
            c_start = max(c_start, 0);
            Dtype scale = 1.;
            for (int i = c_start; i < c_end; ++i) {
              Dtype value = blob_bottom.data_at(n, i, h, w);
              scale += value * value * alpha / size;
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
                blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          int h_start = h - (size - 1) / 2;
          int h_end = min(h_start + size, blob_bottom.height());
          h_start = max(h_start, 0);
          for (int w = 0; w < blob_bottom.width(); ++w) {
            Dtype scale = 1.;
            int w_start = w - (size - 1) / 2;
            int w_end = min(w_start + size, blob_bottom.width());
            w_start = max(w_start, 0);
            for (int nh = h_start; nh < h_end; ++nh) {
              for (int nw = w_start; nw < w_end; ++nw) {
                Dtype value = blob_bottom.data_at(n, c, nh, nw);
                scale += value * value * alpha / (size * size);
              }
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
                blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
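The WITHIN_CHANNEL branch differs only in where the sum runs: over a spatial window $\mathcal{N}(h, w)$ of at most size x size positions inside the same channel,

$$b_{n,c,h,w} = a_{n,c,h,w} \Big/ \Bigl(1 + \tfrac{\alpha}{\mathrm{size}^{2}} \sum_{(h',w') \in \mathcal{N}(h,w)} a_{n,c,h',w'}^{2}\Bigr)^{\beta}.$$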
Example 8: layer
TYPED_TEST(MultipleInnerProductLayerTest, TestSetup) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  MultipleInnerProductParameter* mip_param =
      layer_param.mutable_multiple_inner_product_param();
  mip_param->set_num_layer(3);
  mip_param->add_num_outputs(NUM_OUT1);
  mip_param->add_num_outputs(NUM_OUT2);
  mip_param->add_num_outputs(NUM_OUT3);
  MultipleInnerProductLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  for (int i = 0; i < this->blob_top_vec_.size(); i++) {
    Blob<Dtype>* blob = this->blob_top_vec_[i];
    EXPECT_EQ(blob->num(), NUM);
    EXPECT_EQ(blob->channels(), NUM_OUT3);
    EXPECT_EQ(blob->height(), 1);
    EXPECT_EQ(blob->width(), 1);
  }
  EXPECT_EQ(layer.blobs().size(), 3 * 2);
  EXPECT_EQ(layer.blobs()[0]->shape()[0], NUM_OUT1);
  EXPECT_EQ(layer.blobs()[2]->shape()[0], NUM_OUT2);
  EXPECT_EQ(layer.blobs()[4]->shape()[0], NUM_OUT3);
}
Example 9: FillDatum
TYPED_TEST(DataTransformTest, TestCropSize) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels the same, equal to label
  const int_tp label = 0;
  const int_tp channels = 3;
  const int_tp height = 4;
  const int_tp width = 5;
  const int_tp crop_size = 2;
  transform_param.set_crop_size(crop_size);
  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST,
                                     Caffe::GetDefaultDevice());
  transformer->InitRand();
  Blob<TypeParam>* blob =
      new Blob<TypeParam>(1, channels, crop_size, crop_size);
  for (int_tp iter = 0; iter < this->num_iter_; ++iter) {
    transformer->Transform(datum, blob);
    EXPECT_EQ(blob->num(), 1);
    EXPECT_EQ(blob->channels(), datum.channels());
    EXPECT_EQ(blob->height(), crop_size);
    EXPECT_EQ(blob->width(), crop_size);
    for (int_tp j = 0; j < blob->count(); ++j) {
      EXPECT_EQ(blob->cpu_data()[j], label);
    }
  }
}
Example 10:
void reducedRTFLayer<Dtype>::FillBlob(Blob<Dtype>& toFill, bool isRand,
                                      Dtype fillerConstant) {
  int N, C, H, W;
  N = toFill.num();
  C = toFill.channels();
  H = toFill.height();
  W = toFill.width();
  Dtype* toFillPtr = toFill.mutable_cpu_data();
  for (int n = 0; n < N; ++n) {
    for (int c = 0; c < C; ++c) {
      for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
          if (isRand) {
            // Cast before dividing: rand() / RAND_MAX would be integer
            // division and almost always evaluate to 0.
            toFillPtr[n*C*H*W + c*H*W + h*W + w] =
                static_cast<Dtype>(rand()) / RAND_MAX;
          } else {
            toFillPtr[n*C*H*W + c*H*W + h*W + w] = fillerConstant;
          }
        }
      }
    }
  }
}
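The hand-rolled index n*C*H*W + c*H*W + h*W + w is exactly what Blob::offset(n, c, h, w) computes. A hedged equivalent of the constant-fill branch using the accessor; FillBlobConstant is an illustrative name, not part of this layer.

#include "caffe/blob.hpp"

// Sketch: fill a blob with a constant via Blob::offset instead of
// manual index arithmetic.
template <typename Dtype>
void FillBlobConstant(caffe::Blob<Dtype>& blob, Dtype value) {
  Dtype* data = blob.mutable_cpu_data();
  for (int n = 0; n < blob.num(); ++n)
    for (int c = 0; c < blob.channels(); ++c)
      for (int h = 0; h < blob.height(); ++h)
        for (int w = 0; w < blob.width(); ++w)
          data[blob.offset(n, c, h, w)] = value;
}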
Example 11:
void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
                                                    const Blob<Dtype>& b2) {
  EXPECT_EQ(b1.num(), b2.num());
  EXPECT_EQ(b1.channels(), b2.channels());
  EXPECT_EQ(b1.height(), b2.height());
  EXPECT_EQ(b1.width(), b2.width());
  for (int n = 0; n < b1.num(); ++n) {
    for (int c = 0; c < b1.channels(); ++c) {
      for (int h = 0; h < b1.height(); ++h) {
        for (int w = 0; w < b1.width(); ++w) {
          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
        }
      }
    }
  }
}
Example 12: SetMean
// Load the mean file in binaryproto format.
int DeepFeatureExtractor::SetMean(const std::string& meanfile) {
  BlobProto blob_proto;
  ReadProtoFromBinaryFileOrDie(meanfile.c_str(), &blob_proto);
  // Convert from BlobProto to Blob<float>.
  Blob<float> meanblob;
  meanblob.FromProto(blob_proto);
  CHECK_EQ(meanblob.channels(), m_num_channels)
      << "Number of channels of mean file doesn't match input layer.";
  // The format of the mean file is planar 32-bit float BGR or grayscale.
  std::vector<cv::Mat> channels;
  float* data = meanblob.mutable_cpu_data();
  for (unsigned int i = 0; i < m_num_channels; ++i) {
    // Extract an individual channel.
    cv::Mat channel(meanblob.height(), meanblob.width(), CV_32FC1, data);
    channels.push_back(channel);
    data += meanblob.height() * meanblob.width();
  }
  // Merge the separate channels into a single image.
  cv::Mat mean;
  cv::merge(channels, mean);
  // Compute the global mean pixel value and create a mean image
  // filled with this value.
  cv::Scalar channel_mean = cv::mean(mean);
  m_mean = cv::Mat(m_input_geometry, mean.type(), channel_mean);
  return 0;
}
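For context, a mean image built this way is usually subtracted from each float-converted input before it is fed to the network. Below is a minimal sketch assuming OpenCV and an input already resized to the mean's geometry; SubtractMean is an illustrative helper, not part of DeepFeatureExtractor.

#include <opencv2/core.hpp>

// Sketch: subtract the mean image produced by SetMean from a CV_32F sample
// of the same size and channel count.
static cv::Mat SubtractMean(const cv::Mat& sample_float, const cv::Mat& mean) {
  cv::Mat normalized;
  cv::subtract(sample_float, mean, normalized);  // element-wise difference
  return normalized;
}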
Example 13: avePooling_cpu
void PoolingLayerImpl::avePooling_cpu(Blob &src, Blob &dst)
{
    for (int n = 0; n < src.num(); ++n)
    {
        for (int c = 0; c < src.channels(); ++c)
        {
            const float *srcData = src.ptrf(n, c);
            float *dstData = dst.ptrf(n, c);
            for (int ph = 0; ph < out.height; ++ph)
            {
                for (int pw = 0; pw < out.width; ++pw)
                {
                    int hstart = ph * stride.height - pad.height;
                    int wstart = pw * stride.width - pad.width;
                    int hend = min(hstart + kernel.height, inp.height + pad.height);
                    int wend = min(wstart + kernel.width, inp.width + pad.width);
                    int poolSize = (hend - hstart) * (wend - wstart);
                    hstart = max(hstart, 0);
                    wstart = max(wstart, 0);
                    hend = min(hend, inp.height);
                    wend = min(wend, inp.width);
                    dstData[ph * out.width + pw] = 0.f;
                    for (int h = hstart; h < hend; ++h)
                        for (int w = wstart; w < wend; ++w)
                            dstData[ph * out.width + pw] += srcData[h * inp.width + w];
                    dstData[ph * out.width + pw] /= poolSize;
                }
            }
        }
    }
}
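In other words, each output cell is the window average

$$\mathrm{dst}(ph, pw) = \frac{1}{\mathrm{poolSize}} \sum_{h=h_{\mathrm{start}}}^{h_{\mathrm{end}}-1} \; \sum_{w=w_{\mathrm{start}}}^{w_{\mathrm{end}}-1} \mathrm{src}(h, w),$$

where poolSize is the area of the padded kernel window before it is clamped to the input borders, so border cells are averaged over the padded count rather than the number of valid pixels.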
Example 14: if
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  if (concat_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      Dtype* bottom_diff = blob->mutable_cpu_diff();
      caffe_copy(blob->count(),
          top_diff + top[0]->offset(offset_num), bottom_diff);
      offset_num += blob->num();
    }
  } else if (concat_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      Dtype* bottom_diff = blob->mutable_cpu_diff();
      int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel),
            bottom_diff + blob->offset(n));
      }
      offset_channel += blob->channels();
    }
  } else if (concat_dim_ == 4) {  // lipengyu add
    int top_bias = 0;
    for (int n = 0; n < num_; n++) {
      for (int i = 0; i < bottom->size(); i++) {
        Blob<Dtype>* blob = (*bottom)[i];
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        int num_elem = blob->channels() * blob->height() * blob->width();
        caffe_copy(num_elem, top_diff + top_bias,  // top[0]->offset(n, offset_channel)
            bottom_diff + blob->offset(n));
        top_bias += num_elem;
      }
    }
  } else {
    LOG(FATAL) << "concat_dim along dim " << concat_dim_
        << " not implemented yet";
  }
}
Example 15:
void WordvecLayerTest<TypeParam>::ReferenceWordvecForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  typedef typename TypeParam::Dtype Dtype;
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  WordvecParameter wordvec_param = layer_param.wordvec_param();
}