This article collects typical usage examples of the C++ function caffe_set. If you have been wondering what caffe_set does, how to call it, or where to find working samples, the curated examples below should help.
Fifteen caffe_set code examples are presented, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better C++ samples.
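Before the examples, a quick orientation: caffe_set is declared in Caffe's include/caffe/util/math_functions.hpp and fills the first N elements of an array with a constant value. A minimal sketch of a call follows; the function demo_caffe_set and the buffer are illustrative, not part of Caffe:

#include "caffe/util/math_functions.hpp"  // declares caffe::caffe_set

// Signature: template <typename Dtype>
//            void caffe_set(const int N, const Dtype alpha, Dtype* Y);
void demo_caffe_set() {
  float buffer[16];
  caffe::caffe_set(16, 0.0f, buffer);  // zero-initialize all 16 elements
  caffe::caffe_set(16, 1.0f, buffer);  // overwrite every element with 1
}

Most of the examples below follow exactly this pattern: caffe_set(blob->count(), value, blob->mutable_cpu_data()) to initialize an entire Blob.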
Example 1: caffe_set
template <typename Dtype>
void BinaryBoundingLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  vector<int> shape;
  shape.push_back(bottom[0]->num());
  ones_column.Reshape(shape);
  cache_tmp_.ReshapeLike(*bottom[0]);
  square_cache_tmp_.ReshapeLike(*bottom[0]);
  scalar_cache_.ReshapeLike(*bottom[1]);
  ones_.ReshapeLike(*bottom[0]);
  // Fill the helper blobs with ones.
  caffe_set(ones_column.count(), (Dtype)1.0, ones_column.mutable_cpu_data());
  caffe_set(ones_.count(), (Dtype)1.0, ones_.mutable_cpu_data());
}
Example 2: LOG
template <typename Dtype>
void BNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  frozen_ = this->layer_param_.bn_param().frozen();
  moving_average_ = this->layer_param_.bn_param().moving_average();
  bn_momentum_ = this->layer_param_.bn_param().momentum();
  bn_eps_ = this->layer_param_.bn_param().eps();
  // Initialize parameters
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (moving_average_) {
      this->blobs_.resize(4);
    } else {
      this->blobs_.resize(2);
    }
    vector<int> shape;
    shape.push_back(1);
    shape.push_back(bottom[0]->channels());
    // slope
    this->blobs_[0].reset(new Blob<Dtype>(shape));
    shared_ptr<Filler<Dtype> > slope_filler(GetFiller<Dtype>(
        this->layer_param_.bn_param().slope_filler()));
    slope_filler->Fill(this->blobs_[0].get());
    // bias
    this->blobs_[1].reset(new Blob<Dtype>(shape));
    shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
        this->layer_param_.bn_param().bias_filler()));
    bias_filler->Fill(this->blobs_[1].get());
    if (this->blobs_.size() > 2) {
      // moving average mean, initialized to zero
      this->blobs_[2].reset(new Blob<Dtype>(shape));
      caffe_set(this->blobs_[2]->count(), Dtype(0),
          this->blobs_[2]->mutable_cpu_data());
      // moving average variance, initialized to one
      this->blobs_[3].reset(new Blob<Dtype>(shape));
      caffe_set(this->blobs_[3]->count(), Dtype(1),
          this->blobs_[3]->mutable_cpu_data());
    }
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
Example 3: caffe_set
template <typename Dtype>
void ReLUModLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  hist_res = 256;
  num_sample_ = 0;
  num_pos_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  sum_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  sum_sq_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  hist_.Reshape(1, 1, hist_res * 2 + 1, bottom[0]->channels());
  sum_prod_.Reshape(1, 1, bottom[0]->channels(), bottom[0]->channels());
  // Zero all statistics accumulators (num_pos_ and hist_ hold unsigned counts).
  caffe_set(sum_.count(), (Dtype)0, sum_.mutable_cpu_data());
  caffe_set(sum_sq_.count(), (Dtype)0, sum_sq_.mutable_cpu_data());
  caffe_set(num_pos_.count(), (unsigned)0, num_pos_.mutable_cpu_data());
  caffe_set(hist_.count(), (unsigned)0, hist_.mutable_cpu_data());
  caffe_set(sum_prod_.count(), (Dtype)0, sum_prod_.mutable_cpu_data());
  // Remove any analysis dump left over from a previous run.
  string filename = this->layer_param_.name() + "-analysis";
  string cmd = "rm " + filename;
  system(cmd.c_str());
  LOG(INFO) << "ReLUMod: LayerSetUp";
}
Example 4: caffe_set
template <typename Dtype>
void ConvNormLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Zero the convolution top diff, then fill it with the normalized gradient.
  caffe_set(conv_top_vec[0]->count(), (Dtype)0, conv_top_vec[0]->mutable_cpu_diff());
  for (int n = 0; n < conv_top_vec[0]->num(); ++n) {
    caffe_div(norm_top.count(), top[0]->cpu_diff() + top[0]->offset(n),
        norm_top.cpu_data(),
        conv_top_vec[0]->mutable_cpu_diff() + conv_top_vec[0]->offset(n));
  }
  conv_layer->Backward(conv_top_vec, propagate_down, bottom);
}
Example 5: caffe_set
template <typename Dtype>
void CropLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  if (propagate_down[0]) {
    // Zero the full bottom diff, then copy the cropped region's gradient back.
    caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    std::vector<int> indices(top[0]->num_axes(), 0);
    crop_copy(bottom, top, offsets.cpu_data(), indices, 0, top_diff,
        bottom_diff, false);
  }
}
Example 6: CHECK_EQ
template <typename Dtype>
void ConvolutionSKLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";
  num_ = bottom[0]->num();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with"
      " convolution kernel.";
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num.";
    CHECK_EQ(channels_, bottom[bottom_id]->channels())
        << "Inputs must have same channels.";
    CHECK_EQ(height_, bottom[bottom_id]->height())
        << "Inputs must have same height.";
    CHECK_EQ(width_, bottom[bottom_id]->width())
        << "Inputs must have same width.";
  }
  // Shape the tops.
  compute_output_shape();
  for (int top_id = 0; top_id < top.size(); ++top_id) {
    top[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
  }
  if (reverse_dimensions()) {
    conv_in_height_ = height_out_;
    conv_in_width_ = width_out_;
    conv_out_spatial_dim_ = height_ * width_;
  } else {
    conv_in_height_ = height_;
    conv_in_width_ = width_;
    conv_out_spatial_dim_ = height_out_ * width_out_;
  }
  kernel_dim_ = conv_in_channels_ * kernel_h_ * kernel_w_;
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_;
  col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_;
  output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;
  // The im2col result buffer only holds one image at a time to avoid
  // overly large memory usage. In the special case of 1x1 convolution
  // it goes lazily unused to save memory.
  if (reverse_dimensions()) {
    col_buffer_.Reshape(1, kernel_dim_, height_, width_);
  } else {
    col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_);
  }
  // Set up the all-ones "bias multiplier" for adding biases via BLAS.
  if (bias_term_) {
    vector<int> bias_multiplier_shape(1, height_out_ * width_out_);
    bias_multiplier_.Reshape(bias_multiplier_shape);
    caffe_set(bias_multiplier_.count(), Dtype(1),
        bias_multiplier_.mutable_cpu_data());
  }
}
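The all-ones "bias multiplier" set up above (and in several of the later examples) exists so a per-channel bias can be broadcast across every spatial position with a single rank-1 GEMM rather than an explicit loop. A minimal sketch of the idea, using Caffe's caffe_cpu_gemm BLAS wrapper; the function add_bias and its parameter names are illustrative:

#include "caffe/util/math_functions.hpp"

// output (num_output x spatial) += bias (num_output x 1) * ones (1 x spatial)
void add_bias(int num_output, int spatial,
              const float* bias, const float* ones, float* output) {
  caffe::caffe_cpu_gemm<float>(CblasNoTrans, CblasNoTrans,
      num_output, spatial, 1,
      1.f, bias, ones,   // rank-1 product broadcasts the bias row-wise
      1.f, output);      // beta = 1 accumulates into the existing output
}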
Example 7: Fill
virtual void Fill(Blob<Dtype>* blob) {
  CHECK(blob->count());
  Dtype* blob_data = blob->mutable_cpu_data();
  caffe_set(blob->count(), Dtype(0), blob_data);
  const int kernel_area = blob->height() * blob->width();
  int channels = blob->channels();
  int num = blob->num();
  // Fill the "diagonal" kernels (output n, input n) with an averaging value.
  for (int n = 0; n < num && n < channels; ++n) {
    Dtype curr_val;
    if (this->filler_param_.diag_val_size() > n) {
      curr_val = this->filler_param_.diag_val(n);
    } else {
      curr_val = 1;
    }
    curr_val /= static_cast<Dtype>(kernel_area);
    caffe_set(kernel_area, curr_val, blob_data + kernel_area * (channels * n + n));
  }
  CHECK_EQ(this->filler_param_.sparse(), -1)
      << "Sparsity not supported by this Filler.";
}
Example 8: deinterpolate_cpu
template <typename Dtype>
void deinterpolate_cpu(const Dtype* input, const unsigned int* indices,
    const int input_size, const int output_size, const int channels,
    Dtype* output) {
  // Zero the output, then scatter-add each input element to its destination
  // index; the same index map is reused for every channel.
  caffe_set(output_size * channels, Dtype(0), output);
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < input_size; ++i) {
      output[indices[i]] += input[i];
    }
    input += input_size;
    output += output_size;
  }
}
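For intuition, a tiny example of calling the helper above (all values are illustrative): inputs that map to the same output index accumulate, which is exactly the adjoint of an interpolation gather.

void deinterpolate_demo() {
  const float in[4] = {1.f, 2.f, 3.f, 4.f};
  const unsigned int idx[4] = {0, 2, 2, 5};  // input element -> output slot
  float out[6];
  deinterpolate_cpu(in, idx, 4, 6, 1, out);
  // out is now {1, 0, 5, 0, 0, 4}: in[1] and in[2] both landed in out[2].
}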
Example 9: caffe_sub
template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();  // count = n * c * h * w
  // const Dtype* sampleW = bottom[3]->cpu_data();
  caffe_sub(count,
      bottom[0]->cpu_data(),         // a (anchor)
      bottom[1]->cpu_data(),         // p (positive)
      diff_ap_.mutable_cpu_data());  // diff_ap_ = a - p
  caffe_sub(count,
      bottom[0]->cpu_data(),         // a
      bottom[2]->cpu_data(),         // n (negative)
      diff_an_.mutable_cpu_data());  // diff_an_ = a - n
  caffe_sub(count,
      bottom[1]->cpu_data(),         // p
      bottom[2]->cpu_data(),         // n
      diff_pn_.mutable_cpu_data());  // diff_pn_ = p - n
  const int channels = bottom[0]->channels();
  Dtype margin = this->layer_param_.triplet_loss_param().margin();  // alpha
  Dtype loss(0.0);  // accumulates the loss over this batch
  for (int i = 0; i < bottom[0]->num(); ++i) {  // for every triplet
    dist_sq_ap_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
        diff_ap_.cpu_data() + (i * channels), diff_ap_.cpu_data() + (i * channels));
    dist_sq_an_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
        diff_an_.cpu_data() + (i * channels), diff_an_.cpu_data() + (i * channels));
    // mdist is this triplet's hinge loss
    Dtype mdist = std::max(
        margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0));
    loss += mdist;
    if (mdist == Dtype(0)) {
      // Inactive triplet: zero its cached diffs so it contributes no gradient.
      caffe_set(channels, Dtype(0), diff_ap_.mutable_cpu_data() + (i * channels));
      caffe_set(channels, Dtype(0), diff_an_.mutable_cpu_data() + (i * channels));
      caffe_set(channels, Dtype(0), diff_pn_.mutable_cpu_data() + (i * channels));
    }
  }
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
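In formula form, the forward pass above computes the standard triplet hinge loss; a sketch of the math the code implements, with alpha the configured margin and N the batch size:

$$ L = \frac{1}{2N} \sum_{i=1}^{N} \max\!\left( \lVert a_i - p_i \rVert_2^2 - \lVert a_i - n_i \rVert_2^2 + \alpha,\; 0 \right) $$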
Example 10: caffe_set
template <typename Dtype>
void DeconvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
  if (this->param_propagate_down_[0]) {
    caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
  }
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    caffe_set(this->blobs_[1]->count(), Dtype(0),
        this->blobs_[1]->mutable_cpu_diff());
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1]) {
      Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
      for (int n = 0; n < this->num_; ++n) {
        this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_);
      }
    }
    if (this->param_propagate_down_[0] || propagate_down[i]) {
      for (int n = 0; n < this->num_; ++n) {
        // Gradient w.r.t. weight. Note that we will accumulate diffs.
        if (this->param_propagate_down_[0]) {
          this->weight_cpu_gemm(top_diff + n * this->top_dim_,
              bottom_data + n * this->bottom_dim_, weight_diff);
        }
        // Gradient w.r.t. bottom data, if necessary, reusing the column
        // buffer we might have just computed above.
        if (propagate_down[i]) {
          this->forward_cpu_gemm(top_diff + n * this->top_dim_, weight,
              bottom_diff + n * this->bottom_dim_,
              this->param_propagate_down_[0]);
        }
      }
    }
  }
}
Example 11: caffe_set
template <typename Dtype>
void BNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  if (top.size() > 1) {
    // top blob for batch mean
    top[1]->Reshape(1, C_, 1, 1);
  }
  if (top.size() > 2) {
    // top blob for batch variance
    top[2]->Reshape(1, C_, 1, 1);
  }
  x_norm_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  // mean
  spatial_mean_.Reshape(N_, C_, 1, 1);
  batch_mean_.Reshape(1, C_, 1, 1);
  // variance
  spatial_variance_.Reshape(N_, C_, 1, 1);
  batch_variance_.Reshape(1, C_, 1, 1);
  // buffer blob
  buffer_blob_.Reshape(N_, C_, H_, W_);
  // fill spatial multiplier: ones in the data, zeros in the diff
  spatial_sum_multiplier_.Reshape(1, 1, H_, W_);
  Dtype* spatial_multipl_data = spatial_sum_multiplier_.mutable_cpu_data();
  caffe_set(spatial_sum_multiplier_.count(), Dtype(1), spatial_multipl_data);
  caffe_set(spatial_sum_multiplier_.count(), Dtype(0),
      spatial_sum_multiplier_.mutable_cpu_diff());
  // fill batch multiplier likewise
  batch_sum_multiplier_.Reshape(N_, 1, 1, 1);
  Dtype* batch_multiplier_data = batch_sum_multiplier_.mutable_cpu_data();
  caffe_set(batch_sum_multiplier_.count(), Dtype(1), batch_multiplier_data);
  caffe_set(batch_sum_multiplier_.count(), Dtype(0),
      batch_sum_multiplier_.mutable_cpu_diff());
}
Example 12: caffe_set
template <typename Dtype>
void MovingNormalizeLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
  squared_.ReshapeLike(*bottom[0]);
  if (top.size() == 2) {
    top[1]->Reshape({ 1 });
  }
  norm_.Reshape(bottom[0]->num(), 1,
      bottom[0]->height(), bottom[0]->width());
  sum_multiplier_.Reshape(bottom[0]->num(), 1,
      bottom[0]->height(), bottom[0]->width());
  caffe_set(sum_multiplier_.count(), Dtype(1), sum_multiplier_.mutable_cpu_data());
}
Example 13: top_shape
template <typename Dtype>
void SparseInnerProductLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // The top shape will be M_ x N_.
  vector<int> top_shape(2, M_);
  top_shape[1] = N_;
  top[0]->Reshape(top_shape);
  // Set up the bias multiplier.
  if (bias_term_) {
    vector<int> bias_shape(1, M_);
    bias_multiplier_.Reshape(bias_shape);
    caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
  }
}
Example 14: Fill
virtual void Fill(Blob<Dtype>* blob) {
  CHECK(blob->count());
  // Identity filler: requires a square weight matrix (num == channels).
  int fan_in = blob->count() / blob->num();
  int fan_out = blob->count() / blob->channels();
  CHECK_EQ(fan_in, fan_out);
  Dtype* blob_data = blob->mutable_cpu_data();
  caffe_set(blob->count(), Dtype(0), blob_data);
  // Set the diagonal entries to one.
  for (int i = 0; i < blob->num(); i++) {
    blob_data[i * blob->channels() + i] = Dtype(1);
  }
  CHECK_EQ(this->filler_param_.sparse(), -1)
      << "Sparsity not supported by this Filler.";
}
Example 15: CHECK_EQ
template <typename Dtype>
void InnerProductLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Figure out the dimensions.
  M_ = bottom[0]->num();
  CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_) << "Input size "
      "incompatible with inner product parameters.";
  top[0]->Reshape(bottom[0]->num(), N_, 1, 1);
  // Set up the bias multiplier.
  if (bias_term_) {
    bias_multiplier_.Reshape(1, 1, 1, M_);
    caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
  }
}