本文整理汇总了C++中caffe_copy函数的典型用法代码示例。如果您正苦于以下问题:C++ caffe_copy函数的具体用法?C++ caffe_copy怎么用?C++ caffe_copy使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了caffe_copy函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: caffe_copy
void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Take the next filled batch off the producer/consumer queue; this blocks
  // until the background prefetch thread has produced one.
  Batch<Dtype>* loaded = prefetch_full_.pop("Data layer prefetch queue empty");

  // Match the output blob to the prefetched data, then hand the data over.
  top[0]->ReshapeLike(loaded->data_);
  caffe_copy(loaded->data_.count(), loaded->data_.cpu_data(),
             top[0]->mutable_cpu_data());
  DLOG(INFO) << "Prefetch copied";

  if (this->output_labels_) {
    // A second top blob carries the labels when the layer is configured to
    // output them.
    top[1]->ReshapeLike(loaded->label_);
    caffe_copy(loaded->label_.count(), loaded->label_.cpu_data(),
               top[1]->mutable_cpu_data());
  }

  // Recycle the batch so the prefetch thread can refill it.
  prefetch_free_.push(loaded);
}
示例2: caffe_copy
void LabelSpecificAutoLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // Nothing to do when no gradient is requested or the layer ran in-place
  // (top and bottom share the same blob, so the diff is already there).
  if (!propagate_down[0] || top[0] == bottom[0]) {
    return;
  }
  // Identity backward: pass the top gradient straight through.
  caffe_copy(bottom[0]->count(), top[0]->cpu_diff(),
             bottom[0]->mutable_cpu_diff());
}
示例3: TYPED_TEST
TYPED_TEST(NeuronLayerTest, TestPReLUConsistencyReLU) {
  typedef typename TypeParam::Dtype Dtype;
  // A freshly-initialized PReLU layer (default slope 0.25) must match a ReLU
  // layer configured with negative_slope = 0.25, both forward and backward.
  LayerParameter prelu_layer_param;
  LayerParameter relu_layer_param;
  relu_layer_param.mutable_relu_param()->set_negative_slope(0.25);
  PReLULayer<Dtype> prelu(prelu_layer_param);
  ReLULayer<Dtype> relu(relu_layer_param);
  // Set up an independent bottom/top pair for the ReLU layer, seeded with a
  // copy of the shared test input so both layers see identical data.
  vector<Blob<Dtype>*> blob_bottom_vec_2;
  vector<Blob<Dtype>*> blob_top_vec_2;
  shared_ptr<Blob<Dtype> > blob_bottom_2(new Blob<Dtype>());
  shared_ptr<Blob<Dtype> > blob_top_2(new Blob<Dtype>());
  blob_bottom_vec_2.push_back(blob_bottom_2.get());
  blob_top_vec_2.push_back(blob_top_2.get());
  blob_bottom_2->CopyFrom(*this->blob_bottom_, false, true);
  // SetUp layers
  prelu.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  relu.SetUp(blob_bottom_vec_2, blob_top_vec_2);
  // Check forward.
  prelu.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Fix: the ReLU layer must run on its own bottom vector (the one it was
  // set up with and that Backward uses below), not this->blob_bottom_vec_.
  relu.Forward(blob_bottom_vec_2, blob_top_vec_2);
  for (int s = 0; s < blob_top_2->count(); ++s) {
    EXPECT_EQ(this->blob_top_->cpu_data()[s], blob_top_2->cpu_data()[s]);
  }
  // Check backward: seed both layers with the same random top diff.
  shared_ptr<Blob<Dtype> > tmp_blob(new Blob<Dtype>());
  tmp_blob->ReshapeLike(*blob_top_2.get());
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  filler.Fill(tmp_blob.get());
  caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(),
             this->blob_top_->mutable_cpu_diff());
  caffe_copy(blob_top_2->count(), tmp_blob->cpu_data(),
             blob_top_2->mutable_cpu_diff());
  vector<bool> propagate_down;
  propagate_down.push_back(true);
  prelu.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_);
  relu.Backward(blob_top_vec_2, propagate_down, blob_bottom_vec_2);
  for (int s = 0; s < blob_bottom_2->count(); ++s) {
    EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]);
  }
}
示例4: caffe_copy
void UnifiedLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Concatenate every child layer's data blob back-to-back into the single
  // unified output blob.
  Dtype* dst = top[0]->mutable_cpu_data();
  int write_offset = 0;
  for (int child = 0; child < childlayer_num_; ++child) {
    const int chunk = bottom[child]->count();
    caffe_copy(chunk, bottom[child]->cpu_data(), dst + write_offset);
    write_offset += chunk;
  }
}
示例5: caffe_gpu_set
// Backward pass for the power layer, y = (shift + scale * x)^power, on GPU.
// Computes bottom_diff = top_diff * dy/dx, using bottom_diff itself as the
// scratch buffer for dy/dx, so statement order below is significant.
void PowerLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
Dtype* bottom_diff = (bottom)[0]->mutable_gpu_diff();
const int count = (bottom)[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
// Per the derivative formula below, diff_scale_ is scale * power; when it is
// zero, or power == 1, dy/dx is the constant diff_scale_ everywhere.
if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
caffe_gpu_set(count, diff_scale_, bottom_diff);
} else {
const Dtype* bottom_data = (bottom)[0]->gpu_data();
// Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
// = diff_scale * y / (shift + scale * x)
if (power_ == Dtype(2)) {
// Special case for y = (shift + scale * x)^2
// -> dy/dx = 2 * scale * (shift + scale * x)
// = diff_scale * shift + diff_scale * scale * x
caffe_gpu_axpby(
count,
diff_scale_ * scale_,
bottom_data,
Dtype(0),
bottom_diff);
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff);
}
} else if (shift_ == Dtype(0)) {
// Special case for y = (scale * x)^power
// -> dy/dx = scale * power * (scale * x)^(power - 1)
// = scale * power * (scale * x)^power * (scale * x)^(-1)
// = power * y / x
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
caffe_gpu_scal(count, power_, bottom_diff);
} else {
// General case: build (shift + scale * x) in bottom_diff first...
caffe_copy(count, bottom_data, bottom_diff);
if (scale_ != Dtype(1)) {
caffe_gpu_scal(count, scale_, bottom_diff);
}
if (shift_ != Dtype(0)) {
caffe_gpu_add_scalar(count, shift_, bottom_diff);
}
// ...then divide y by it in place and apply diff_scale, yielding
// dy/dx = diff_scale * y / (shift + scale * x).
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
if (diff_scale_ != Dtype(1)) {
caffe_gpu_scal(count, diff_scale_, bottom_diff);
}
}
}
// Chain rule: bottom_diff = top_diff * dy/dx (element-wise, in place).
caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff);
}
}
示例6: caffe_copy
void SumLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Seed the output with the first input, then accumulate the remaining
  // inputs into it element-wise.
  const int count = bottom[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_copy(count, bottom[0]->cpu_data(), top_data);
  for (size_t b = 1; b < bottom.size(); ++b) {
    // axpby with alpha = beta = 1 performs top_data += bottom[b].
    caffe_cpu_axpby(count, Dtype(1.0), bottom[b]->cpu_data(),
                    Dtype(1.0), top_data);
  }
}
示例7: JoinPrefetchThread
Dtype DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // Wait for the background prefetch thread to finish filling its buffers.
  JoinPrefetchThread();
  // Hand the prefetched data (and labels, if enabled) to this layer's output.
  caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(),
             (*top)[0]->mutable_cpu_data());
  if (output_labels_) {
    caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(),
               (*top)[1]->mutable_cpu_data());
  }
  // Kick off loading of the next batch while the net consumes this one.
  CreatePrefetchThread();
  return Dtype(0.);
}
示例8: CHECK_GE
void HDF5OutputLayer<Dtype, MItype, MOtype>::Forward_cpu(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  // Expect (data, label) inputs with matching batch sizes.
  CHECK_GE(bottom.size(), 2);
  CHECK_EQ(bottom[0]->num(), bottom[1]->num());
  data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                     bottom[0]->height(), bottom[0]->width());
  label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(),
                      bottom[1]->height(), bottom[1]->width());
  // Per-sample element counts for data and label.
  const int_tp data_datum_dim = bottom[0]->count() / bottom[0]->num();
  const int_tp label_datum_dim = bottom[1]->count() / bottom[1]->num();
  // Stage every sample into the internal blobs, one datum at a time.
  for (int_tp n = 0; n < bottom[0]->num(); ++n) {
    caffe_copy(data_datum_dim,
               &bottom[0]->cpu_data()[n * data_datum_dim],
               &data_blob_.mutable_cpu_data()[n * data_datum_dim]);
    caffe_copy(label_datum_dim,
               &bottom[1]->cpu_data()[n * label_datum_dim],
               &label_blob_.mutable_cpu_data()[n * label_datum_dim]);
  }
  // Flush the staged batch out to the HDF5 file.
  SaveBlobs();
}
示例9: JoinPrefetchThread
void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Block until the background prefetch thread has produced a batch.
  JoinPrefetchThread();
  DLOG(INFO) << "Thread joined";
  // Size the output to whatever the prefetch buffer currently holds.
  top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(),
                  this->prefetch_data_.height(), this->prefetch_data_.width());
  // Hand the prefetched data to the output blob.
  caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(),
             top[0]->mutable_cpu_data());
  DLOG(INFO) << "Prefetch copied";
  if (this->output_labels_) {
    // Labels go to the second top blob when configured.
    caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(),
               top[1]->mutable_cpu_data());
  }
  // Overlap loading of the next batch with downstream computation.
  DLOG(INFO) << "CreatePrefetchThread";
  CreatePrefetchThread();
}
示例10: caffe_copy
void LastRowLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Route the whole top gradient into the last slice along axis 0 of the
  // bottom blob; diffs of the other slices are left untouched.
  const int axis0 = bottom[0]->shape(0);
  const int axis1 = bottom[0]->shape(1);
  const int axis2 = bottom[0]->shape(2);
  Dtype* last_slice_diff =
      bottom[0]->mutable_cpu_diff() + bottom[0]->offset(axis0 - 1);
  caffe_copy(axis1 * axis2, top[0]->cpu_diff(), last_slice_diff);
}
示例11: TYPED_TEST
TYPED_TEST(MathFunctionsTest, TestCopyGPU) {
  // Copy the bottom blob to the top blob on the device, then verify the
  // result element-wise through the host-side views.
  const int count = this->blob_bottom_->count();
  const TypeParam* src_gpu = this->blob_bottom_->gpu_data();
  TypeParam* dst_gpu = this->blob_top_->mutable_gpu_data();
  Caffe::set_mode(Caffe::GPU);
  caffe_copy(count, src_gpu, dst_gpu);
  const TypeParam* src_cpu = this->blob_bottom_->cpu_data();
  const TypeParam* dst_cpu = this->blob_top_->cpu_data();
  for (int idx = 0; idx < count; ++idx) {
    EXPECT_EQ(src_cpu[idx], dst_cpu[idx]);
  }
}
示例12: caffe_copy
void IgnoreOverlayLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Start from a copy of the second input, then overlay the ignore label
  // wherever the first input carries it.
  Dtype* top_data = top[0]->mutable_cpu_data();
  caffe_copy(bottom[1]->count(), bottom[1]->cpu_data(), top_data);
  const Dtype* overlay = bottom[0]->cpu_data();
  const int total = bottom[0]->count();
  for (int idx = 0; idx < total; ++idx) {
    const int label = overlay[idx];
    if (label == ignore_label_) {
      top_data[idx] = static_cast<Dtype>(label);
    }
  }
}
示例13: caffe_copy
void TileLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Write each inner_dim_-sized input slice tiles_ times in a row into the
  // output, for every outer position.
  const Dtype* src = bottom[0]->cpu_data();
  Dtype* dst = top[0]->mutable_cpu_data();
  for (int outer = 0; outer < outer_dim_; ++outer) {
    const Dtype* slice = src + outer * inner_dim_;
    for (int rep = 0; rep < tiles_; ++rep) {
      caffe_copy(inner_dim_, slice, dst);
      dst += inner_dim_;
    }
  }
}
示例14: DCHECK_GE
void SelectLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // bottom[num_cand_] holds, per outer position, the index of the candidate
  // bottom whose slice should be forwarded to the output.
  Dtype* out = top[0]->mutable_cpu_data();
  const Dtype* selector = bottom[num_cand_]->cpu_data();
  for (int pos = 0; pos < outer_dim_; ++pos) {
    const int choice = static_cast<int>(selector[pos]);
    DCHECK_GE(choice, 0);
    DCHECK_LT(choice, num_cand_);
    caffe_copy(inner_dim_, bottom[choice]->cpu_data() + inner_dim_ * pos, out);
    out += inner_dim_;
  }
}
示例15: ASSERT
void Tensor<Dtype>::CopyChunkFrom(const Tensor& source, int count,
    int this_offset, int other_offset) {
  // Bounds-check both ends of the copy before touching any memory.
  ASSERT(source.count() >= count + other_offset,
         "Chunk exceeds source memory: "
             << count << " + " << other_offset << " > " << source.count());
  ASSERT(this->count() >= count + this_offset,
         "Chunk exceeds target memory: "
             << count << " + " << this_offset << " > " << this->count());
  // Dispatch on the active compute mode so the copy uses the matching
  // (host or device) memory view.
  switch (mode()) {
    case Caffe::CPU: {
      caffe_copy(count, source.cpu_mem() + other_offset,
                 mutable_cpu_mem() + this_offset);
      break;
    }
    case Caffe::GPU: {
      caffe_copy(count, source.gpu_mem() + other_offset,
                 mutable_gpu_mem() + this_offset);
      break;
    }
    default:
      LOG(FATAL) << "Unknown caffe mode.";
  }
}