

C++ caffe_scal Function Code Examples

This article collects typical usage examples of the C++ caffe_scal function. If you have been wondering how caffe_scal is used in practice, how to call it, or where to find real examples, the hand-picked code samples below should help.


The following shows 15 code examples of the caffe_scal function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
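Before diving in, a quick orientation: caffe_scal is Caffe's thin wrapper around the BLAS scal routine (cblas_sscal / cblas_dscal), declared in caffe/util/math_functions.hpp as template &lt;typename Dtype&gt; void caffe_scal(const int N, const Dtype alpha, Dtype* X). It scales N elements of X in place: X[i] = alpha * X[i]. A minimal, self-contained sketch of the call (assuming a Caffe build with its headers on the include path):

#include <vector>
#include "caffe/util/math_functions.hpp"

int main() {
  std::vector<float> x(8, 2.0f);         // eight elements, all 2.0
  caffe::caffe_scal(8, 0.5f, x.data());  // in place: x[i] = 0.5f * x[i]
  // x now holds 1.0 everywhere. Every Backward_cpu example below uses the
  // same call to fold a scalar (typically loss_weight / normalizer) into a
  // diff buffer.
  return 0;
}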

Example 1: LOG

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->cpu_data();
    int dim = prob_.count() / outer_num_;
    int count = 0;
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
        } else {
          bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
          ++count;
        }
      }
    }
    // Scale gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    if (normalize_) {
      caffe_scal(prob_.count(), loss_weight / count, bottom_diff);
    } else {
      caffe_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
    }
  }
}
Developer ID: 105, Project: caffe, Lines: 35, Source: softmax_loss_layer.cpp
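For reference, the math Example 1 implements: with softmax probabilities p and ground-truth class y, the cross-entropy gradient with respect to the logits z is

\[ \frac{\partial L}{\partial z_c} = p_c - \mathbf{1}[c = y], \]

which is why the code copies prob_data into bottom_diff, subtracts 1 at each ground-truth index, and then uses a single caffe_scal to fold in loss_weight divided by the normalizer (the count of non-ignored labels when normalize_ is set, otherwise outer_num_).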

Example 2: LOG

  template <typename Dtype>
  void HingeLossLayer<Dtype>::Backward_cpu(
      const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[1]) {
      LOG(FATAL) << this->type()
      << " Layer cannot backpropagate to label inputs.";
    }
    if (propagate_down[0]) {
      Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
      const Dtype* label = bottom[1]->cpu_data();
      int num = bottom[0]->num();
      int count = bottom[0]->count();
      int dim = count / num;

      for (int i = 0; i < num; ++i) {
        bottom_diff[i * dim + static_cast<int>(label[i])] *= -1;
      }

      const Dtype loss_weight = top[0]->cpu_diff()[0];
      switch (this->layer_param_.hinge_loss_param().norm()) {
        case HingeLossParameter_Norm_L1:
        caffe_cpu_sign(count, bottom_diff, bottom_diff);
        caffe_scal(count, loss_weight / num, bottom_diff);
        break;
        case HingeLossParameter_Norm_L2:
        caffe_scal(count, loss_weight * 2 / num, bottom_diff);
        break;
        default:
        LOG(FATAL) << "Unknown Norm";
      }
    }
  }
Developer ID: rickyHong, Project: CaffeForOpenCL, Lines: 33, Source: hinge_loss_layer.cpp
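The two branches in Example 2 match the two hinge-loss variants. Writing the margins stored by the forward pass as \( m_{nk} = \max(0,\, 1 + t_{nk} s_{nk}) \), with \( t_{nk} = -1 \) for the true class and \( +1 \) otherwise, the loss is

\[ E = \frac{1}{N} \sum_{n} \sum_{k} m_{nk}^{p}, \qquad p \in \{1, 2\}. \]

The backward pass first re-applies \( t_{nk} \) by flipping the sign at the label index; for L1 it then takes the elementwise sign and scales by loss_weight / num (caffe_cpu_sign followed by caffe_scal), while for L2 the chain rule contributes an extra factor of 2, hence loss_weight * 2 / num.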

Example 3: switch

template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::SYNCED_PRV:
  case SyncedMemory::HEAD_AT_PRV:
    diff = mutable_prv_diff();
    caffe_scal(prv_diff_count(), scale_factor, diff);
    break;
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}
Developer ID: azrael417, Project: caffe, Lines: 28, Source: blob.cpp
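scale_diff is the dispatch point that routes one logical operation to caffe_scal or caffe_gpu_scal depending on where the diff currently lives. A typical caller is gradient clipping in the solver; a rough sketch (modeled on Caffe's SGDSolver::ClipGradients, with clip_gradients and l2norm_diff assumed to be computed elsewhere):

// Sketch only: net_params is the solver's vector of learnable blobs.
if (l2norm_diff > clip_gradients) {
  Dtype scale_factor = clip_gradients / l2norm_diff;
  for (int i = 0; i < net_params.size(); ++i) {
    net_params[i]->scale_diff(scale_factor);  // CPU, GPU, or private memory: resolved inside
  }
}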

Example 4: caffe_scal

template <typename Dtype>
void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    Dtype* top_diff = top[0]->mutable_cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    if (this->phase_ == TRAIN) {
      if (drop_batch_) {
        Dtype drop = rand_vec_->cpu_data()[0];
        // scale + mask
        caffe_scal(top[0]->count(), Dtype(scale_ * drop), top_diff);
        caffe_copy(top[0]->count(), top_diff, bottom_diff);
      } else {
        // scale
        caffe_scal(top[0]->count(), scale_, top_diff);
        // multiply mask
        vector<Blob<Dtype>*> scale_bottom(2, NULL);
        scale_bottom[0] = bottom[0];
        scale_bottom[1] = rand_vec_;
        const vector<Blob<Dtype>*> scale_top(1, top[0]);
        vector<bool> prop_down(2, true);
        prop_down[1] = false;
        scale_layer_->Backward(scale_top, prop_down, scale_bottom);
      }
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);
    }
  }
}
Developer ID: jasonustc, Project: caffe-multigpu, Lines: 30, Source: dropout_layer.cpp
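The scale_ factor in Example 4 is inverted-dropout scaling: with drop probability p (threshold_ in Caffe), surviving units are scaled by 1 / (1 - p) during training so activations match their test-time expectation, and the backward pass applies the same factor and mask to the gradient:

\[ \texttt{scale\_} = \frac{1}{1 - p}, \qquad \frac{\partial L}{\partial x_i} = \texttt{scale\_} \cdot m_i \cdot \frac{\partial L}{\partial y_i}, \]

where m_i is the Bernoulli mask (in this fork applied either once per batch via drop_batch_, or per element through the internal scale layer).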

Example 5: caffe_set

template <typename Dtype>
void PowerLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    const Dtype* top_diff = top[0]->cpu_diff();
    if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
      caffe_set(count, diff_scale_, bottom_diff);
    } else {
      const Dtype* bottom_data = bottom[0]->cpu_data();
      // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
      //               = diff_scale * y / (shift + scale * x)
      if (power_ == Dtype(2)) {
        // Special case for y = (shift + scale * x)^2
        //     -> dy/dx = 2 * scale * (shift + scale * x)
        //              = diff_scale * shift + diff_scale * scale * x
        caffe_cpu_axpby(
            count,
            diff_scale_ * scale_,
            bottom_data,
            Dtype(0),
            bottom_diff);

        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, diff_scale_ * shift_, bottom_diff);
        }
      } else if (shift_ == Dtype(0)) {
        // Special case for y = (scale * x)^power
        //     -> dy/dx = scale * power * (scale * x)^(power - 1)
        //              = scale * power * (scale * x)^power * (scale * x)^(-1)
        //              = power * y / x
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div(count, top_data, bottom_data, bottom_diff);
        caffe_scal(count, power_, bottom_diff);
      } else {
        caffe_copy(count, bottom_data, bottom_diff);
        if (scale_ != Dtype(1)) {
          caffe_scal(count, scale_, bottom_diff);
        }
        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, shift_, bottom_diff);
        }
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
        if (diff_scale_ != Dtype(1)) {
          caffe_scal(count, diff_scale_, bottom_diff);
        }
      }
    }
    if (diff_scale_ != Dtype(0)) {
      caffe_mul(count, top_diff, bottom_diff, bottom_diff);
    }
  }
}
Developer ID: rickyHong, Project: CaffeForOpenCL, Lines: 56, Source: power_layer.cpp
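All the special cases in Example 5 implement the same derivative. For y = (shift + scale * x)^power with diff_scale = scale * power:

\[ \frac{dy}{dx} = \text{scale} \cdot \text{power} \cdot (\text{shift} + \text{scale}\,x)^{\text{power}-1} = \frac{\text{diff\_scale} \cdot y}{\text{shift} + \text{scale}\,x}, \]

and the final caffe_mul with top_diff completes the chain rule. The branches merely avoid the division when power is 2 or shift is 0.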

Example 6: caffe_sub

template <typename Dtype>
void TripletRankingHingeLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* orignalcode;
  const Dtype* similarcode;
  const Dtype* diffrcode;
  if (propagate_down[0]) {
    for (int i = 0; i < 3; ++i) {
      for (int j = 0; j < batch_; ++j) {
        Dtype* bout = bottom[i]->mutable_cpu_diff();
        orignalcode = bottom[0]->cpu_data() + bottom[0]->offset(j);
        similarcode = bottom[1]->cpu_data() + bottom[1]->offset(j);
        diffrcode = bottom[2]->cpu_data() + bottom[2]->offset(j);
        if (i == 0) {
          if (dist_sq_.cpu_data()[j] > Dtype(0.0)) {
            caffe_sub(dim_, diffrcode, similarcode,
                gradient.mutable_cpu_data());  // the difference of F- and F+
            caffe_scal(dim_, Dtype(2) / Dtype(batch_),
                gradient.mutable_cpu_data());
          } else {
            caffe_set(dim_, Dtype(0.0), gradient.mutable_cpu_data());
          }
        }
        if (i == 1) {
          if (dist_sq_.cpu_data()[j] > Dtype(0.0)) {
            caffe_sub(dim_, similarcode, orignalcode,
                gradient.mutable_cpu_data());  // the difference of F+ and F
            caffe_scal(dim_, Dtype(2) / Dtype(batch_),
                gradient.mutable_cpu_data());
          } else {
            caffe_set(dim_, Dtype(0.0), gradient.mutable_cpu_data());
          }
        }
        if (i == 2) {
          if (dist_sq_.cpu_data()[j] > Dtype(0.0)) {
            caffe_sub(dim_, orignalcode, diffrcode,
                gradient.mutable_cpu_data());  // the difference of F and F-
            caffe_scal(dim_, Dtype(2) / Dtype(batch_),
                gradient.mutable_cpu_data());
          } else {
            caffe_set(dim_, Dtype(0.0), gradient.mutable_cpu_data());
          }
        }
        caffe_scal(dim_, Dtype(2.0), gradient.mutable_cpu_data());
        caffe_copy(dim_, gradient.cpu_data(), bout + (j * dim_));
      }
    }
  }
}
Developer ID: FuchenUSTC, Project: caffe, Lines: 49, Source: triplet_ranking_hinge_loss_layer.cpp

Example 7: caffe_scal

template <typename Dtype>
void BilateralFilterLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  bilateral_interface_cpu_->Backward(
                                propagate_down[0], propagate_down[1],
                                bottom[0], bottom[1], top[0]);
  // Scale gradient
  const Dtype loss_weight = top[0]->cpu_diff()[0];
  if (propagate_down[0]) {
    caffe_scal(bottom[0]->count(), loss_weight, bottom[0]->mutable_cpu_diff());
  }
  if (propagate_down[1]) {
    caffe_scal(bottom[1]->count(), loss_weight, bottom[1]->mutable_cpu_diff());
  }
}
Developer ID: jasonbunk, Project: caffe, Lines: 15, Source: bilateral_filter_layer.cpp

Example 8: LOG

template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }

  if (propagate_down[0]) {
    // data
    Dtype* bottom_diff     = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    const Dtype* label     = bottom[1]->cpu_data();
    // intermediate values cached by the forward pass
    const Dtype* log_prob_data   = log_prob_.cpu_data();
    const Dtype* power_prob_data = power_prob_.cpu_data();

    int count       = 0;
    int channels    = bottom[0]->shape(softmax_axis_);
    int dim         = prob_.count() / outer_num_;
    const Dtype eps = 1e-10;

    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        // label
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);
        
        // ignore label
        if (has_ignore_label_ && label_value == ignore_label_) {
          for (int c = 0; c < channels; ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
          continue;
        }

        // the gradient of FL w.r.t. p_t; the sign is handled below
        int ind_i  = i * dim + label_value * inner_num_ + j; // index of ground-truth label
        Dtype grad = 0 - gamma_ * (power_prob_data[ind_i] / std::max(1 - prob_data[ind_i], eps)) * log_prob_data[ind_i] 
                       + power_prob_data[ind_i] / prob_data[ind_i];
        // the gradient w.r.t input data x
        for (int c = 0; c < channels; ++c) {
          int ind_j = i * dim + c * inner_num_ + j;
          if(c == label_value) {
            CHECK_EQ(ind_i, ind_j);
            // case i == j (here i, j refer to the indices in the softmax derivative)
            bottom_diff[ind_j] = grad * prob_data[ind_i] * (prob_data[ind_i] - 1);
          } else {
            // case i != j (here i, j refer to the indices in the softmax derivative)
            bottom_diff[ind_j] = grad * prob_data[ind_i] * prob_data[ind_j];
          }
        }
        // count valid (non-ignored) samples
        ++count;
      }
    }
    // Scale gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, count);
    caffe_scal(prob_.count(), loss_weight, bottom_diff);
  }
}
Developer ID: hhgxx123, Project: CAFFE_SSD, Lines: 60, Source: focal_loss_layer.cpp
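The grad expression in Example 8 is (up to the sign the comment mentions) the derivative of the focal loss with respect to p_t. With power_prob_ caching (1 - p)^γ (times the α balancing weight, if this implementation uses one) and log_prob_ caching log(p):

\[ \mathrm{FL}(p_t) = -(1 - p_t)^{\gamma} \log(p_t), \qquad \frac{\partial\, \mathrm{FL}}{\partial p_t} = \gamma (1 - p_t)^{\gamma - 1} \log(p_t) - \frac{(1 - p_t)^{\gamma}}{p_t}, \]

which is then chained through the usual softmax Jacobian (the c == label_value and c != label_value branches).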

Example 9: LOG

template <typename Dtype>
void SigmoidCrossEntropyWithValidLabelLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
    const Dtype* target = bottom[1]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_sub(count, sigmoid_output_data, target, bottom_diff);
    if (bottom.size() == 3) { // a valid label is specified
      const Dtype* valid = bottom[2]->cpu_data();
      for (int i = 0; i < count; i++) {
        if (! valid[i]) {
          bottom_diff[i] = 0;
        }
      }
    }
    // Scale down gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_scal(count, loss_weight / num, bottom_diff);
  }
}
Developer ID: rohitgirdhar-cmu-experimental, Project: caffepp, Lines: 28, Source: sigmoid_cross_entropy_with_valid_label_loss_layer.cpp
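Example 9 relies on the standard simplification for sigmoid cross-entropy: for L = -[t log σ(x) + (1 - t) log(1 - σ(x))],

\[ \frac{\partial L}{\partial x} = \sigma(x) - t, \]

which is exactly the caffe_sub line; the optional third blob then zeroes out entries whose labels are marked invalid before caffe_scal applies the loss weight.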

Example 10: caffe_scal

template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {

  double regularization = this->layer_param_.inner_product_param().regularization()/2;
  if (this->param_propagate_down_[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* bottom_data = (*bottom)[0]->cpu_data();
    // Gradient with respect to weight
    caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
        top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff());
    if (regularization > 0) {
      caffe_scal(this->blobs_[0]->count(), Dtype(1.0 + regularization),
          this->blobs_[0]->mutable_cpu_diff());
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    // Gradient with respect to bias
    caffe_cpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
        bias_multiplier_.cpu_data(), (Dtype)0.,
        this->blobs_[1]->mutable_cpu_diff());
  }
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    // Gradient with respect to bottom data
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
        top_diff, this->blobs_[0]->cpu_data(), (Dtype)0.,
        (*bottom)[0]->mutable_cpu_diff());
  }
}
Developer ID: Peratham, Project: videovector, Lines: 31, Source: inner_product_layer.cpp
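The three GEMM/GEMV calls in Example 10 are the standard fully-connected gradients. With top Y = X W^T (X is M_ x K_, W is N_ x K_):

\[ \frac{\partial L}{\partial W} = \Big(\frac{\partial L}{\partial Y}\Big)^{\!\top} X, \qquad \frac{\partial L}{\partial b} = \sum_{n} \frac{\partial L}{\partial Y_{n,:}}, \qquad \frac{\partial L}{\partial X} = \frac{\partial L}{\partial Y}\, W. \]

This fork additionally inflates the weight gradient by a factor of (1 + regularization) via caffe_scal, a project-specific regularization scaling rather than standard Caffe weight decay.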

Example 11: switch

template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    for (int i = 0; i < bottom.size(); ++i) {
      const Dtype* bottom_data = bottom[i]->cpu_data();
      const Dtype* top_diff    = top[i]->cpu_diff();
      Dtype* bottom_diff       = bottom[i]->mutable_cpu_diff();
      Dtype scal;
      for (int n = 0; n < num_; ++n) {
        switch (op_) {
        case NormalizeParameter_NormalizeOp_DEMEAN:
          caffe_copy(imSz_, top_diff + n * imSz_, bottom_diff + n * imSz_);
          break;
        case NormalizeParameter_NormalizeOp_SDSCALE:
          caffe_copy(imSz_, bottom_data + n * imSz_, this->blobs_[0]->mutable_cpu_data());
          caffe_copy(imSz_, top_diff + n * imSz_, this->blobs_[0]->mutable_cpu_diff());
          // Find the scaling factor
          caffe_cpu_zero_mean(imSz_, this->blobs_[0]->mutable_cpu_data());
          scal = caffe_cpu_dot<Dtype>(imSz_, this->blobs_[0]->cpu_data(),
                                      this->blobs_[0]->cpu_data());
          // Apply the scaling to the gradients
          caffe_scal(imSz_, Dtype(1.0 / scal), this->blobs_[0]->mutable_cpu_diff());
          caffe_copy(imSz_, this->blobs_[0]->cpu_diff(), bottom_diff + n * imSz_);
          break;
        default:
          LOG(FATAL) << "Unknown elementwise operation.";
        }
      }
    }
  }
}
Developer ID: castigliano, Project: caffe, Lines: 31, Source: normalize_layer.cpp

Example 12: caffe_copy

template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* top_data = top[i]->mutable_cpu_data();
    Dtype scal;
    for (int n = 0; n < this->num_; ++n) {
      caffe_copy(imSz_, bottom_data + n * imSz_, this->blobs_[0]->mutable_cpu_data());
      switch (op_) {
      case NormalizeParameter_NormalizeOp_DEMEAN:
        caffe_cpu_zero_mean(imSz_, this->blobs_[0]->mutable_cpu_data());
        break;
      case NormalizeParameter_NormalizeOp_SDSCALE:
        caffe_cpu_zero_mean(imSz_, this->blobs_[0]->mutable_cpu_data());
        scal = caffe_cpu_dot<Dtype>(imSz_, this->blobs_[0]->cpu_data(),
                                    this->blobs_[0]->cpu_data());
        caffe_scal(imSz_, Dtype(1.0 / scal), this->blobs_[0]->mutable_cpu_data());
        break;
      default:
        LOG(FATAL) << "Unknown elementwise operation.";
      }
      caffe_copy(imSz_, this->blobs_[0]->cpu_data(), top_data + n * imSz_);
    }
  }
}
Developer ID: castigliano, Project: caffe, Lines: 25, Source: normalize_layer.cpp

Example 13: LOG

template <typename Dtype>
void Softmax2WithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type_name()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    const Dtype* top_prob_data = top_prob_.cpu_data();
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    const Dtype* label = (*bottom)[1]->cpu_data();
    int num = prob_.num();
    int dim = prob_.count() / num;
    int top_dim = top_prob_.count() / num;
    int spatial_dim = prob_.height() * prob_.width();
    for (int i = 0; i < num; ++i) {
      for (int j = 0; j < spatial_dim; ++j) {
        int label_v = static_cast<int>(label[i * spatial_dim + j]);
        int top_label_v = top_dict_.cpu_data()[label_v];
        bottom_diff[i * dim + label_v * spatial_dim + j] -= lambda_;
        for (int k = 0; k < prob_.channels(); ++k) {
          if (top_label_v == top_dict_.cpu_data()[k]) {
            bottom_diff[i * dim + k * spatial_dim + j] -=
                (1 - lambda_) * prob_data[i * dim + k * spatial_dim + j] /
                top_prob_data[i * top_dim + top_label_v * spatial_dim + j];
          }
        }
      }
    }
    // Scale gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_scal(prob_.count(), loss_weight / num / spatial_dim, bottom_diff);
  }
}
Developer ID: pl8787, Project: caffe, Lines: 34, Source: softmax2_loss_layer.cpp

Example 14: LOG

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type_name()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->cpu_data();
    int num = prob_.num();
    int dim = prob_.count() / num;
    int spatial_dim = prob_.height() * prob_.width();
    for (int i = 0; i < num; ++i) {
      for (int j = 0; j < spatial_dim; ++j) {
        bottom_diff[i * dim + static_cast<int>(label[i * spatial_dim + j])
            * spatial_dim + j] -= 1;
      }
    }
    // Scale gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_scal(prob_.count(), loss_weight / num / spatial_dim, bottom_diff);
  }
}
Developer ID: alasin, Project: ViewpointsAndKeypoints, Lines: 26, Source: softmax_loss_layer.cpp

Example 15: caffe_scal

template <typename Dtype>
void WeightPlusLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

  caffe_scal(dim_, Dtype(2.0), weight_two_.mutable_cpu_data());

  // gradient with respect to weight
  for (int n = 0; n < batch_; ++n) {
    int offset = n * dim_;
    caffe_mul(dim_, weight_two_.cpu_data(), bottom_data + offset, data_meta_.mutable_cpu_data() + offset);
    caffe_mul(dim_, top_diff + offset, data_meta_.cpu_data() + offset, data_meta_.mutable_cpu_data() + offset);
    caffe_axpy(dim_, Dtype(1.0), data_meta_.cpu_data() + offset, blobs_[0]->mutable_cpu_diff());
  }

  // gradient with respect to bottom data
  if (propagate_down[0]) {
    for (int n = 0; n < batch_; ++n) {
      int offset = n * dim_;
      caffe_mul(dim_, top_diff + offset, weight_two_.cpu_data(), bottom_diff + offset);
    }
  }
}
Developer ID: FuchenUSTC, Project: caffe, Lines: 27, Source: weight_plus_layer.cpp


Note: The caffe_scal function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.