This article collects typical usage examples of the C++ caffe_mul function: what exactly caffe_mul does, how it is called, and what real call sites look like. The curated examples below should help answer those questions.
Fifteen caffe_mul code examples are shown, sorted by popularity by default.
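Before the examples, a quick orientation. The declaration below paraphrases caffe/util/math_functions.hpp, and the tiny harness is illustrative only (not code from any of the repositories excerpted here):

#include "caffe/util/math_functions.hpp"

// template <typename Dtype>
// void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y);
// Element-wise product: y[i] = a[i] * b[i] for i in [0, N).
void demo() {
  float a[4] = {1.f, 2.f, 3.f, 4.f};
  float b[4] = {10.f, 20.f, 30.f, 40.f};
  float y[4];
  caffe::caffe_mul<float>(4, a, b, y);  // y == {10, 40, 90, 160}
  caffe::caffe_mul<float>(4, y, b, y);  // in-place: the output may alias an input,
                                        // which is why many examples below overwrite
                                        // a buffer with its own product
}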
Example 1: caffe_mul
void MyAccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype RMSE_lin = 0;
int count = bottom[0]->count();
// weighting
caffe_mul(count,
bottom[0]->cpu_data(),
bottom[2]->cpu_data(),
bottom[0]->mutable_cpu_data());
caffe_mul(count,
bottom[1]->cpu_data(),
bottom[2]->cpu_data(),
bottom[1]->mutable_cpu_data());
// rescaling
caffe_exp(count, bottom[0]->cpu_data(), bottom[0]->mutable_cpu_data());
caffe_exp(count, bottom[1]->cpu_data(), bottom[1]->mutable_cpu_data());
// diff
caffe_sub(
count,
bottom[0]->cpu_data(),
bottom[1]->cpu_data(),
diff_.mutable_cpu_data());
// sum(diff^2)
Dtype ss = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
// n: sum of the mask, i.e. the number of valid entries
Dtype n = caffe_cpu_asum(count, bottom[2]->cpu_data());
n += std::numeric_limits<Dtype>::min();
// sqrt(ss/n)
RMSE_lin = sqrt(ss/n);
top[0]->mutable_cpu_data()[0] = RMSE_lin;
}
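Written out, and assuming bottom[2] is a binary validity mask (an assumption; the layer itself does not enforce it), Example 1 evaluates a masked RMSE in linear space from log-space predictions:

\mathrm{RMSE_{lin}} = \sqrt{\frac{\sum_i \left(e^{m_i x_i} - e^{m_i y_i}\right)^2}{\sum_i m_i}}

where x = bottom[0], y = bottom[1], m = bottom[2]. With a binary m, masked-out positions contribute e^0 - e^0 = 0 to the numerator. Note that the layer overwrites bottom[0] and bottom[1] in place, so downstream consumers of those blobs see the exponentiated, masked values.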
Example 2: caffe_scal
void WeightPlusLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
const Dtype* bottom_data = bottom[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* weight = this->blobs_[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
// weight_two_ is doubled in place here; presumably Forward_cpu refreshed it this iteration
caffe_scal(dim_, Dtype(2.0), weight_two_.mutable_cpu_data());
// gradient with respect to weight
for (int n = 0; n < batch_; ++n){
int offset = n*dim_;
caffe_mul(dim_, weight_two_.cpu_data(), bottom_data + offset, data_meta_.mutable_cpu_data() + offset);
caffe_mul(dim_, top_diff + offset, data_meta_.cpu_data() + offset, data_meta_.mutable_cpu_data() + offset);
caffe_axpy(dim_, Dtype(1.0), data_meta_.cpu_data() + offset, this->blobs_[0]->mutable_cpu_diff());
}
// gradient with respect to bottom data
if (propagate_down[0]){
for (int n = 0; n < batch_; ++n){
int offset = n*dim_;
caffe_mul(dim_, top_diff + offset, weight_two_.cpu_data(), bottom_diff + offset);
}
}
}
Example 3: if
void BinomialDevianceLossLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
n1 = 0;
n2 = 0;
for (int i = 0; i < bottom[1]->num(); ++i){
if (static_cast<int>(bottom[1]->cpu_data()[i]) == 1){
n1++;
}
else if (static_cast<int>(bottom[1]->cpu_data()[i]) == -1) {
n2++;
}
}
// LOG(INFO) << n1 << " " << n2;
Dtype c = this->layer_param_.binomial_deviance_loss_param().c();
for (int i = 0; i < bottom[1]->num(); ++i){
M_.mutable_cpu_data()[i] = static_cast<int>(bottom[1]->cpu_data()[i]);
if (static_cast<int>(bottom[1]->cpu_data()[i]) == 1) {
W_.mutable_cpu_data()[i] = 1.0 / n1;
} else if (static_cast<int>(bottom[1]->cpu_data()[i]) == -1) {
W_.mutable_cpu_data()[i] = 1.0 / n2;
M_.mutable_cpu_data()[i] = -c;
} else {
W_.mutable_cpu_data()[i] = 0.0;
}
}
summer_vec_.Reshape(bottom[0]->num(), 1, 1, 1);
for (int i = 0; i < bottom[0]->num(); ++i){
exp_.mutable_cpu_data()[i] = Dtype(1);
summer_vec_.mutable_cpu_data()[i] = Dtype(1);
}
Dtype alpha = this->layer_param_.binomial_deviance_loss_param().alpha();
Dtype beta = this->layer_param_.binomial_deviance_loss_param().beta();
caffe_cpu_axpby(
bottom[1]->num(),
Dtype(-alpha),
bottom[0]->cpu_data(),
Dtype(alpha * beta),
exp_.mutable_cpu_data());
caffe_mul(bottom[1]->num(), M_.cpu_data(), exp_.cpu_data(), exp_.mutable_cpu_data());
caffe_exp(bottom[1]->num(), exp_.cpu_data(), exp_.mutable_cpu_data());
caffe_cpu_axpby(bottom[1]->num(), Dtype(1), exp_.cpu_data(), Dtype(1), summer_vec_.mutable_cpu_data());
for (int i = 0; i < bottom[0]->num(); ++i){
summer_vec_.mutable_cpu_data()[i] = log(summer_vec_.cpu_data()[i]);
}
//// multiply by elimination array
caffe_mul(bottom[2]->num(), bottom[2]->cpu_data(), summer_vec_.cpu_data(), summer_vec_.mutable_cpu_data());
////
Dtype loss = caffe_cpu_dot(bottom[1]->num(), W_.cpu_data(), summer_vec_.cpu_data());
top[0]->mutable_cpu_data()[0] = loss;
}
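Tracing the arithmetic (a reading of the code, not of any layer documentation): exp_ ends up holding \exp(\alpha M_i(\beta - x_i)), so the loss is the binomial deviance

\mathcal{L} = \sum_i W_i \, e_i \, \log\!\left(1 + \exp\big(\alpha M_i(\beta - x_i)\big)\right)

with x_i the predicted similarity from bottom[0], M_i = 1 for positive pairs and -c for negative pairs, W_i = 1/n1 or 1/n2 so that each class contributes equally, and e_i the elimination flag from bottom[2].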
Example 4: switch
void EltwiseLayer<Dtype, MItype, MOtype>::Forward_cpu(
const vector<Blob<MItype>*>& bottom,
const vector<Blob<MOtype>*>& top) {
int_tp* mask = NULL;
const Dtype* bottom_data_a = NULL;
const Dtype* bottom_data_b = NULL;
const int_tp count = top[0]->count();
Dtype* top_data = top[0]->mutable_cpu_data();
Dtype maxVal = FLT_MAX;
if (std::is_same<Dtype, half_fp>::value)
maxVal = HALF_MAX;
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data);
for (int_tp i = 2; i < bottom.size(); ++i) {
caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
caffe_set(count, Dtype(0), top_data);
// TODO(shelhamer) does BLAS optimize to sum for coeff = 1?
for (int_tp i = 0; i < bottom.size(); ++i) {
caffe_axpy(count, coeffs_[i], bottom[i]->cpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
// Initialize
mask = max_idx_.mutable_cpu_data();
caffe_set(count, (int_tp)-1, mask);
caffe_set(count, Dtype(-maxVal), top_data);
// bottom 0 & 1
bottom_data_a = bottom[0]->cpu_data();
bottom_data_b = bottom[1]->cpu_data();
for (int_tp idx = 0; idx < count; ++idx) {
if (bottom_data_a[idx] > bottom_data_b[idx]) {
top_data[idx] = bottom_data_a[idx]; // maxval
mask[idx] = 0; // maxid
} else {
top_data[idx] = bottom_data_b[idx]; // maxval
mask[idx] = 1; // maxid
}
}
// bottom 2++
for (int_tp blob_idx = 2; blob_idx < bottom.size(); ++blob_idx) {
bottom_data_b = bottom[blob_idx]->cpu_data();
for (int_tp idx = 0; idx < count; ++idx) {
if (bottom_data_b[idx] > top_data[idx]) {
top_data[idx] = bottom_data_b[idx]; // maxval
mask[idx] = blob_idx; // maxid
}
}
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
Example 5: caffe_mul
Dtype EltwiseProductLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
const int count = (*top)[0]->count();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data);
}
return Dtype(0.);
}
Example 6: LOG
void SigmoidWeightedCrossEntropyLossLayer<Dtype>::Backward_cpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to weight inputs.";
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
const Dtype* target = bottom[1]->cpu_data();
const Dtype* weight = bottom[2]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype* tmp = new Dtype[count << 1];
Dtype* tmp1 = tmp + count;
// diff: 1/2
caffe_set(count, (Dtype)0.5, bottom_diff);
// diff: 1/2 * \hat{p}
caffe_mul(count, bottom_diff, sigmoid_output_data, bottom_diff);
// diff: 1/2 * (1-p) * \hat{p}
caffe_set(count, (Dtype)1, tmp1);
caffe_sub(count, tmp1, target, tmp);
caffe_mul(count, bottom_diff, tmp, bottom_diff);
// diff: 1/2(1-w) * (1-p) * \hat{p}
caffe_sub(count, tmp1, weight, tmp);
caffe_div(count, bottom_diff, tmp, bottom_diff);
// tmp: 1-\hat{p}
caffe_sub(count, tmp1, sigmoid_output_data, tmp);
// tmp: p * (1-\hat{p})
caffe_mul(count, tmp, target, tmp);
// tmp: -1/2 * p * (1-\hat{p})
caffe_set(count, (Dtype)-0.5, tmp1);
caffe_mul(count, tmp, tmp1, tmp);
// tmp: -1/2w * p * (1-\hat{p})
caffe_div(count, tmp, weight, tmp);
// diff: -(1/2w * p * (1-\hat{p}) - 1/2(1-w) * (1-p) * \hat{p})
caffe_add(count, bottom_diff, tmp, bottom_diff);
delete[] tmp;
// Scale down gradient
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_scal(count, loss_weight / num, bottom_diff);
}
}
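Collapsing the step-by-step comments, with p the target, \hat{p} the sigmoid output, and w the weight, the per-element gradient assembled above is

\frac{\partial \mathcal{L}}{\partial z} = \frac{1}{2}\left(\frac{(1-p)\,\hat{p}}{1-w} - \frac{p\,(1-\hat{p})}{w}\right),

scaled by loss_weight / num at the end. The divisions by w and 1-w imply the weights are assumed to lie strictly inside (0, 1); a weight of exactly 0 or 1 would divide by zero.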
Example 7: caffe_mul
void MVNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
int num;
if (this->layer_param_.mvn_param().across_channels())
num = bottom[0]->num();
else
num = bottom[0]->num() * bottom[0]->channels();
int dim = bottom[0]->count() / num;
if (this->layer_param_.mvn_param().normalize_variance()) {
caffe_mul(temp_.count(), top_data, top_diff, bottom_diff);
caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., bottom_diff,
sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
mean_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
bottom_diff);
caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff);
caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_diff,
sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
mean_.cpu_data(), sum_multiplier_.cpu_data(), 1.,
bottom_diff);
caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim),
bottom_diff);
// put the squares of bottom into temp_
caffe_powx(temp_.count(), bottom_data, Dtype(2),
temp_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
variance_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
temp_.mutable_cpu_data());
caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff);
} else {
caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, top_diff,
sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
mean_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
temp_.mutable_cpu_data());
caffe_add(temp_.count(), top_diff, temp_.cpu_data(), bottom_diff);
}
}
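For the variance-normalizing branch, with y = (x - \mu)/\sigma the forward output and D = dim, the code implements the standard gradient

\frac{\partial \mathcal{L}}{\partial x_i} = \frac{1}{\sigma}\left(\frac{\partial \mathcal{L}}{\partial y_i} - \frac{1}{D}\sum_j \frac{\partial \mathcal{L}}{\partial y_j} - \frac{y_i}{D}\sum_j \frac{\partial \mathcal{L}}{\partial y_j}\, y_j\right).

One quirk worth noting: the caffe_powx result written into temp_ is immediately overwritten by the following gemm (beta = 0), which broadcasts variance_ instead; only the broadcast \sigma actually survives into the final division.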
Example 8: caffe_mul
void MaskingLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
caffe_mul(top[0]->count(), bottom[0]->cpu_data(), this->blobs_[0]->cpu_data(), top[0]->mutable_cpu_data()); // multiply mask, y=a*b
if (bias_term_) {
caffe_axpy(top[0]->count(), (Dtype)1.0, this->blobs_[1]->cpu_data(), top[0]->mutable_cpu_data()); // y=a*x+y
}
}
Example 9: caffe_mul
void TopologyLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* weighted_bottom_data = weighted_bottom_.mutable_cpu_data();
// Gradient with respect to weight
// caffe_cpu_axpby<Dtype>(N_, (Dtype)1., topology_weight_mask, (Dtype)1., bottom_data);
caffe_mul(N_, weight_mask_.cpu_data(), bottom_data, weighted_bottom_data);
caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top_diff, weighted_bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff());
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->cpu_diff();
// Gradient with respect to bias
caffe_cpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.cpu_data(), (Dtype)1.,
this->blobs_[1]->mutable_cpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
// Gradient with respect to bottom data
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top_diff, this->blobs_[0]->cpu_data(), (Dtype)0.,
bottom[0]->mutable_cpu_diff());
}
}
Example 10: Dtype
void CosineSimilarityLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom[0]->num(); ++i){
summer_vec_.mutable_cpu_data()[i] = Dtype(1);
}
int channels = bottom[0]->channels();
for (int i = 0; i < bottom[0]->num(); ++i) {
xx_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[0]->channels(), bottom[0]->cpu_data() + i * channels,
bottom[0]->cpu_data() + i * channels);
yy_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[1]->channels(), bottom[1]->cpu_data() + i * channels,
bottom[1]->cpu_data() + i * channels);
xy_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[0]->channels(), bottom[0]->cpu_data() + i * channels,
bottom[1]->cpu_data() + i * channels);
}
caffe_mul(bottom[1]->num(), xx_.cpu_data(), yy_.cpu_data(), summer_vec_.mutable_cpu_data());
for (int i = 0; i < bottom[0]->num(); ++i) {
summer_vec_.mutable_cpu_data()[i] = sqrt(summer_vec_.cpu_data()[i]);
}
caffe_div(bottom[1]->num(), xy_.cpu_data(), summer_vec_.cpu_data(), top[0]->mutable_cpu_data());
}
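Per row i, with x and y the two bottom blobs, this computes the cosine similarity

\mathrm{top}_i = \frac{x_i \cdot y_i}{\sqrt{(x_i \cdot x_i)\,(y_i \cdot y_i)}},

reusing summer_vec_ as scratch for the denominator. Nothing guards against a zero-norm row, so an all-zero input would divide by zero.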
Example 11: caffe_copy
void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int channels = top[0]->shape(softmax_axis_);
int dim = top[0]->count() / outer_num_;
// copy top_diff into bottom_diff
caffe_copy(top[0]->count(), top_diff, bottom_diff);
for (int i = 0; i < outer_num_; ++i) {
// compute dot(top_diff, top_data) and subtract them from the bottom diff
for (int k = 0; k < inner_num_; ++k) {
// Since bottom_diff is a copy of top_diff, we can read the dot product
// from bottom_diff instead of top_diff; this also lets the
// caffe_cpu_gemm call below update bottom_diff in place.
scale_data[k] = caffe_cpu_strided_dot<Dtype>(channels,
    bottom_diff + i * dim + k, inner_num_,
    top_data + i * dim + k, inner_num_);
}
// subtraction (neatly done in place)
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
-1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
}
// elementwise multiplication
caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
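This is the softmax Jacobian folded into two passes: with y = \mathrm{softmax}(z),

\frac{\partial \mathcal{L}}{\partial z_i} = y_i\left(\frac{\partial \mathcal{L}}{\partial y_i} - \sum_k \frac{\partial \mathcal{L}}{\partial y_k}\, y_k\right).

The strided dot computes the inner sum once per spatial position, the gemm broadcasts and subtracts it, and the final caffe_mul supplies the leading y_i factor.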
Example 12: switch
void EltwiseLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (propagate_down) {
const int count = top[0]->count();
const Dtype* top_data = top[0]->cpu_data();
const Dtype* top_diff = top[0]->cpu_diff();
for (int i = 0; i < bottom->size(); ++i) {
const Dtype* bottom_data = (*bottom)[i]->cpu_data();
Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_div(count, top_data, bottom_data, bottom_diff);
caffe_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_cpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
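For PROD, the gradient with respect to each input is the product of all the other inputs, obtained here by dividing the already-computed output by that input:

\frac{\partial \mathcal{L}}{\partial b_i} = \frac{\mathrm{top}}{b_i} \odot \frac{\partial \mathcal{L}}{\partial \mathrm{top}}.

This is cheap but undefined wherever an input is exactly zero; later Caffe versions added a stable_prod_grad option to EltwiseParameter for exactly this reason.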
Example 13: caffe_mul
void DeconvNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Dtype* wa = weights_alphas->mutable_cpu_data();
exp_layer->Forward(exp_bottom_vec, exp_top_vec);
for (int ch_in = 0; ch_in < weights_alphas->num(); ++ch_in)
{
caffe_mul(alphas->count(), this->blobs_[0]->cpu_data() + this->blobs_[0]->offset(ch_in),
alphas->cpu_data(), wa + weights_alphas->offset(ch_in));
}
deconv2_layer->Forward(bottom, deconv2_top_vec);
deconv1_layer->Forward(deconv1_bottom_vec, deconv1_top_vec);
Dtype* top_data = top[0]->mutable_cpu_data();
const Dtype* deconv1_top_vec_data = deconv1_top_vec[0]->cpu_data();
const Dtype* deconv2_top_vec_data = deconv2_top_vec[0]->cpu_data();
caffe_add_scalar(deconv1_top_vec[0]->count(), (Dtype) std::numeric_limits<Dtype>::epsilon(),
deconv1_top_vec[0]->mutable_cpu_data());
for (int n = 0; n < bottom[0]->num(); ++n)
{
caffe_div(deconv1_top_vec[0]->count(), deconv2_top_vec_data + deconv2_top_vec[0]->offset(n),
deconv1_top_vec_data, top_data + top[0]->offset(n));
if (this->bias_term_)
{
const Dtype* bias = this->blobs_[2]->cpu_data();
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, top[0]->channels(),
top[0]->height() * top[0]->width(), 1, (Dtype)1., bias, bias_multiplier.cpu_data(),
(Dtype)1., top_data + top[0]->offset(n));
}
}
}
Example 14: LOG
void NormalizedSigmoidCrossEntropyLossLayer<Dtype>::Backward_cpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int dim = count / num;
const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
const Dtype* target = bottom[1]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_sub(count, sigmoid_output_data, target, bottom_diff);
// Scale down gradient
const Dtype loss_weight = top[0]->cpu_diff()[0];
Dtype* scales = new Dtype[count]();
for (int i = 0; i < dim; ++i) {
int n_pos = 0;
int n_neg = 0;
for (int j = 0; j < num; ++j) {
int idx = j * dim + i;
if (target[idx] > 0.5) {
n_pos++;
} else {
n_neg++;
}
}
// Only back propagate if there are both positive and negative samples
if (n_pos > 0 && n_pos < num) {
const float ratio = float(n_pos) / n_neg;
const bool shouldNorm = (ratio >= thres_ || 1. / ratio >= thres_);
for (int j = 0; j < num; ++j) {
int idx = j * dim + i;
if (target[idx] > 0.5) {
if (shouldNorm) {
scales[idx] = loss_weight / (n_pos * 2.);
} else {
scales[idx] = loss_weight / num;
}
} else {
if (shouldNorm) {
scales[idx] = loss_weight / (n_neg * 2.);
} else {
scales[idx] = loss_weight / num;
}
}
}
}
}
caffe_mul(count, scales, bottom_diff, bottom_diff);
delete [] scales;
}
}
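Per attribute column, writing \lambda for loss_weight, the scales work out to (a reading of the code; thres_ > 1 is assumed):

s = \begin{cases} \lambda / (2\,n_{pos}) & \text{positives, when the imbalance test fires} \\ \lambda / (2\,n_{neg}) & \text{negatives, when the imbalance test fires} \\ \lambda / num & \text{otherwise} \end{cases}

so under strong class imbalance each class contributes half of the total gradient mass. Columns containing only one class keep the zero scales from the value-initialized new Dtype[count](), silencing their gradient entirely.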
Example 15: multiplyAllChannelsByMask
void multiplyAllChannelsByMask(const Dtype* blob, const Dtype* mask_blob, int mask_num, Dtype* blob_result, int sz, int blob_channels){
int data_offset = 0;
int mask_offset = mask_num * sz;
for(int j = 0; j < blob_channels; j++){
data_offset = j * sz;
caffe_mul(sz, blob + data_offset, mask_blob + mask_offset, blob_result + data_offset);
}
}
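A hypothetical call site, assuming blob holds one sample's blob_channels channels of height*width elements each and masks holds one mask per sample; the names n, masks, out, height, and width are illustrative, not from the original repository:

int sz = height * width;
// scale every channel of sample n by that sample's mask
multiplyAllChannelsByMask(blob->cpu_data() + blob->offset(n),
                          masks->cpu_data(), /*mask_num=*/n,
                          out->mutable_cpu_data() + out->offset(n),
                          sz, blob->channels());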