This article collects typical usage examples of the C++ DCHECK_LT macro. If you are unsure what DCHECK_LT does or how to use it, the curated examples below should help.
The following presents 15 code examples of DCHECK_LT drawn from real projects, sorted by popularity.
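Before the examples, a quick note on the macro itself. DCHECK_LT(a, b) is a debug-only assertion, provided by glog and mirrored by similar logging layers in Chromium, folly, and Caffe, that aborts with both values logged if a < b does not hold; in release (NDEBUG) builds the check is disabled, so it documents invariants without adding production cost. A minimal sketch, assuming the glog flavor of the macro:

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  const int index = 3;
  const int size = 10;
  // Debug builds abort here (logging both values) if index >= size;
  // release builds skip the check.
  DCHECK_LT(index, size);
  return 0;
}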
Example 1: CHECK
void EmbedLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  CHECK(!propagate_down[0]) << "Can't backpropagate to EmbedLayer input.";
  if (this->param_propagate_down_[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* bottom_data = bottom[0]->cpu_data();
    // Gradient with respect to weight
    Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
    int index;
    for (int n = 0; n < M_; ++n) {
      index = static_cast<int>(bottom_data[n]);
      DCHECK_GE(index, 0);
      DCHECK_LT(index, K_);
      DCHECK_EQ(static_cast<Dtype>(index), bottom_data[n])
          << "non-integer input";
      caffe_axpy(N_, Dtype(1), top_diff + n * N_, weight_diff + index * N_);
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
    caffe_cpu_gemv<Dtype>(CblasTrans, M_, N_, Dtype(1), top_diff,
        bias_multiplier_.cpu_data(), Dtype(1), bias_diff);
  }
}
Example 2: EmptyCopy
ColorHistogram ColorHistogram::ScaleHistogram(const vector<float>& gain) const {
  const ColorHistogramIndexLUT& lut =
      ColorHistogramIndexLUTFactory::Instance().GetLUT(
          lum_bins_, color_bins_, color_bins_);
  ColorHistogram result = EmptyCopy();
  if (!IsSparse()) {
    for (int i = 0; i < total_bins_; ++i) {
      const float value = bins_[i];
      if (value) {
        const std::tuple<int, int, int>& idx_3d = lut.Ind2Sub(i);
        const float bin_lum = std::min(lum_bins_ - 1.f, std::get<0>(idx_3d) * gain[0]);
        const float bin_col1 = std::min(color_bins_ - 1.f, std::get<1>(idx_3d) * gain[1]);
        const float bin_col2 = std::min(color_bins_ - 1.f, std::get<2>(idx_3d) * gain[2]);
        result.AddValueInterpolated(bin_lum, bin_col1, bin_col2, value);
      }
    }
  } else {
    for (const auto& bin : sparse_bins_) {
      const std::tuple<int, int, int>& idx_3d = lut.Ind2Sub(bin.first);
      const float bin_lum = std::min(lum_bins_ - 1.f, std::get<0>(idx_3d) * gain[0]);
      const float bin_col1 = std::min(color_bins_ - 1.f, std::get<1>(idx_3d) * gain[1]);
      const float bin_col2 = std::min(color_bins_ - 1.f, std::get<2>(idx_3d) * gain[2]);
      result.AddValueInterpolated(bin_lum, bin_col1, bin_col2, bin.second);
    }
  }
  DCHECK_LT(fabs(WeightSum() - result.WeightSum()), 1e-3f);
  return result;
}
Example 3: DCHECK_GE
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / outer_num_;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example 4: DCHECK
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);
  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error("attempted to coalesce more data than "
                                "available");
    }
  }
  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}
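Example 4 pairs DCHECK_LT with DCHECK_GE to state a precondition on entry and the matching postcondition on exit. A minimal self-contained sketch of that pattern (not folly's API):

#include <glog/logging.h>
#include <cstddef>
#include <vector>

void GrowTo(std::vector<int>* v, std::size_t min_size) {
  DCHECK_LT(v->size(), min_size);  // precondition: growth is actually needed
  v->resize(min_size);
  DCHECK_GE(v->size(), min_size);  // postcondition: the request was satisfied
}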
Example 5: appendUntypedValue
/**
 * @brief Append an untyped value to this NativeColumnVector.
 * @warning Appending a new value must not cause the number of values in this
 *          NativeColumnVector to exceed the reserved length supplied to the
 *          constructor.
 * @warning Do not use this with NULL values. Use appendNullValue() instead.
 *
 * @param value A pointer to an untyped value to append to this
 *        NativeColumnVector.
 **/
inline void appendUntypedValue(const void *value) {
  DCHECK_LT(actual_length_, reserved_length_);
  std::memcpy((static_cast<char*>(values_) + (actual_length_ * type_length_)),
              value,
              type_length_);
  ++actual_length_;
}
Example 6: degree_
LeastSquaresVelocityTrackerStrategy::LeastSquaresVelocityTrackerStrategy(
    uint32_t degree,
    Weighting weighting)
    : degree_(degree), weighting_(weighting) {
  DCHECK_LT(degree_, static_cast<uint32_t>(Estimator::kMaxDegree));
  Clear();
}
Example 7: DCHECK_GE
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  const Dtype* sample_weight = bottom[2]->cpu_data();
  int num = prob_.num();
  int dim = prob_.count() / num;
  int spatial_dim = prob_.height() * prob_.width();
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    for (int j = 0; j < spatial_dim; j++) {
      const int label_value = static_cast<int>(label[i * spatial_dim + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.channels());
      Dtype w = sample_weight[i * spatial_dim + j];
      loss -= w * log(std::max(prob_data[i * dim + label_value * spatial_dim + j],
                               Dtype(FLT_MIN)));
      ++count;
    }
  }
  if (normalize_) {
    top[0]->mutable_cpu_data()[0] = loss / count;
  } else {
    top[0]->mutable_cpu_data()[0] = loss / num;
  }
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
Example 8: DCHECK
void ScriptProcessorHandler::fireProcessEventForOfflineAudioContext(
    unsigned doubleBufferIndex,
    WaitableEvent* waitableEvent) {
  DCHECK(isMainThread());
  DCHECK_LT(doubleBufferIndex, 2u);
  if (doubleBufferIndex > 1) {
    waitableEvent->signal();
    return;
  }
  AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
  AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
  DCHECK(outputBuffer);
  if (!outputBuffer) {
    waitableEvent->signal();
    return;
  }
  if (node() && context() && context()->getExecutionContext()) {
    // We do not need a process lock here because the offline render thread
    // is locked by the waitable event.
    double playbackTime = (context()->currentSampleFrame() + m_bufferSize) /
        static_cast<double>(context()->sampleRate());
    node()->dispatchEvent(
        AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
  }
  waitableEvent->signal();
}
Example 9: addBlockToPartition
/**
 * @brief Add a block to a partition.
 *
 * @param block The id of the block to be added to the partition.
 * @param part_id The id of the partition to add the block to.
 **/
inline void addBlockToPartition(const block_id block,
                                const partition_id part_id) {
  DCHECK_LT(part_id, num_partitions_);
  SpinSharedMutexExclusiveLock<false> lock(
      blocks_in_partition_mutexes_[part_id]);
  blocks_in_partition_[part_id].insert(block);
}
Example 10: getTypedValue
/**
 * @brief Get a value in this NativeColumnVector as a TypedValue.
 *
 * @param position The position of the value to get.
 * @return The value at position.
 **/
inline TypedValue getTypedValue(const std::size_t position) const {
  DCHECK_LT(position, actual_length_);
  return (null_bitmap_ && null_bitmap_->getBit(position))
      ? type_.makeNullValue()
      : type_.makeValue(static_cast<const char*>(values_) + (position * type_length_),
                        type_length_);
}
Example 11: bottom_data_vector
void AccuracyLayer<Ftype, Btype>::Forward_cpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  float accuracy = 0.F;
  const Ftype* bottom_data = bottom[0]->cpu_data<Ftype>();
  const Ftype* bottom_label = bottom[1]->cpu_data<Ftype>();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  if (top.size() > 1) {
    nums_buffer_.set_data(0.F);
    top[1]->set_data(0.F);
  }
  std::vector<std::pair<float, int>> bottom_data_vector(num_labels);
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value = static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      if (top.size() > 1) {
        ++nums_buffer_.mutable_cpu_data()[label_value];
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector[k] = std::make_pair(
            static_cast<float>(bottom_data[i * dim + k * inner_num_ + j]), k);
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<float, int>>());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value) {
          accuracy += 1.F;
          if (top.size() > 1) {
            Ftype* top_label = top[1]->mutable_cpu_data<Ftype>();
            top_label[label_value] = top_label[label_value] + 1.;
          }
          break;
        }
      }
      ++count;
    }
  }
  top[0]->mutable_cpu_data<Ftype>()[0] = accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      const float num = nums_buffer_.cpu_data()[i];
      Ftype* top_label = top[1]->mutable_cpu_data<Ftype>();
      top_label[i] = num == 0.F ? 0. : top_label[i] / num;
    }
  }
  // Accuracy layer should not be used as a loss function.
}
Example 12: getBlocksInPartition
/**
 * @brief Get all the blocks from a particular partition.
 *
 * @param part_id The id of the partition to retrieve the blocks from.
 * @return The block_ids of blocks belonging to this partition at the moment
 *         when this method is called.
 **/
inline const std::vector<block_id> getBlocksInPartition(
    const partition_id part_id) const {
  DCHECK_LT(part_id, num_partitions_);
  SpinSharedMutexSharedLock<false> lock(
      blocks_in_partition_mutexes_[part_id]);
  return std::vector<block_id>(blocks_in_partition_[part_id].begin(),
                               blocks_in_partition_[part_id].end());
}
Example 13: addWorkOrderProto
/**
 * @brief Add a WorkOrder proto generated from a given operator.
 *
 * @param proto A pointer to the WorkOrder proto to be added.
 * @param operator_index The index of the operator in the query DAG.
 **/
void addWorkOrderProto(serialization::WorkOrder *proto,
                       const std::size_t operator_index) {
  DCHECK(proto != nullptr);
  DCHECK_LT(operator_index, num_operators_);
  operator_containers_[operator_index].emplace(
      std::unique_ptr<serialization::WorkOrder>(proto));
}
Example 14: positionalWriteUntypedValue
/**
 * @brief Overwrite the value at the specified position with the supplied
 *        untyped value.
 * @warning Do not use this with NULL values. Use positionalWriteNullValue()
 *          instead.
 * @warning You must call prepareForPositionalWrites() BEFORE calling this
 *          method.
 * @warning Do NOT use positional writes in combination with appends.
 * @warning It is intended that this and other positional write methods
 *          should be called exactly once for each position (if this is
 *          violated, NULLs may not be tracked properly).
 *
 * @param position The position of the value in this NativeColumnVector to
 *        overwrite.
 * @param value A pointer to an untyped value to write into this
 *        NativeColumnVector.
 **/
inline void positionalWriteUntypedValue(const std::size_t position,
                                        const void *value) {
  DCHECK_LT(position, actual_length_);
  DCHECK(value != nullptr);
  std::memcpy((static_cast<char*>(values_) + (position * type_length_)),
              value,
              type_length_);
}
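Examples 5, 10, and 14 all guard a fixed-capacity buffer: appends are checked against the reserved capacity, while reads and positional writes are checked against the number of values actually present. A self-contained sketch of that contract (not Quickstep's actual NativeColumnVector; NULL tracking and prepareForPositionalWrites() are omitted for brevity):

#include <glog/logging.h>
#include <cstddef>
#include <cstdlib>
#include <cstring>

class FixedWidthColumn {
 public:
  FixedWidthColumn(std::size_t type_length, std::size_t reserved_length)
      : type_length_(type_length),
        reserved_length_(reserved_length),
        actual_length_(0),
        values_(std::malloc(type_length * reserved_length)) {}
  ~FixedWidthColumn() { std::free(values_); }

  void appendUntypedValue(const void* value) {
    DCHECK_LT(actual_length_, reserved_length_);  // must not exceed reservation
    std::memcpy(static_cast<char*>(values_) + actual_length_ * type_length_,
                value, type_length_);
    ++actual_length_;
  }

  void positionalWriteUntypedValue(std::size_t position, const void* value) {
    DCHECK_LT(position, actual_length_);  // only overwrite an existing slot
    DCHECK(value != nullptr);
    std::memcpy(static_cast<char*>(values_) + position * type_length_,
                value, type_length_);
  }

 private:
  std::size_t type_length_;
  std::size_t reserved_length_;
  std::size_t actual_length_;
  void* values_;
};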
Example 15: maxval
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  const int dim = bottom[0]->count() / outer_num_;
  const int num_labels = bottom[0]->shape(label_axis_);
  vector<Dtype> maxval(top_k_ + 1);
  vector<int> max_id(top_k_ + 1);
  if (top.size() > 1) {
    caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data());
    caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data());
  }
  int count = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value =
          static_cast<int>(bottom_label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value];
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, num_labels);
      // Top-k accuracy
      std::vector<std::pair<Dtype, int> > bottom_data_vector;
      for (int k = 0; k < num_labels; ++k) {
        bottom_data_vector.push_back(std::make_pair(
            bottom_data[i * dim + k * inner_num_ + j], k));
      }
      std::partial_sort(
          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
      // check if true label is in top k predictions
      for (int k = 0; k < top_k_; k++) {
        if (bottom_data_vector[k].second == label_value &&
            (threshold_ <= 0 || bottom_data_vector[k].first >= threshold_)) {
          ++accuracy;
          if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value];
          break;
        }
      }
      ++count;
    }
  }
  // LOG(INFO) << "Accuracy: " << accuracy;
  top[0]->mutable_cpu_data()[0] = accuracy / count;
  if (top.size() > 1) {
    for (int i = 0; i < top[1]->count(); ++i) {
      top[1]->mutable_cpu_data()[i] =
          nums_buffer_.cpu_data()[i] == 0 ? 0
          : top[1]->cpu_data()[i] / nums_buffer_.cpu_data()[i];
    }
  }
  // Accuracy layer should not be used as a loss function.
}