本文整理汇总了C++中DLOG函数的典型用法代码示例。如果您正苦于以下问题:C++ DLOG函数的具体用法?C++ DLOG怎么用?C++ DLOG使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了DLOG函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: AdjustFactoryCrate
// Reserves `needed` units of `component` for the given manufacture slot.
// Three cases are handled:
//   1. factory crate  -> take items out of the crate, request async clones, return false
//   2. plain component (no "stacksize") -> detach it from its container now, return true
//   3. item stack     -> decrement the stack, request an async clone, return false
// Returning false means the result is delivered later from the ObjectFactory
// callback; returning true means the caller may send the result immediately.
bool CraftingSession::prepareComponent(Item* component, uint32 needed, ManufactureSlot* manSlot)
{
// Case 1: the component is a factory crate holding identical items.
FactoryCrate* fC = dynamic_cast<FactoryCrate*>(component);
if(fC)
{
// AdjustFactoryCrate decrements the crate's count; `amount` is how much we actually got.
uint32 amount = AdjustFactoryCrate(fC, needed);
DLOG(INFO) << "CraftingSession::prepareComponent FactoryCrate take " << amount;
//TODO - added stacks shouldnt have more items than maximally possible - needed is the amount needed for the slot
// that might be bigger than the max stack size
//create the new item - link it to the slot
// Stash the async context the ObjectFactory callback will need.
mAsyncComponentAmount = needed;
mAsyncManSlot = manSlot;
//make sure we request the right amount of stacks
// One clone request per item taken; each completes via a callback on `this`.
for(uint8 i = 0; i<amount; i++)
gObjectFactory->requestNewClonedItem(this,fC->getLinkedObject()->getId(),mManufacturingSchematic->getId());
// if its now empty remove it out of the inventory so we cant use it several times
// and destroy it while were at it
uint32 crateSize = fC->getAttribute<uint32>("factory_count");
if(!crateSize)
{
TangibleObject* container = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(fC->getParentId()));
//just delete it
gContainerManager->deleteObject(fC, container);
}
//dont send result - its a callback
return false;
}
// Case 2: no stack semantics - use the component object itself, no cloning needed.
//no stacksize or crate - do not bother with temporaries
if(!component->hasAttribute("stacksize"))
{
// remove it out of the inventory so we cant use it several times
TangibleObject* tO = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(component->getParentId()));
assert(tO && "CraftingSession::prepareComponent :: cant get parent");
tO->removeObject(component);
//leave parent_id untouched - we might need to readd it to the container!
//please note that we can only use components out of our inventory or the crafting stations thingy
//so update containment for all watchers
//TODO
gMessageLib->sendContainmentMessage(component->getId(),mManufacturingSchematic->getId(),0xffffffff,mOwner);
//send result directly we dont have a callback
return true;
}
// Case 3: a stack - take `needed` off the stack and clone the taken part into the slot.
//only pure stacks remain
AdjustComponentStack(component, needed);
//create the new item - link it to the slot
mAsyncComponentAmount = needed;
mAsyncManSlot = manSlot;
gObjectFactory->requestNewClonedItem(this,component->getId(),mManufacturingSchematic->getId());
//delete the stack if empty
uint32 stackSize = component->getAttribute<uint32>("stacksize");
if(!stackSize)
{
//remove the item out of its container
TangibleObject* tO = dynamic_cast<TangibleObject*>(gWorldManager->getObjectById(component->getParentId()));
if(!tO)
{
// Parent lookup failed - should not happen; abort in debug, bail out in release.
assert(false);
return false;
}
//just delete it
tO->removeObject(component);
gWorldManager->destroyObject(component);
}
//dont send result - its a callback
return false;
}
示例2: CHECK_EQ
// Sets up the data layer: validates blob counts, opens the leveldb source,
// optionally skips a random prefix of records, shapes the top blob(s) from
// the first datum, loads (or zero-initializes) the image mean, and starts
// the background prefetch thread.
//
// bottom: must be empty - a data layer takes no input blobs.
// top:    one blob (data only) or two blobs (data + label).
void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
  CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
  CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
  // A second top blob means the layer also produces labels.
  if (top->size() == 1) {
    output_labels_ = false;
  } else {
    output_labels_ = true;
  }
  // Initialize the leveldb. The database must already exist; we only read it.
  leveldb::DB* db_temp;
  leveldb::Options options;
  options.create_if_missing = false;
  options.max_open_files = 10;
  LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
  leveldb::Status status = leveldb::DB::Open(
      options, this->layer_param_.data_param().source(), &db_temp);
  CHECK(status.ok()) << "Failed to open leveldb "
                     << this->layer_param_.data_param().source() << std::endl
                     << status.ToString();
  db_.reset(db_temp);
  iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
  iter_->SeekToFirst();
  // Check if we would need to randomly skip a few data points, so that
  // parallel runs do not all start from the same record.
  if (this->layer_param_.data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      iter_->Next();
      // Wrap around when the database end is reached.
      if (!iter_->Valid()) {
        iter_->SeekToFirst();
      }
    }
  }
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(iter_->value().ToString());
  // image: with cropping the output is batch x channels x crop x crop,
  // otherwise it keeps the datum's native height/width.
  int crop_size = this->layer_param_.data_param().crop_size();
  if (crop_size > 0) {
    (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
                       datum.channels(), crop_size, crop_size);
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        crop_size, crop_size));
  } else {
    (*top)[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    prefetch_data_.reset(new Blob<Dtype>(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width()));
  }
  LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
      << (*top)[0]->channels() << "," << (*top)[0]->height() << ","
      << (*top)[0]->width();
  // label: one scalar per batch item.
  if (output_labels_) {
    (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
    prefetch_label_.reset(
        new Blob<Dtype>(this->layer_param_.data_param().batch_size(), 1, 1, 1));
  }
  // datum size, cached for the transform step.
  datum_channels_ = datum.channels();
  datum_height_ = datum.height();
  datum_width_ = datum.width();
  datum_size_ = datum.channels() * datum.height() * datum.width();
  CHECK_GT(datum_height_, crop_size);
  CHECK_GT(datum_width_, crop_size);
  // check if we want to have mean; the mean blob must match the datum shape.
  if (this->layer_param_.data_param().has_mean_file()) {
    const string& mean_file = this->layer_param_.data_param().mean_file();
    // BUGFIX: the original message lacked a space before the file name
    // ("...fromdata/mean.binaryproto").
    LOG(INFO) << "Loading mean file from " << mean_file;
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
    data_mean_.FromProto(blob_proto);
    CHECK_EQ(data_mean_.num(), 1);
    CHECK_EQ(data_mean_.channels(), datum_channels_);
    CHECK_EQ(data_mean_.height(), datum_height_);
    CHECK_EQ(data_mean_.width(), datum_width_);
  } else {
    // Simply initialize an all-empty mean.
    data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_);
  }
  // Now, start the prefetch thread. Before calling prefetch, we make two
  // cpu_data calls so that the prefetch thread does not accidentally make
  // simultaneous cudaMalloc calls when the main thread is running. In some
  // GPUs this seems to cause failures if we do not so.
  prefetch_data_->mutable_cpu_data();
  if (output_labels_) {
    prefetch_label_->mutable_cpu_data();
  }
  data_mean_.cpu_data();
  DLOG(INFO) << "Initializing prefetch";
  CreatePrefetchThread();
  DLOG(INFO) << "Prefetch initialized.";
}
示例3: CHECK
// Fills one prefetch batch with optical-flow (.flo) files read from disk.
// The first file of the batch fixes the blob dimensions (2 channels: u, v);
// every file is decoded straight into its item's slice of the batch blob.
void FloDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer total_timer;
  total_timer.Start();
  double read_us = 0;
  double decode_us = 0;
  CPUTimer step_timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  string root_folder = image_data_param.root_folder();

  // Probe the first image for its dimensions; reshaping per batch allows
  // inputs of varying size across batches (NULL buffer = header-only read).
  int width, height;
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, width, height))
      << "Could not load " << lines_[lines_id_].first;

  // batch x 2 (u,v) x height x width
  vector<int> shape(4);
  shape[0] = batch_size;
  shape[1] = 2;
  shape[2] = height;
  shape[3] = width;
  batch->data_.Reshape(shape);
  Dtype* top_data = batch->data_.mutable_cpu_data();

  const int num_lines = lines_.size();
  for (int item = 0; item < batch_size; ++item) {
    step_timer.Start();
    CHECK_GT(num_lines, lines_id_);
    read_us += step_timer.MicroSeconds();
    step_timer.Start();
    // Decode directly into this item's offset inside the batch blob.
    const int item_offset = batch->data_.offset(item);
    CHECK(readFloFile(root_folder + lines_[lines_id_].first, top_data + item_offset, width, height))
        << "Could not load " << lines_[lines_id_].first;
    decode_us += step_timer.MicroSeconds();
    // Advance the cursor; wrap (and optionally reshuffle) at the end.
    ++lines_id_;
    if (lines_id_ >= num_lines) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  total_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << total_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_us / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << decode_us / 1000 << " ms.";
}
示例4: DLOG
// Walks the dot-path stored in mElements through the reflection metadata,
// starting at `root` and consuming the element at `current_index`. A numeric
// path element following a field name is treated as an array index. Returns
// the printed leaf object (or array), or NULL when the path cannot be
// resolved to an authenticated MetaBase pointer.
jobjectArray MetaInterface::parseObject(MetaBase const* root, const int current_index) {
  int next_index = current_index + 1;
  const int path_elements = mElements.getCount();
  jobjectArray result = NULL;
  const MetaObject* meta = root->getMetaObject();
  const char* field_string = mElements.get(current_index);
  const int field_count = meta->getFieldCount();
  for (int y = 0; y < field_count; y++) {
    const MetaField* metaField = meta->getField(y);
    if (strcmp(field_string, metaField->getName()) == 0) {
      DLOG(INFO) << "Found field: " << metaField->getName();
      // found the field!
      // test whether the next path element is an array index
      int array_index = -1;
      if (current_index + 1 < path_elements) {
        const char* next_element = mElements.get(current_index + 1);
        if (isNumber(next_element)) {
          array_index = atoi(next_element);
          next_index++;
        }
      }
      // pull the object out of the root object
      void* object = NULL;
      bool is_array_terminator = false;
      if (array_index >= 0) {
        // BUGFIX: valid indices are [0, elementCount); the original used
        // `<=`, accepting an index one past the end of the array.
        if (array_index < metaField->getElementCount(root)) {
          if (metaField->getStorageType() == MetaField::TYPE_pointer) {
            // array of pointers
            object = *((void**)metaField->getElement(root, array_index));
          } else {
            // inline array
            object = metaField->getElement(root, array_index);
          }
        }
      } else if (metaField->getElementCount(root) > 1 && next_index >= path_elements) {
        // This is an array request.
        is_array_terminator = true;
      } else {
        if (metaField->getStorageType() == MetaField::TYPE_pointer) {
          object = *((void**)metaField->get(root));
        } else {
          object = metaField->get(root);
        }
      }
      if (object && MetaBase::authenticatePointer(object)) {
        // safe to cast!
        MetaBase* new_root = static_cast<MetaBase*>(object);
        if (next_index < path_elements) {
          // recurse into the child object
          result = parseObject(new_root, next_index);
        } else {
          // leaf: print it
          result = printObject(new_root);
        }
      } else if (is_array_terminator) {
        // This is the leaf, but it's an array.
        result = printArray(root, metaField);
      } else {
        // We found the field but can't go any further. Stream the pointer
        // directly; the original reinterpret_cast<int> truncates (and is
        // ill-formed) on 64-bit targets.
        DLOG(INFO) << "Field null or unknown type: " << metaField->getTypeName() << " (" << object << ")";
      }
    }
  }
  return result;
}
示例5: LOG
void Net<Dtype>::Init(const NetParameter& in_param) {
LOG(INFO) << "Initializing net from parameters: " << std::endl
<< in_param.DebugString();
// Create a copy of in_param with splits added where necessary.
NetParameter param;
InsertSplits(in_param, ¶m);
// Basically, build all the layers and set up its connections.
name_ = param.name();
map<string, int> blob_name_to_idx;
set<string> available_blobs;
int num_layers = param.layers_size();
CHECK_EQ(param.input_size() * 4, param.input_dim_size())
<< "Incorrect bottom blob dimension specifications.";
size_t memory_used = 0;
// set the input blobs
for (int i = 0; i < param.input_size(); ++i) {
const string& blob_name = param.input(i);
shared_ptr<Blob<Dtype> > blob_pointer(
new Blob<Dtype>(param.input_dim(i * 4),
param.input_dim(i * 4 + 1),
param.input_dim(i * 4 + 2),
param.input_dim(i * 4 + 3)));
blobs_.push_back(blob_pointer);
blob_names_.push_back(blob_name);
blob_need_backward_.push_back(param.force_backward());
net_input_blob_indices_.push_back(i);
net_input_blobs_.push_back(blob_pointer.get());
blob_name_to_idx[blob_name] = i;
available_blobs.insert(blob_name);
memory_used += blob_pointer->count();
}
DLOG(INFO) << "Memory required for Data" << memory_used*sizeof(Dtype);
// For each layer, set up their input and output
bottom_vecs_.resize(param.layers_size());
top_vecs_.resize(param.layers_size());
bottom_id_vecs_.resize(param.layers_size());
top_id_vecs_.resize(param.layers_size());
for (int i = 0; i < param.layers_size(); ++i) {
bool in_place = false;
const LayerParameter& layer_param = param.layers(i);
layers_.push_back(shared_ptr<Layer<Dtype> >(GetLayer<Dtype>(layer_param)));
layer_names_.push_back(layer_param.name());
LOG(INFO) << "Creating Layer " << layer_param.name();
bool need_backward = param.force_backward();
// Figure out this layer's input and output
for (int j = 0; j < layer_param.bottom_size(); ++j) {
const string& blob_name = layer_param.bottom(j);
const int blob_id = blob_name_to_idx[blob_name];
if (available_blobs.find(blob_name) == available_blobs.end()) {
LOG(FATAL) << "Unknown blob input " << blob_name <<
" to layer" << j;
}
LOG(INFO) << layer_param.name() << " <- " << blob_name;
bottom_vecs_[i].push_back(
blobs_[blob_id].get());
bottom_id_vecs_[i].push_back(blob_id);
// If a blob needs backward, this layer should provide it.
need_backward |= blob_need_backward_[blob_id];
available_blobs.erase(blob_name);
}
for (int j = 0; j < layer_param.top_size(); ++j) {
const string& blob_name = layer_param.top(j);
// Check if we are doing in-place computation
if (layer_param.bottom_size() > j &&
blob_name == layer_param.bottom(j)) {
// In-place computation
LOG(INFO) << layer_param.name() << " -> " << blob_name << " (in-place)";
in_place = true;
available_blobs.insert(blob_name);
top_vecs_[i].push_back(
blobs_[blob_name_to_idx[blob_name]].get());
top_id_vecs_[i].push_back(blob_name_to_idx[blob_name]);
} else if (blob_name_to_idx.find(blob_name) != blob_name_to_idx.end()) {
// If we are not doing in-place computation but has duplicated blobs,
// raise an error.
LOG(FATAL) << "Duplicate blobs produced by multiple sources.";
} else {
// Normal output.
LOG(INFO) << layer_param.name() << " -> " << blob_name;
shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
blobs_.push_back(blob_pointer);
blob_names_.push_back(blob_name);
blob_need_backward_.push_back(param.force_backward());
blob_name_to_idx[blob_name] = blob_names_.size() - 1;
available_blobs.insert(blob_name);
top_vecs_[i].push_back(blobs_[blob_names_.size() - 1].get());
top_id_vecs_[i].push_back(blob_names_.size() - 1);
}
}
// After this layer is connected, set it up.
// LOG(INFO) << "Setting up " << layer_names_[i];
layers_[i]->SetUp(bottom_vecs_[i], &top_vecs_[i]);
for (int topid = 0; topid < top_vecs_[i].size(); ++topid) {
LOG(INFO) << "Top shape: " << top_vecs_[i][topid]->num() << " "
<< top_vecs_[i][topid]->channels() << " "
<< top_vecs_[i][topid]->height() << " "
<< top_vecs_[i][topid]->width() << " ("
<< top_vecs_[i][topid]->count() << ")";
if (!in_place)
memory_used += top_vecs_[i][topid]->count();
//.........这里部分代码省略.........
示例6: harfBuzzBuffer
PassRefPtr<ShapeResult> HarfBuzzShaper::shapeResult()
{
RefPtr<ShapeResult> result = ShapeResult::create(m_font,
m_normalizedBufferLength, m_textRun.direction());
HarfBuzzScopedPtr<hb_buffer_t> harfBuzzBuffer(hb_buffer_create(), hb_buffer_destroy);
const FontDescription& fontDescription = m_font->getFontDescription();
const String& localeString = fontDescription.locale();
CString locale = localeString.latin1();
const hb_language_t language = hb_language_from_string(locale.data(), locale.length());
RunSegmenter::RunSegmenterRange segmentRange = {
0,
0,
USCRIPT_INVALID_CODE,
OrientationIterator::OrientationInvalid,
SmallCapsIterator::SmallCapsSameCase,
FontFallbackPriority::Invalid };
RunSegmenter runSegmenter(
m_normalizedBuffer.get(),
m_normalizedBufferLength,
m_font->getFontDescription().orientation(),
fontDescription.variant());
Vector<UChar32> fallbackCharsHint;
// TODO: Check whether this treatAsZerowidthspace from the previous script
// segmentation plays a role here, does the new scriptRuniterator handle that correctly?
while (runSegmenter.consume(&segmentRange)) {
RefPtr<FontFallbackIterator> fallbackIterator =
m_font->createFontFallbackIterator(
segmentRange.fontFallbackPriority);
appendToHolesQueue(HolesQueueNextFont, 0, 0);
appendToHolesQueue(HolesQueueRange, segmentRange.start, segmentRange.end - segmentRange.start);
const SimpleFontData* currentFont = nullptr;
RefPtr<UnicodeRangeSet> currentFontRangeSet;
bool fontCycleQueued = false;
while (m_holesQueue.size()) {
HolesQueueItem currentQueueItem = m_holesQueue.takeFirst();
if (currentQueueItem.m_action == HolesQueueNextFont) {
// For now, we're building a character list with which we probe
// for needed fonts depending on the declared unicode-range of a
// segmented CSS font. Alternatively, we can build a fake font
// for the shaper and check whether any glyphs were found, or
// define a new API on the shaper which will give us coverage
// information?
if (!collectFallbackHintChars(fallbackCharsHint, fallbackIterator->needsHintList())) {
// Give up shaping since we cannot retrieve a font fallback
// font without a hintlist.
m_holesQueue.clear();
break;
}
FontDataForRangeSet nextFontDataForRangeSet = fallbackIterator->next(fallbackCharsHint);
currentFont = nextFontDataForRangeSet.fontData().get();
currentFontRangeSet = nextFontDataForRangeSet.ranges();
if (!currentFont) {
ASSERT(!m_holesQueue.size());
break;
}
fontCycleQueued = false;
continue;
}
// TODO crbug.com/522964: Only use smallCapsFontData when the font does not support true smcp. The spec
// says: "To match the surrounding text, a font may provide alternate glyphs for caseless characters when
// these features are enabled but when a user agent simulates small capitals, it must not attempt to
// simulate alternates for codepoints which are considered caseless."
const SimpleFontData* smallcapsAdjustedFont = segmentRange.smallCapsBehavior == SmallCapsIterator::SmallCapsUppercaseNeeded
? currentFont->smallCapsFontData(fontDescription).get()
: currentFont;
// Compatibility with SimpleFontData approach of keeping a flag for overriding drawing direction.
// TODO: crbug.com/506224 This should go away in favor of storing that information elsewhere, for example in
// ShapeResult.
const SimpleFontData* directionAndSmallCapsAdjustedFont = fontDataAdjustedForOrientation(smallcapsAdjustedFont,
m_font->getFontDescription().orientation(),
segmentRange.renderOrientation);
if (!shapeRange(harfBuzzBuffer.get(),
currentQueueItem.m_startIndex,
currentQueueItem.m_numCharacters,
directionAndSmallCapsAdjustedFont,
currentFontRangeSet,
segmentRange.script,
language))
DLOG(ERROR) << "Shaping range failed.";
if (!extractShapeResults(harfBuzzBuffer.get(),
result.get(),
fontCycleQueued,
currentQueueItem,
directionAndSmallCapsAdjustedFont,
segmentRange.script,
!fallbackIterator->hasNext()))
//.........这里部分代码省略.........
示例7: CFGFUN
/*
 * Initializes the specified 'Match' data structure and the initial state of
 * commands.c for matching target windows of a command.
 *
 * _state: the parser state to transition to once the criteria block has been
 *         consumed (kept in the module-global criteria_next_state).
 *
 */
CFGFUN(criteria_init, int _state) {
/* Remember where the config parser should continue after the criteria. */
criteria_next_state = _state;
DLOG("Initializing criteria, current_match = %p, state = %d\n", current_match, _state);
/* Reset the global current_match to a clean, empty match. */
match_init(current_match);
}
示例8: remaining
// Builds a reduction tree over the given GPU devices for multi-GPU training.
// Devices are paired greedily in three passes of decreasing locality:
//   1. GPUs on the same multi-GPU board,
//   2. GPUs with peer-to-peer (P2P) access to each other,
//   3. whatever remains, adjacent pairs at a time.
// Each pass pairs a device with the next compatible one and erases the
// second of the pair, roughly halving the set per tree level. The single
// survivor is attached to a virtual root parent (-1) at the front of `pairs`.
void DevicePair::compute(const vector<int> devices, vector<DevicePair>* pairs) {
#ifndef CPU_ONLY
  vector<int> remaining(devices);
  // Depth for reduction tree (number of halving passes needed).
  int remaining_depth = static_cast<int>(ceil(log2(remaining.size())));
  // Pass 1: group GPUs by board.
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      for (int j = i + 1; j < remaining.size(); ++j) {
        cudaDeviceProp a, b;
        CUDA_CHECK(cudaGetDeviceProperties(&a, remaining[i]));
        CUDA_CHECK(cudaGetDeviceProperties(&b, remaining[j]));
        if (a.isMultiGpuBoard && b.isMultiGpuBoard) {
          if (a.multiGpuBoardGroupID == b.multiGpuBoardGroupID) {
            pairs->push_back(DevicePair(remaining[i], remaining[j]));
            DLOG(INFO) << "GPU board: " << remaining[i] << ":" << remaining[j];
            remaining.erase(remaining.begin() + j);
            break;
          }
        }
      }
    }
  }
  ostringstream s;
  for (int i = 0; i < remaining.size(); ++i) {
    s << (i ? ", " : "") << remaining[i];
  }
  DLOG(INFO) << "GPUs paired by boards, remaining: " << s.str();
  // Pass 2: group by P2P accessibility.
  remaining_depth = ceil(log2(remaining.size()));
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      for (int j = i + 1; j < remaining.size(); ++j) {
        int access;
        CUDA_CHECK(
            cudaDeviceCanAccessPeer(&access, remaining[i], remaining[j]));
        if (access) {
          pairs->push_back(DevicePair(remaining[i], remaining[j]));
          DLOG(INFO) << "P2P pair: " << remaining[i] << ":" << remaining[j];
          remaining.erase(remaining.begin() + j);
          break;
        }
      }
    }
  }
  s.str("");
  for (int i = 0; i < remaining.size(); ++i) {
    s << (i ? ", " : "") << remaining[i];
  }
  DLOG(INFO) << "GPUs paired by P2P access, remaining: " << s.str();
  // Pass 3: group remaining devices, adjacent pairs at a time.
  remaining_depth = ceil(log2(remaining.size()));
  for (int d = 0; d < remaining_depth; ++d) {
    for (int i = 0; i < remaining.size(); ++i) {
      // BUGFIX: with an odd number of survivors in a pass, the original read
      // remaining[i + 1] one past the end of the vector (undefined behavior).
      // Leave the odd device for the next pass instead.
      if (i + 1 >= remaining.size()) {
        break;
      }
      pairs->push_back(DevicePair(remaining[i], remaining[i + 1]));
      DLOG(INFO) << "Remaining pair: " << remaining[i] << ":"
                 << remaining[i + 1];
      remaining.erase(remaining.begin() + i + 1);
    }
  }
  // Should only be the parent node remaining.
  CHECK_EQ(remaining.size(), 1);
  pairs->insert(pairs->begin(), DevicePair(-1, remaining[0]));
  // Sanity checks: one pair per device, no self-pairs, no duplicate devices.
  CHECK(pairs->size() == devices.size());
  for (int i = 0; i < pairs->size(); ++i) {
    CHECK((*pairs)[i].parent() != (*pairs)[i].device());
    for (int j = i + 1; j < pairs->size(); ++j) {
      CHECK((*pairs)[i].device() != (*pairs)[j].device());
    }
  }
#else
  NO_GPU;
#endif
}
示例9: in
bool UgvParam::loadParam(std::string configFile)
{
std::ifstream in(configFile);
if(!in)
{
DLOG(FATAL) << "Couldn't find configuration file: " << configFile;
return false;
}
std::string line;
std::string key;
double value;
while (getline(in, line)) {
std::stringstream ss;
ss << line;
ss >> key;
if(key[0] == '#')
continue;
ss >> value;
if(key == "EulrChangeThreshold")
DivideCarTrack.EulrChangeThreshold = value;
else if(key == "DetectPoints")
DivideCarTrack.DetectPoints = value;
else if(key == "DetectDistance")
DivideCarTrack.DetectDistance = value;
else if(key == "ValidSegmentPointsNum")
DivideCarTrack.ValidSegmentPointsNum = value;
else if(key == "SimilarEulrThreshold")
LineParallel.SimilarEulrThreshold = value;
else if(key == "LateralDistanceThreshold")
SameSeg.LateralDistanceThreshold = value;
else if(key == "SameDirectionThreshold")
SameSeg.SameDirectionThreshold = value;
else if(key == "xMax")
Scale.xMax = value;
else if(key == "xMin")
Scale.xMin = value;
else if(key == "yMax")
Scale.yMax = value;
else if(key == "yMin")
Scale.yMin = value;
else if(key == "GridSize")
Scale.GridSize = value;
else if(key == "PixelPerGrid")
Scale.PixelPerGrid = value;
else if(key == "LeftDetectAngleBoundary")
ProbMap.LeftDetectAngleBoundary = value;
else if(key == "RightDetectAngleBoundary")
ProbMap.RightDetectAngleBoundary = value;
else if(key == "unitHeight")
ProbMap.unitHeight = value;
else if(key == "OccupiedThreshold")
ProbMap.OccupiedThreshold = value;
else if(key == "ClearThreshold")
ProbMap.ClearThreshold = value;
else if(key == "incrementUnit")
ProbMap.incrementUnit = value;
else if(key == "MaxGroundHeight")
ProbMap.MaxGroundHeight = value;
else if(key == "MaxAvgMidDiff")
ProbMap.MaxAvgMidDiff = value;
else if(key == "SaveNeeded"){
LocalMap.SaveNeeded.insert(value);
while(ss){
ss >> value;
LocalMap.SaveNeeded.insert(value);
}
}
else if(key == "SaveInterval")
示例10: work
void
o3d3xx::FrameGrabber::Run()
{
boost::asio::io_service::work work(this->io_service_);
//
// setup the camera for image acquistion
//
std::string cam_ip;
int cam_port;
try
{
cam_ip = this->cam_->GetIP();
cam_port = std::stoi(this->cam_->GetParameter("PcicTcpPort"));
}
catch (const o3d3xx::error_t& ex)
{
LOG(ERROR) << "Could not get IP/Port of the camera: "
<< ex.what();
return;
}
LOG(INFO) << "Camera connection info: ip=" << cam_ip
<< ", port=" << cam_port;
try
{
this->cam_->RequestSession();
this->cam_->SetOperatingMode(o3d3xx::Camera::operating_mode::RUN);
this->cam_->CancelSession();
}
catch (const o3d3xx::error_t& ex)
{
LOG(ERROR) << "Failed to setup camera for image acquisition: "
<< ex.what();
return;
}
//
// init the asio structures
//
boost::asio::ip::tcp::socket sock(this->io_service_);
boost::asio::ip::tcp::endpoint endpoint(
boost::asio::ip::address::from_string(cam_ip), cam_port);
//
// Forward declare our two read handlers (because they need to call
// eachother).
//
o3d3xx::FrameGrabber::ReadHandler ticket_handler;
o3d3xx::FrameGrabber::ReadHandler image_handler;
//
// image data callback
//
std::size_t bytes_read = 0;
std::size_t buff_sz = 0; // bytes
image_handler =
[&, this]
(const boost::system::error_code& ec, std::size_t bytes_transferred)
{
if (ec) { throw o3d3xx::error_t(ec.value()); }
bytes_read += bytes_transferred;
//DLOG(INFO) << "Read " << bytes_read << " image bytes of "
// << buff_sz;
if (bytes_read == buff_sz)
{
DLOG(INFO) << "Got full image!";
bytes_read = 0;
// 1. verify the data
if (o3d3xx::verify_image_buffer(this->back_buffer_))
{
DLOG(INFO) << "Image OK";
// 2. move the data to the front buffer in O(1) time complexity
this->front_buffer_mutex_.lock();
this->back_buffer_.swap(this->front_buffer_);
this->front_buffer_mutex_.unlock();
// 3. notify waiting clients
this->front_buffer_cv_.notify_all();
}
else
{
LOG(WARNING) << "Bad image!";
}
// read another ticket
sock.async_read_some(
boost::asio::buffer(this->ticket_buffer_.data(),
o3d3xx::IMG_TICKET_SZ),
ticket_handler);
return;
}
//.........这里部分代码省略.........
示例11: main
int main(int argc, char **argv)
{
static unsigned char blk[LZJODY_BSIZE];
static unsigned char out[LZJODY_BSIZE + 4];
int i;
int length = 0; /* Incoming data block length counter */
int c_length; /* Compressed block length temp variable */
int blocknum = 0; /* Current block number */
unsigned char options = 0; /* Compressor options */
#ifdef THREADED
struct thread_info *thr;
int nprocs = 1; /* Number of processors */
int eof = 0; /* End of file? */
char running = 0; /* Number of threads running */
#endif /* THREADED */
if (argc < 2) goto usage;
/* Windows requires that data streams be put into binary mode */
#ifdef ON_WINDOWS
setmode(STDIN_FILENO, _O_BINARY);
setmode(STDOUT_FILENO, _O_BINARY);
#endif /* ON_WINDOWS */
files.in = stdin;
files.out = stdout;
if (!strncmp(argv[1], "-c", 2)) {
#ifndef THREADED
/* Non-threaded compression */
/* fprintf(stderr, "blk %p, blkend %p, files %p\n",
blk, blk + LZJODY_BSIZE - 1, files); */
while((length = fread(blk, 1, LZJODY_BSIZE, files.in))) {
if (ferror(files.in)) goto error_read;
DLOG("\n--- Compressing block %d\n", blocknum);
i = lzjody_compress(blk, out, options, length);
if (i < 0) goto error_compression;
DLOG("c_size %d bytes\n", i);
i = fwrite(out, i, 1, files.out);
if (!i) goto error_write;
blocknum++;
}
#else /* Using POSIX threads */
#ifdef _SC_NPROCESSORS_ONLN
/* Get number of online processors for pthreads */
nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
if (nprocs < 1) {
fprintf(stderr, "warning: system returned bad number of processors: %d\n", nprocs);
nprocs = 1;
}
#endif /* _SC_NPROCESSORS_ONLN */
/* Run two threads per processor */
nprocs <<= 1;
fprintf(stderr, "lzjody: compressing with %d worker threads\n", nprocs);
/* Allocate per-thread input/output memory and control blocks */
thr = (struct thread_info *)calloc(nprocs, sizeof(struct thread_info));
if (!thr) goto oom;
/* Set compressor options */
for (i = 0; i < nprocs; i++) (thr + i)->options = options;
thread_error = 0;
while (1) {
struct thread_info *cur = NULL;
uint32_t min_blk; /* Minimum block number */
unsigned int min_thread; /* Thread for min_blk */
int thread; /* Temporary thread scan counter */
int open_thr; /* Next open thread */
/* See if lowest block number is finished */
while (1) {
min_blk = 0xffffffff;
min_thread = 0;
/* Scan threads for smallest block number */
pthread_mutex_lock(&mtx);
for (thread = 0; thread < nprocs; thread++) {
unsigned int j;
fprintf(stderr, ":thr %p, thread %d\n",
(void *)thr, thread);
if (thread_error != 0) goto error_compression;
j = (thr + thread)->block;
if (j > 0 && j < min_blk) {
min_blk = j;
min_thread = thread;
fprintf(stderr, ":j%d:%d thr %p, cur %p, min_thread %d\n",
j, min_blk, (void *)thr, (void *)cur, min_thread);
}
}
pthread_mutex_unlock(&mtx);
cur = thr + min_thread;
fprintf(stderr, "thr %p, cur %p, min_thread %d\n",
(void *)thr, (void *)cur, min_thread);
if (cur->working == 0 && cur->length > 0) {
pthread_detach(cur->id);
/* flush finished block */
//.........这里部分代码省略.........
示例12: manage_window
/*
* Do some sanity checks and then reparent the window.
*
*/
void manage_window(xcb_window_t window, xcb_get_window_attributes_cookie_t cookie,
bool needs_to_be_mapped) {
xcb_drawable_t d = { window };
xcb_get_geometry_cookie_t geomc;
xcb_get_geometry_reply_t *geom;
xcb_get_window_attributes_reply_t *attr = NULL;
xcb_get_property_cookie_t wm_type_cookie, strut_cookie, state_cookie,
utf8_title_cookie, title_cookie,
class_cookie, leader_cookie, transient_cookie,
role_cookie, startup_id_cookie, wm_hints_cookie;
#ifdef USE_ICONS
xcb_get_property_cookie_t wm_icon_cookie;
#endif
geomc = xcb_get_geometry(conn, d);
#define FREE_GEOMETRY() do { \
if ((geom = xcb_get_geometry_reply(conn, geomc, 0)) != NULL) \
free(geom); \
} while (0)
/* Check if the window is mapped (it could be not mapped when intializing and
calling manage_window() for every window) */
if ((attr = xcb_get_window_attributes_reply(conn, cookie, 0)) == NULL) {
DLOG("Could not get attributes\n");
FREE_GEOMETRY();
return;
}
if (needs_to_be_mapped && attr->map_state != XCB_MAP_STATE_VIEWABLE) {
FREE_GEOMETRY();
goto out;
}
/* Don’t manage clients with the override_redirect flag */
if (attr->override_redirect) {
FREE_GEOMETRY();
goto out;
}
/* Check if the window is already managed */
if (con_by_window_id(window) != NULL) {
DLOG("already managed (by con %p)\n", con_by_window_id(window));
FREE_GEOMETRY();
goto out;
}
/* Get the initial geometry (position, size, …) */
if ((geom = xcb_get_geometry_reply(conn, geomc, 0)) == NULL) {
DLOG("could not get geometry\n");
goto out;
}
uint32_t values[1];
/* Set a temporary event mask for the new window, consisting only of
* PropertyChange and StructureNotify. We need to be notified of
* PropertyChanges because the client can change its properties *after* we
* requested them but *before* we actually reparented it and have set our
* final event mask.
* We need StructureNotify because the client may unmap the window before
* we get to re-parent it.
* If this request fails, we assume the client has already unmapped the
* window between the MapRequest and our event mask change. */
values[0] = XCB_EVENT_MASK_PROPERTY_CHANGE |
XCB_EVENT_MASK_STRUCTURE_NOTIFY;
xcb_void_cookie_t event_mask_cookie =
xcb_change_window_attributes_checked(conn, window, XCB_CW_EVENT_MASK, values);
if (xcb_request_check(conn, event_mask_cookie) != NULL) {
LOG("Could not change event mask, the window probably already disappeared.\n");
goto out;
}
#define GET_PROPERTY(atom, len) xcb_get_property(conn, false, window, atom, XCB_GET_PROPERTY_TYPE_ANY, 0, len)
wm_type_cookie = GET_PROPERTY(A__NET_WM_WINDOW_TYPE, UINT32_MAX);
strut_cookie = GET_PROPERTY(A__NET_WM_STRUT_PARTIAL, UINT32_MAX);
state_cookie = GET_PROPERTY(A__NET_WM_STATE, UINT32_MAX);
utf8_title_cookie = GET_PROPERTY(A__NET_WM_NAME, 128);
leader_cookie = GET_PROPERTY(A_WM_CLIENT_LEADER, UINT32_MAX);
transient_cookie = GET_PROPERTY(XCB_ATOM_WM_TRANSIENT_FOR, UINT32_MAX);
title_cookie = GET_PROPERTY(XCB_ATOM_WM_NAME, 128);
class_cookie = GET_PROPERTY(XCB_ATOM_WM_CLASS, 128);
role_cookie = GET_PROPERTY(A_WM_WINDOW_ROLE, 128);
startup_id_cookie = GET_PROPERTY(A__NET_STARTUP_ID, 512);
wm_hints_cookie = xcb_icccm_get_wm_hints(conn, window);
#ifdef USE_ICONS
wm_icon_cookie = xcb_get_property_unchecked(conn, false, window, A__NET_WM_ICON, XCB_ATOM_CARDINAL, 0, UINT32_MAX);
#endif
/* TODO: also get wm_normal_hints here. implement after we got rid of xcb-event */
DLOG("Managing window 0x%08x\n", window);
i3Window *cwindow = scalloc(sizeof(i3Window));
cwindow->id = window;
cwindow->depth = get_visual_depth(attr->visual);
//.........这里部分代码省略.........
示例13: DLOG
/* Callback invoked by ZoidCom when a connection attempt to the server
 * completes.
 *
 * On rejection: decodes the 8-bit reply code and either schedules a
 * reconnect (Retry), reports a ban, or logs a generic failure.
 *
 * On acceptance: switches the network layer into client mode, requests
 * the downstream bandwidth limit, records the server connection id, then
 * reads the announced mod and map names from the reply and verifies both
 * are available locally.  A missing mod is a hard error; a missing map
 * either triggers an auto-download (zoid mode 2) or a map-not-found
 * error.  When everything is present the level is loaded and zoid mode 1
 * is requested to enter play. */
void Client::ZCom_cbConnectResult( ZCom_ConnID _id, eZCom_ConnectResult _result, ZCom_BitStream &_reply )
{
    if ( _result != eZCom_ConnAccepted )
    {
        // The first 8 bits of the reply encode the rejection reason.
        Network::ConnectionReply::type r = static_cast<Network::ConnectionReply::type>(_reply.getInt(8));
        if(r == Network::ConnectionReply::Retry)
        {
            DLOG("Got retry from server");
            network.reconnect(50);
        }
        else if(r == Network::ConnectionReply::Banned)
        {
            console.addLogMsg("* YOU ARE BANNED FROM THIS SERVER");
        }
        else
        {
            console.addLogMsg("* COULDNT ESTABLISH CONNECTION");
        }
    }
    else
    {
        network.setClient(true);
        ZCom_requestDownstreamLimit(_id, network.downPPS, network.downBPP);
        console.addLogMsg("* CONNECTION ACCEPTED");
        network.setServerID(_id);
        network.incConnCount();

        // The accept reply carries the mod name followed by the map name,
        // in that order — the two getStringStatic() calls must not be swapped.
        std::string mod = _reply.getStringStatic();
        std::string map = _reply.getStringStatic();

        game.refreshLevels();
        game.refreshMods();
        bool hasLevel = game.hasLevel(map);
        bool hasMod = game.hasMod(mod);

        if(!hasMod)
        {
            game.error(Game::ErrorModNotFound);
            //This doesn't work somewhy: network.disconnect();
            //And maybe we don't want to do it since it would overwrite our error message
        }
        else if(!hasLevel)
        {
            if(network.autoDownloads)
            {
                ZCom_requestZoidMode(_id, 2); // We need to update
                // NOTE(review): the original re-tested !hasLevel here, but on
                // this branch it is always true, so the redundant guard was removed.
                updater.requestLevel(map);
            }
            else
                game.error(Game::ErrorMapNotFound);
        }
        else
        {
            game.setMod( mod );
            if(game.changeLevel( map, false ) && game.isLoaded())
            {
                game.runInitScripts();
                sendConsistencyInfo();
                ZCom_requestZoidMode(_id, 1);
            }
            else
            {
                console.addLogMsg("* COULDN'T LOAD MOD OR LEVEL");
                network.disconnect();
            }
        }
    }
}
示例14: timeout_task
/* Fill *err with the SERVICEERROR describing which limit the task hit:
 * time limit when timed == TRUE, administrative limit otherwise
 * (the caller's convention is timed == 2 for the admin case).
 * Frees any error previously stored in *err first. */
static void
timeout_set_limit_error (struct DSError *err, int timed)
{
	ds_error_free (err);
	err->dse_type = DSE_SERVICEERROR;
	if (timed == TRUE)
		err->ERR_SERVICE.DSE_sv_problem = DSE_SV_TIMELIMITEXCEEDED;
	else	/* timed == 2 */
		err->ERR_SERVICE.DSE_sv_problem = DSE_SV_ADMINLIMITEXCEEDED;
}

/* Abort a task whose time or administrative limit has expired.
 *
 * Every operation the task started is marked abandoned and detached from
 * the task (its DSA blocks are told to desist).  A non-search task is
 * answered with a plain limit service error.  A search that has produced
 * nothing yet gets the same error; a search with partial results returns
 * those results, tagged with the matching limit problem, and a partial
 * outcome qualifier (continuation reference) is added for each sub-task
 * that was referred elsewhere.  In all cases the task is extracted.
 *
 * NOTE(review): declared int but the original returned no value; callers
 * presumably ignore the result — left unchanged to preserve the interface. */
int
timeout_task (struct task_act *tk)
{
	struct oper_act * on;
	struct DSError * err = &(tk->tk_resp.di_error.de_err);
	struct ds_search_task *tmp;

	DLOG(log_dsap, LLOG_TRACE, ("timeout_task"));

	for(on=tk->tk_operlist; on!=NULLOPER; on=on->on_next_task) {
		/* Time out operations started by task */
		on->on_state = ON_ABANDONED;
		on->on_task = NULLTASK;
		if (on->on_dsas) {
			di_desist (on->on_dsas);
			on -> on_dsas = NULL_DI_BLOCK;
		}
	}

	if(tk->tk_dx.dx_arg.dca_dsarg.arg_type != OP_SEARCH) {
		timeout_set_limit_error (err, tk->tk_timed);
		task_error(tk);
		task_extract(tk);
	} else {
		/* Do search collation */
		if ((tk->tk_state == TK_ACTIVE) && (tk->local_st == NULL_ST)) {
			/* nothing happened yet... */
			timeout_set_limit_error (err, tk->tk_timed);
			task_error(tk);
		} else {
			/* send the results we have got... */
			tk->tk_result = &(tk->tk_resp.di_result.dr_res);
			tk->tk_result->dcr_dsres.result_type = tk->tk_dx.dx_arg.dca_dsarg.arg_type;
			tk->tk_resp.di_type = DI_RESULT;
			if (tk->tk_timed == TRUE)
				tk->tk_resp.di_result.dr_res.dcr_dsres.res_sr.CSR_limitproblem = LSR_TIMELIMITEXCEEDED;
			else	/* tk->tk_timed == 2 */
				tk->tk_resp.di_result.dr_res.dcr_dsres.res_sr.CSR_limitproblem = LSR_ADMINSIZEEXCEEDED;

			/* Go through sub-tasks and add a POQ for each */
			for(tmp=tk->referred_st; tmp!= NULL_ST; tmp=tmp->st_next)
				add_cref2poq (&tk->tk_result->dcr_dsres.res_sr,tmp->st_cr);

			task_result(tk);
			st_free_dis(&tk->referred_st,1);
		}
		task_extract(tk);
	}
}
示例15: hb_buffer_get_length
bool HarfBuzzShaper::extractShapeResults(hb_buffer_t* harfBuzzBuffer,
ShapeResult* shapeResult,
bool& fontCycleQueued, const HolesQueueItem& currentQueueItem,
const SimpleFontData* currentFont,
UScriptCode currentRunScript,
bool isLastResort)
{
enum ClusterResult {
Shaped,
NotDef,
Unknown
};
ClusterResult currentClusterResult = Unknown;
ClusterResult previousClusterResult = Unknown;
unsigned previousCluster = 0;
unsigned currentCluster = 0;
// Find first notdef glyph in harfBuzzBuffer.
unsigned numGlyphs = hb_buffer_get_length(harfBuzzBuffer);
hb_glyph_info_t* glyphInfo = hb_buffer_get_glyph_infos(harfBuzzBuffer, 0);
unsigned lastChangePosition = 0;
if (!numGlyphs) {
DLOG(ERROR) << "HarfBuzz returned empty glyph buffer after shaping.";
return false;
}
for (unsigned glyphIndex = 0; glyphIndex <= numGlyphs; ++glyphIndex) {
// Iterating by clusters, check for when the state switches from shaped
// to non-shaped and vice versa. Taking into account the edge cases of
// beginning of the run and end of the run.
previousCluster = currentCluster;
currentCluster = glyphInfo[glyphIndex].cluster;
if (glyphIndex < numGlyphs) {
// Still the same cluster, merge shaping status.
if (previousCluster == currentCluster && glyphIndex != 0) {
if (glyphInfo[glyphIndex].codepoint == 0) {
currentClusterResult = NotDef;
} else {
// We can only call the current cluster fully shapped, if
// all characters that are part of it are shaped, so update
// currentClusterResult to Shaped only if the previous
// characters have been shaped, too.
currentClusterResult = currentClusterResult == Shaped ? Shaped : NotDef;
}
continue;
}
// We've moved to a new cluster.
previousClusterResult = currentClusterResult;
currentClusterResult = glyphInfo[glyphIndex].codepoint == 0 ? NotDef : Shaped;
} else {
// The code below operates on the "flanks"/changes between NotDef
// and Shaped. In order to keep the code below from explictly
// dealing with character indices and run end, we explicitly
// terminate the cluster/run here by setting the result value to the
// opposite of what it was, leading to atChange turning true.
previousClusterResult = currentClusterResult;
currentClusterResult = currentClusterResult == NotDef ? Shaped : NotDef;
}
bool atChange = (previousClusterResult != currentClusterResult) && previousClusterResult != Unknown;
if (!atChange)
continue;
// Compute the range indices of consecutive shaped or .notdef glyphs.
// Cluster information for RTL runs becomes reversed, e.g. character 0
// has cluster index 5 in a run of 6 characters.
unsigned numCharacters = 0;
unsigned numGlyphsToInsert = 0;
unsigned startIndex = 0;
if (HB_DIRECTION_IS_FORWARD(hb_buffer_get_direction(harfBuzzBuffer))) {
startIndex = glyphInfo[lastChangePosition].cluster;
if (glyphIndex == numGlyphs) {
numCharacters = currentQueueItem.m_startIndex + currentQueueItem.m_numCharacters - glyphInfo[lastChangePosition].cluster;
numGlyphsToInsert = numGlyphs - lastChangePosition;
} else {
numCharacters = glyphInfo[glyphIndex].cluster - glyphInfo[lastChangePosition].cluster;
numGlyphsToInsert = glyphIndex - lastChangePosition;
}
} else {
// Direction Backwards
startIndex = glyphInfo[glyphIndex - 1].cluster;
if (lastChangePosition == 0) {
numCharacters = currentQueueItem.m_startIndex + currentQueueItem.m_numCharacters - glyphInfo[glyphIndex - 1].cluster;
} else {
numCharacters = glyphInfo[lastChangePosition - 1].cluster - glyphInfo[glyphIndex - 1].cluster;
}
numGlyphsToInsert = glyphIndex - lastChangePosition;
}
if (currentClusterResult == Shaped && !isLastResort) {
// Now it's clear that we need to continue processing.
if (!fontCycleQueued) {
appendToHolesQueue(HolesQueueNextFont, 0, 0);
fontCycleQueued = true;
}
// Here we need to put character positions.
//.........这里部分代码省略.........