本文整理汇总了C++中size_policy函数的典型用法代码示例。如果您正苦于以下问题:C++ size_policy函数的具体用法?C++ size_policy怎么用?C++ size_policy使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了size_policy函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: size_policy
// Record that the last collection cleared all SoftReferences, and decide
// whether the next collection should clear them as well.
void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    // Carry the "near the overhead limit" state forward so the next
    // collection also clears SoftReferences.
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}
示例2: size_policy
// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    // If eden absorption already supplied the space this cycle, the
    // boundary has effectively moved; consume the bookkeeping and stop.
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return; // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}
示例3: assert
// Create the adaptive policy counters for this ASCMS policy:
// 2 collectors, 3 generations. The counter name records which young
// collector (ParNew or the serial Copy collector) is paired with CMS.
void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
  assert(size_policy() != NULL, "A size policy is required");
  // Pick the name according to the young collector in use, then
  // initialize the policy counters - 2 collectors, 3 generations.
  const char* counter_name =
      ParNewGeneration::in_use() ? "ParNew:CMS" : "Copy:CMS";
  _gc_policy_counters =
      new CMSGCAdaptivePolicyCounters(counter_name, 2, 3, size_policy());
}
示例4: update_counters_from_policy
// Push the current values held by the adaptive size policy into the
// corresponding performance counters. A no-op unless performance data
// collection is enabled and a size policy exists.
void GCAdaptivePolicyCounters::update_counters_from_policy() {
  if (UsePerfData && (size_policy() != NULL)) {
    // Minor pause / interval statistics.
    update_avg_minor_pause_counter();
    update_avg_minor_interval_counter();
#ifdef NOT_PRODUCT
    // NOTE(review): HotSpot conventionally guards debug-only code with
    // #ifndef PRODUCT; confirm NOT_PRODUCT is actually defined where
    // intended, otherwise this counter is never updated.
    update_minor_pause_counter();
#endif
    update_minor_gc_cost_counter();
    // Young-generation occupancy and survivor statistics.
    update_avg_young_live_counter();
    update_survivor_size_counters();
    update_avg_survived_avg_counters();
    update_avg_survived_dev_counters();
    update_avg_survived_padded_avg_counters();
    // Policy decision flags (why a generation was resized).
    update_change_old_gen_for_throughput();
    update_change_young_gen_for_throughput();
    update_decrease_for_footprint();
    update_change_young_gen_for_min_pauses();
    update_change_old_gen_for_maj_pauses();
    // Trend (slope) statistics.
    update_minor_pause_young_slope_counter();
    update_minor_collection_slope_counter();
    update_major_collection_slope_counter();
  }
}
示例5: assert
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size, bool is_noref, bool is_tlab) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  HeapWord* result;
  uint loop_count = 0;
  do {
    // Fast path: try the young generation first.
    result = young_gen()->allocate(size, is_noref, is_tlab);
    // In some cases, the requested object will be too large to easily
    // fit in the young_gen. Rather than force a safepoint and collection
    // for each one, try allocation in old_gen for objects likely to fail
    // allocation in eden.
    if (result == NULL && size >= (young_gen()->eden_space()->capacity_in_words() / 2) && !is_tlab) {
      // Old-gen allocation requires the Heap_lock.
      MutexLocker ml(Heap_lock);
      result = old_gen()->allocate(size, is_noref, is_tlab);
    }
    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_noref, is_tlab);
      VMThread::execute(&op);
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }
    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) {
      // NOTE(review): "%d" for the size_t argument "size" may truncate on
      // LP64 platforms; SIZE_FORMAT would be safer. Left unchanged here.
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  } while (result == NULL && !size_policy()->gc_time_limit_exceeded());
  return result;
}
示例6: update_major_collection_slope_counter
inline void update_major_collection_slope_counter() {
_major_collection_slope_counter->set_value(
(jlong)(size_policy()->major_collection_slope() * 1000)
);
}
示例7: update_change_old_gen_for_maj_pauses
// Mirror the policy's "change old gen for major pauses" decision flag
// into its performance counter.
inline void update_change_old_gen_for_maj_pauses() {
  const int decision = size_policy()->change_old_gen_for_maj_pauses();
  _change_old_gen_for_maj_pauses_counter->set_value(decision);
}
示例8: update_decrement_tenuring_threshold_for_survivor_limit
// Mirror the policy's "decrement tenuring threshold for survivor limit"
// decision flag into its performance counter.
inline void update_decrement_tenuring_threshold_for_survivor_limit() {
  const int decision =
      size_policy()->decrement_tenuring_threshold_for_survivor_limit();
  _decrement_tenuring_threshold_for_survivor_limit_counter->set_value(decision);
}
示例9: assert
//.........这里部分代码省略.........
MutexLocker ml(Heap_lock);
gc_count = total_collections();
result = young_gen()->allocate(size);
if (result != NULL) {
return result;
}
// If certain conditions hold, try allocating from the old gen.
result = mem_allocate_old_gen(size);
if (result != NULL) {
return result;
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
return NULL;
}
// Failed to allocate without a gc.
if (GCLocker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
// initiated by the last thread exiting the critical section; so
// we retry the allocation sequence from the beginning of the loop,
// rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
GCLocker::stall_until_clear();
gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return NULL;
}
}
}
if (result == NULL) {
// Generate a VM operation
VM_ParallelGCFailedAllocation op(size, gc_count);
VMThread::execute(&op);
// Did the VM operation execute? If so, return the result directly.
// This prevents us from looping until time out on requests that can
// not be satisfied.
if (op.prologue_succeeded()) {
assert(is_in_or_null(op.result()), "result not in heap");
// If GC was locked out during VM operation then retry allocation
// and/or stall as necessary.
if (op.gc_locked()) {
assert(op.result() == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary
}
// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above ("result" guarding
// this path is NULL) and the most recent collection has exceeded the
// gc overhead limit (although enough may have been collected to
// satisfy the allocation). Exit the loop so that an out-of-memory
// will be thrown (return a NULL ignoring the contents of
// op.result()),
// but clear gc_overhead_limit_exceeded so that the next collection
// starts with a clean slate (i.e., forgets about previous overhead
// excesses). Fill op.result() with a filler object so that the
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
}
return op.result();
}
}
// The policy object will prevent us from looping forever. If the
// time spent in gc crosses a threshold, we will bail out.
loop_count++;
if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
log_warning(gc)("\tsize=" SIZE_FORMAT, size);
}
}
return result;
}
示例10: QWidget
//.........这里部分代码省略.........
const bool& threecols = m_task_shows_taskname;
// Taskname
if (m_task_shows_taskname) {
QLabel* taskname = new LabelWordWrapWide(m_p_task->shortname());
taskname->setObjectName(complete
? "task_item_taskname_complete"
: "task_item_taskname_incomplete");
QSizePolicy spTaskname(QSizePolicy::Preferred,
QSizePolicy::Preferred);
spTaskname.setHorizontalStretch(STRETCH_3COL_TASKNAME);
taskname->setSizePolicy(spTaskname);
rowlayout->addWidget(taskname);
}
// Timestamp
QLabel* timestamp = new LabelWordWrapWide(
m_p_task->whenCreated().toString(DateTime::SHORT_DATETIME_FORMAT));
timestamp->setObjectName(complete ? "task_item_timestamp_complete"
: "task_item_timestamp_incomplete");
QSizePolicy spTimestamp(QSizePolicy::Preferred,
QSizePolicy::Preferred);
spTimestamp.setHorizontalStretch(threecols ? STRETCH_3COL_TIMESTAMP
: STRETCH_2COL_TIMESTAMP);
timestamp->setSizePolicy(spTimestamp);
rowlayout->addWidget(timestamp);
// Summary
QLabel* summary = new LabelWordWrapWide(
m_p_task->summaryWithCompleteSuffix());
summary->setObjectName(complete ? "task_item_summary_complete"
: "task_item_summary_incomplete");
QSizePolicy spSummary(QSizePolicy::Preferred, QSizePolicy::Preferred);
spSummary.setHorizontalStretch(threecols ? STRETCH_3COL_SUMMARY
: STRETCH_2COL_SUMMARY);
summary->setSizePolicy(spSummary);
rowlayout->addWidget(summary);
} else {
// --------------------------------------------------------------------
// Conventional menu item
// --------------------------------------------------------------------
// Icon
if (!m_label_only) { // Labels go full-left
if (!m_icon.isEmpty()) {
QLabel* icon = UiFunc::iconWidget(m_icon, row);
rowlayout->addWidget(icon);
} else if (m_chain) {
QLabel* icon = UiFunc::iconWidget(
UiFunc::iconFilename(UiConst::ICON_CHAIN), row);
rowlayout->addWidget(icon);
} else {
rowlayout->addWidget(UiFunc::blankIcon(row));
}
}
// Title/subtitle
QVBoxLayout* textlayout = new QVBoxLayout();
QLabel* title = new LabelWordWrapWide(m_title);
title->setObjectName("menu_item_title");
textlayout->addWidget(title);
if (!m_subtitle.isEmpty()) {
QLabel* subtitle = new LabelWordWrapWide(m_subtitle);
subtitle->setObjectName("menu_item_subtitle");
textlayout->addWidget(subtitle);
}
rowlayout->addLayout(textlayout);
rowlayout->addStretch();
// Arrow on right
if (m_arrow_on_right) {
QLabel* iconLabel = UiFunc::iconWidget(
UiFunc::iconFilename(UiConst::ICON_HASCHILD),
nullptr, false);
rowlayout->addWidget(iconLabel);
}
// Background colour, via stylesheets
if (m_label_only) {
row->setObjectName("label_only");
} else if (!m_implemented) {
row->setObjectName("not_implemented");
} else if (m_unsupported) {
row->setObjectName("unsupported");
} else if (m_not_if_locked && app.locked()) {
row->setObjectName("locked");
} else if (m_needs_privilege && !app.privileged()) {
row->setObjectName("needs_privilege");
}
// ... but not for locked/needs privilege, as otherwise we'd need
// to refresh the whole menu? Well, we could try it.
// On Linux desktop, it's extremely fast.
}
// Size policy
QSizePolicy size_policy(QSizePolicy::MinimumExpanding, // horizontal
QSizePolicy::Fixed); // vertical
row->setSizePolicy(size_policy);
return row;
}
示例11: assert
//.........这里部分代码省略.........
}
if (!is_tlab &&
size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
result = old_gen()->allocate(size, is_tlab);
if (result != NULL) {
return result;
}
}
if (GC_locker::is_active_and_needs_gc()) {
// GC is locked out. If this is a TLAB allocation,
// return NULL; the requestor will retry allocation
// of an idividual object at a time.
if (is_tlab) {
return NULL;
}
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
// initiated by the last thread exiting the critical section; so
// we retry the allocation sequence from the beginning of the loop,
// rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
GC_locker::stall_until_clear();
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return NULL;
}
}
}
if (result == NULL) {
// Generate a VM operation
VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
VMThread::execute(&op);
// Did the VM operation execute? If so, return the result directly.
// This prevents us from looping until time out on requests that can
// not be satisfied.
if (op.prologue_succeeded()) {
assert(Universe::heap()->is_in_or_null(op.result()),
"result not in heap");
// If GC was locked out during VM operation then retry allocation
// and/or stall as necessary.
if (op.gc_locked()) {
assert(op.result() == NULL, "must be NULL if gc_locked() is true");
continue; // retry and/or stall as necessary
}
// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above ("result" guarding
// this path is NULL) and the most recent collection has exceeded the
// gc overhead limit (although enough may have been collected to
// satisfy the allocation). Exit the loop so that an out-of-memory
// will be thrown (return a NULL ignoring the contents of
// op.result()),
// but clear gc_overhead_limit_exceeded so that the next collection
// starts with a clean slate (i.e., forgets about previous overhead
// excesses). Fill op.result() with a filler object so that the
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
"return NULL because gc_overhead_limit_exceeded is set");
}
if (op.result() != NULL) {
CollectedHeap::fill_with_object(op.result(), size);
}
return NULL;
}
return op.result();
}
}
// The policy object will prevent us from looping forever. If the
// time spent in gc crosses a threshold, we will bail out.
loop_count++;
if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
" size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
}
}
return result;
}
示例12: update_avg_minor_pause_counter
// Refresh the average minor-pause counter from the size policy.
// The average (in seconds) is scaled to milliseconds before storing.
inline void update_avg_minor_pause_counter() {
  const double avg_pause_scaled =
      size_policy()->avg_minor_pause()->average() * 1000.0;
  _avg_minor_pause_counter->set_value((jlong)avg_pause_scaled);
}
示例13: update_decrease_for_footprint
// Mirror the policy's "decrease for footprint" decision flag into its
// performance counter.
inline void update_decrease_for_footprint() {
  const int decision = size_policy()->decrease_for_footprint();
  _decrease_for_footprint_counter->set_value(decision);
}
示例14: update_change_young_gen_for_throughput
// Mirror the policy's "change young gen for throughput" decision flag
// into its performance counter.
inline void update_change_young_gen_for_throughput() {
  const int decision = size_policy()->change_young_gen_for_throughput();
  _change_young_gen_for_throughput_counter->set_value(decision);
}
示例15: update_avg_survived_padded_avg_counters
inline void update_avg_survived_padded_avg_counters() {
_avg_survived_padded_avg_counter->set_value(
(jlong)(size_policy()->_avg_survived->padded_average())
);
}