本文整理汇总了C++中SGEN_LOG函数的典型用法代码示例。如果您正苦于以下问题:C++ SGEN_LOG函数的具体用法?C++ SGEN_LOG怎么用?C++ SGEN_LOG使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了SGEN_LOG函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: mono_gc_make_descr_for_object
/*
 * Descriptor builders.
 */
/*
 * Build a GC descriptor for an object type from a reference bitmap.
 *
 * bitmap: one bit per pointer-sized word of the object; a set bit marks a
 *         word that holds a managed reference.
 * numbits: number of valid bits in `bitmap`.
 * obj_size: instance size in bytes; rounded up to the GC alignment.
 *
 * Returns an SgenDescriptor encoding either a pointer-free object, an
 * inline-bitmap descriptor, or (in the part of this function not shown
 * here) a complex out-of-line descriptor.
 */
SgenDescriptor
mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size)
{
	int first_set = -1, num_set = 0, last_set = -1, i;
	SgenDescriptor desc = 0;
	size_t stored_size = SGEN_ALIGN_UP (obj_size);
	/* Scan the bitmap once to find the first/last set bit and the count. */
	for (i = 0; i < numbits; ++i) {
		if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
			if (first_set < 0)
				first_set = i;
			last_set = i;
			num_set++;
		}
	}
	/* No bits set: the object contains no references at all. */
	if (first_set < 0) {
		SGEN_LOG (6, "Ptrfree descriptor %p, size: %zd", (void*)desc, stored_size);
		if (stored_size <= MAX_RUNLEN_OBJECT_SIZE && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE)
			return DESC_TYPE_SMALL_PTRFREE | stored_size;
		/* Too large to encode the size inline. */
		return DESC_TYPE_COMPLEX_PTRFREE;
	}
	g_assert (!(stored_size & 0x7));
	SGEN_ASSERT (5, stored_size == SGEN_ALIGN_UP (stored_size), "Size is not aligned");
	/* we know the 2-word header is ptr-free */
	/* All set bits fit in the inline bitmap range: encode the bitmap directly
	 * in the descriptor word (header words are skipped since they never hold
	 * managed references). */
	if (last_set < BITMAP_NUM_BITS + OBJECT_HEADER_WORDS && stored_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
		desc = DESC_TYPE_BITMAP | ((*bitmap >> OBJECT_HEADER_WORDS) << LOW_TYPE_BITS);
		SGEN_LOG (6, "Largebitmap descriptor %p, size: %zd, last set: %d", (void*)desc, stored_size, last_set);
		return desc;
	}
	/* NOTE(review): this listing is truncated by the example aggregator —
	 * the fall-through path (complex descriptor) and the closing brace of
	 * the function are missing from this excerpt. */
示例2: class_kind
/*
 * Classify `klass` for the GC bridge: bridge classes are reported as such,
 * reference-free classes (and arrays of sealed, reference-free, non-bridge
 * element types) are opaque — they can never reach a bridge object —
 * everything else is transparent and must be scanned.
 */
static MonoGCBridgeObjectKind
class_kind (MonoClass *klass)
{
	MonoGCBridgeObjectKind kind = bridge_callbacks.bridge_class_kind (klass);

	/* Bridge classes are reported verbatim; nothing more to decide. */
	if (kind == GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS || kind == GC_BRIDGE_OPAQUE_BRIDGE_CLASS)
		return kind;

	/* A non-bridge class without any reference fields can never point to a
	 * bridge object, so the bridge code may safely ignore it. */
	if (!klass->has_references) {
		SGEN_LOG (6, "class %s is opaque\n", klass->name);
		return GC_BRIDGE_OPAQUE_CLASS;
	}

	/* Single-dimensional arrays of a sealed, reference-free, non-bridge
	 * element type are equally opaque. */
	if (klass->rank == 1) {
		MonoClass *element = klass->element_class;
		/* FIXME the bridge check can be quite expensive, cache it at the class level. */
		if ((element->flags & TYPE_ATTRIBUTE_SEALED) && !element->has_references && !bridge_callbacks.bridge_class_kind (element)) {
			SGEN_LOG (6, "class %s is opaque\n", klass->name);
			return GC_BRIDGE_OPAQUE_CLASS;
		}
	}

	return GC_BRIDGE_TRANSPARENT_CLASS;
}
示例3: sgen_stop_world
/* LOCKING: assumes the GC lock is held */
/*
 * Stop all managed threads so a collection of `generation` can proceed.
 *
 * Notifies the profiler, takes the auxiliary GC locks, processes toggleref
 * entries, and performs the suspend handshake.  Threads caught inside the
 * managed allocator are restarted and re-suspended until none remain.
 *
 * Returns the number of threads that were actually suspended.
 */
int
sgen_stop_world (int generation)
{
	int count, dead;

	mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_BEGIN ();
	acquire_gc_locks ();

	/* We start to scan after locks are taken; this ensures we won't be interrupted. */
	sgen_process_togglerefs ();

	/* `count` is only used here as a stack-address cookie by the callee. */
	update_current_thread_stack (&count);

	sgen_global_stop_count++;
	SGEN_LOG (3, "stopping world n %d from %p %p", sgen_global_stop_count, mono_thread_info_current (), (gpointer)mono_native_thread_id_get ());
	TV_GETTIME (stop_world_time);
	count = sgen_thread_handshake (TRUE);
	dead = restart_threads_until_none_in_managed_allocator ();
	/* FIX: the original message read "that been initialy suspended". */
	if (count < dead)
		g_error ("More threads have died (%d) than were initially suspended (%d)", dead, count);
	count -= dead;

	SGEN_LOG (3, "world stopped %d thread(s)", count);
	mono_profiler_gc_event (MONO_GC_EVENT_POST_STOP_WORLD, generation);
	MONO_GC_WORLD_STOP_END ();

	sgen_memgov_collection_start (generation);
	sgen_bridge_reset_data ();

	return count;
}
示例4: sgen_client_clear_togglerefs
/*
 * Clear or keep toggleref weak slots whose referents lie in [start, end).
 * Slots whose objects are ready for finalization are NULLed (compaction is
 * deferred to the callback step); live ones are copied/marked via the
 * context's copy function.  Finally the gray stack is drained.
 */
void sgen_client_clear_togglerefs (char *start, char *end, ScanCopyContext ctx)
{
	CopyOrMarkObjectFunc copy = ctx.ops->copy_or_mark_object;
	SgenGrayQueue *gray_queue = ctx.queue;
	int slot;

	SGEN_LOG (4, "Clearing ToggleRefs %d", toggleref_array_size);

	for (slot = 0; slot < toggleref_array_size; ++slot) {
		GCObject *obj = toggleref_array [slot].weak_ref;

		/* Only weak slots whose referent is inside the given range matter. */
		if (!obj)
			continue;
		if ((char*)obj < start || (char*)obj >= end)
			continue;

		if (sgen_gc_is_object_ready_for_finalization (obj)) {
			SGEN_LOG (6, "\tcleaning weak slot %d", slot);
			toggleref_array [slot].weak_ref = NULL; /* We defer compaction to only happen on the callback step. */
		} else {
			SGEN_LOG (6, "\tkeeping weak slot %d", slot);
			copy (&toggleref_array [slot].weak_ref, gray_queue);
		}
	}

	sgen_drain_gray_stack (ctx);
}
示例5: sgen_client_stop_world
/* LOCKING: assumes the GC lock is held */
/*
 * Client-side stop-the-world for a collection of `generation` (unified
 * suspend variant): flushes pending profiler move events, takes the GC
 * locks, processes togglerefs, suspends all threads and records the
 * stop-world time.
 */
void
sgen_client_stop_world (int generation)
{
	TV_DECLARE (end_handshake);
	/* notify the profiler of the leftovers */
	/* FIXME this is the wrong spot as we can STW for non collection reasons. */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
		mono_sgen_gc_event_moves ();
	acquire_gc_locks ();
	mono_profiler_gc_event (MONO_GC_EVENT_PRE_STOP_WORLD_LOCKED, generation);
	/* We start to scan after locks are taken, this ensures we won't be interrupted. */
	sgen_process_togglerefs ();
	/* `generation`'s address is only used as a stack-position cookie here. */
	update_current_thread_stack (&generation);
	sgen_global_stop_count++;
	SGEN_LOG (3, "stopping world n %d from %p %p", sgen_global_stop_count, mono_thread_info_current (), (gpointer) (gsize) mono_native_thread_id_get ());
	/* Timestamp taken before the suspend so the pause time includes the handshake. */
	TV_GETTIME (stop_world_time);
	sgen_unified_suspend_stop_world ();
	SGEN_LOG (3, "world stopped");
	TV_GETTIME (end_handshake);
	time_stop_world += TV_ELAPSED (stop_world_time, end_handshake);
	sgen_memgov_collection_start (generation);
	if (sgen_need_bridge_processing ())
		sgen_bridge_reset_data ();
}
示例6: sgen_find_section_pin_queue_start_end
/*
 * Locate the slice of the optimized pin queue that falls inside `section`
 * and record it on the section (start pointer + number of entries).
 *
 * FIX: the original text contained the mojibake "§ion->" — an HTML
 * "&sect;" entity eaten by the example scraper — where "&section->"
 * (address-of) was intended.
 */
void
sgen_find_section_pin_queue_start_end (GCMemSection *section)
{
	SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
	section->pin_queue_start = sgen_find_optimized_pin_queue_area (section->data, section->end_data, &section->pin_queue_num_entries);
	SGEN_LOG (6, "Found %d pinning addresses in section %p", section->pin_queue_num_entries, section);
}
示例7: sgen_find_section_pin_queue_start_end
/*
 * Locate the slice of the optimized pin queue that falls inside `section`
 * and record its [first_entry, last_entry) index range on the section.
 *
 * FIX: the original text contained the mojibake "§ion->" — an HTML
 * "&sect;" entity eaten by the example scraper — where "&section->"
 * (address-of) was intended.
 */
void
sgen_find_section_pin_queue_start_end (GCMemSection *section)
{
	SGEN_LOG (6, "Pinning from section %p (%p-%p)", section, section->data, section->end_data);
	sgen_find_optimized_pin_queue_area (section->data, section->end_data,
			&section->pin_queue_first_entry, &section->pin_queue_last_entry);
	SGEN_LOG (6, "Found %zd pinning addresses in section %p",
			section->pin_queue_last_entry - section->pin_queue_first_entry, section);
}
示例8: sgen_memgov_calculate_minor_collection_allowance
/*
 * Recompute how much the heap may grow before the next major collection is
 * triggered.  Only runs when `need_calculate_minor_collection_allowance`
 * is set (i.e. after a sweep); updates `major_collection_trigger_size`.
 */
static void
sgen_memgov_calculate_minor_collection_allowance (void)
{
	size_t new_major, new_heap_size, allowance_target, allowance;
	size_t decrease;
	if (!need_calculate_minor_collection_allowance)
		return;
	SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");
	new_major = major_collector.get_bytes_survived_last_sweep ();
	new_heap_size = new_major + last_collection_los_memory_usage;
	/*
	 * We allow the heap to grow by one third its current size before we start the next
	 * major collection.
	 */
	allowance_target = new_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO;
	/* Never let the allowance fall below the configured minimum. */
	allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);
	/*
	 * For the concurrent collector, we decrease the allowance relative to the memory
	 * growth during the M&S phase, survival rate of the collection and the allowance
	 * ratio.
	 */
	decrease = (major_pre_sweep_heap_size - major_start_heap_size) * ((float)new_heap_size / major_pre_sweep_heap_size) * (SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO + 1);
	/* Clamp so the allowance never goes negative (size_t underflow). */
	if (decrease > allowance)
		decrease = allowance;
	allowance -= decrease;
	/* Respect the soft heap limit: shrink the allowance so the projected heap
	 * stays under it, but keep at least the minimum allowance. */
	if (new_heap_size + allowance > soft_heap_limit) {
		if (new_heap_size > soft_heap_limit)
			allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
		else
			allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
	}
	/* FIXME: Why is this here? */
	if (major_collector.free_swept_blocks)
		major_collector.free_swept_blocks (allowance);
	major_collection_trigger_size = new_heap_size + allowance;
	need_calculate_minor_collection_allowance = FALSE;
	if (debug_print_allowance) {
		SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
		SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
		SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
	}
}
示例9: sgen_process_togglerefs
/*
 * Run the registered toggleref callback over every entry in
 * `toggleref_array` and compact the array in place: each entry is dropped,
 * made strong, or made weak according to the callback's verdict.
 *
 * FIX: corrected the misspelled "Proccessing" in both log messages.
 */
void
sgen_process_togglerefs (void)
{
	int i, w;
	/* Indexed by MONO_TOGGLE_REF_{DROP,STRONG,WEAK}; used for the summary log. */
	int toggle_ref_counts [3] = { 0, 0, 0 };

	SGEN_LOG (4, "Processing ToggleRefs %d", toggleref_array_size);

	/* i scans the array, w is the write cursor for the compacted result. */
	for (i = w = 0; i < toggleref_array_size; ++i) {
		int res;
		MonoGCToggleRef r = toggleref_array [i];
		MonoObject *obj;

		if (r.strong_ref)
			obj = r.strong_ref;
		else if (r.weak_ref)
			obj = r.weak_ref;
		else
			continue; /* empty slot: dropped by compaction */

		res = toggleref_callback (obj);
		++toggle_ref_counts [res];
		switch (res) {
		case MONO_TOGGLE_REF_DROP:
			break;
		case MONO_TOGGLE_REF_STRONG:
			toggleref_array [w].strong_ref = obj;
			toggleref_array [w].weak_ref = NULL;
			++w;
			break;
		case MONO_TOGGLE_REF_WEAK:
			toggleref_array [w].strong_ref = NULL;
			toggleref_array [w].weak_ref = obj;
			++w;
			break;
		default:
			g_assert_not_reached ();
		}
	}

	toggleref_array_size = w;

	SGEN_LOG (4, "Done Processing ToggleRefs dropped %d strong %d weak %d final size %d",
		toggle_ref_counts [MONO_TOGGLE_REF_DROP],
		toggle_ref_counts [MONO_TOGGLE_REF_STRONG],
		toggle_ref_counts [MONO_TOGGLE_REF_WEAK],
		w);
}
示例10: mono_gc_alloc_pinned_obj
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 */
/*
 * Allocate a pinned object of `size` bytes with the given vtable.  Small
 * objects come from the major collector's pinned allocator; anything over
 * SGEN_MAX_SMALL_OBJ_SIZE goes to the LOS (which pins implicitly).
 * Returns NULL when the size cannot be aligned.
 */
void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
	void **res;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;
	size = ALIGN_UP (size);

	LOCK_GC;

	if (size <= SGEN_MAX_SMALL_OBJ_SIZE) {
		SGEN_ASSERT (9, vtable->klass->inited, "class %s:%s is not initialized", vtable->klass->name_space, vtable->klass->name);
		res = major_collector.alloc_small_pinned_obj (vtable, size, SGEN_VTABLE_HAS_REFERENCES (vtable));
	} else {
		/* large objects are always pinned anyway */
		res = sgen_los_alloc_large_inner (vtable, size);
	}

	if (G_LIKELY (res)) {
		SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", res, vtable, vtable->klass->name, size);
		if (size <= SGEN_MAX_SMALL_OBJ_SIZE)
			MONO_GC_MAJOR_OBJ_ALLOC_PINNED ((mword)res, size, vtable->klass->name_space, vtable->klass->name);
		else
			MONO_GC_MAJOR_OBJ_ALLOC_LARGE ((mword)res, size, vtable->klass->name_space, vtable->klass->name);
		binary_protocol_alloc_pinned (res, vtable, size);
	}

	UNLOCK_GC;
	return res;
}
示例11: sgen_thread_handshake
/*
 * Signal every registered thread (except the caller and GC-disabled
 * threads) with the suspend or restart signal, then wait for their
 * acknowledgements.
 *
 * suspend: TRUE to send the suspend signal, FALSE to send restart.
 * Returns the number of threads successfully signalled; threads for which
 * pthread_kill failed are marked `skip` and not waited on.
 */
int
sgen_thread_handshake (BOOL suspend)
{
	int count, result;
	SgenThreadInfo *info;
	int signum = suspend ? suspend_signal_num : restart_signal_num;

	MonoNativeThreadId me = mono_native_thread_id_get ();

	count = 0;
	/* The current thread cannot suspend itself; mark it done up front. */
	mono_thread_info_current ()->client_info.suspend_done = TRUE;
	FOREACH_THREAD_SAFE (info) {
		if (mono_native_thread_id_equals (mono_thread_info_get_tid (info), me)) {
			continue;
		}
		info->client_info.suspend_done = FALSE;
		if (info->client_info.gc_disabled)
			continue;
		/*if (signum == suspend_signal_num && info->stop_count == global_stop_count)
			continue;*/
		result = mono_threads_pthread_kill (info, signum);
		if (result == 0) {
			count++;
		} else {
			/* Signal delivery failed (e.g. thread already gone): skip it. */
			info->client_info.skip = 1;
		}
	} END_FOREACH_THREAD_SAFE

	/* Block until all signalled threads have acknowledged. */
	sgen_wait_for_suspend_ack (count);

	SGEN_LOG (4, "%s handshake for %d threads\n", suspend ? "suspend" : "resume", count);
	return count;
}
示例12: add_nursery_frag
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
{
	SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
	binary_protocol_empty (frag_start, frag_size);

	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size < SGEN_MAX_NURSERY_WASTE) {
		/* Clear unused fragments, pinning depends on this */
		sgen_clear_range (frag_start, frag_end);
		HEAVY_STAT (stat_wasted_bytes_small_areas += frag_size);
		return;
	}

	/* memsetting just the first chunk start is bound to provide better cache locality */
	if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
		memset (frag_start, 0, frag_size);
	else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
		memset (frag_start, 0xff, frag_size);

#ifdef NALLOC_DEBUG
	/* XXX convert this into a flight record entry
	printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
	*/
#endif

	sgen_fragment_allocator_add (allocator, frag_start, frag_end);
	fragment_total += frag_size;
}
示例13: sgen_restart_world
/* LOCKING: assumes the GC lock is held */
/*
 * Restart the world after a collection of `generation`: clear cached stack
 * state for every thread, send the restart handshake, record pause timing
 * (optionally into `timing`), finish bridge processing and release the GC
 * locks.  Returns the number of threads restarted.
 */
int
sgen_restart_world (int generation, GGTimingInfo *timing)
{
	int count;
	SgenThreadInfo *info;
	TV_DECLARE (end_sw);
	TV_DECLARE (end_bridge);
	unsigned long usec, bridge_usec;
	/* notify the profiler of the leftovers */
	/* FIXME this is the wrong spot as we can STW for non collection reasons. */
	if (G_UNLIKELY (mono_profiler_events & MONO_PROFILE_GC_MOVES))
		sgen_gc_event_moves ();
	mono_profiler_gc_event (MONO_GC_EVENT_PRE_START_WORLD, generation);
	MONO_GC_WORLD_RESTART_BEGIN (generation);
	/* Invalidate the per-thread stack/register snapshots taken at suspend. */
	FOREACH_THREAD (info) {
		info->stack_start = NULL;
#ifdef USE_MONO_CTX
		memset (&info->ctx, 0, sizeof (MonoContext));
#else
		memset (&info->regs, 0, sizeof (info->regs));
#endif
	} END_FOREACH_THREAD
	count = sgen_thread_handshake (FALSE);
	TV_GETTIME (end_sw);
	usec = TV_ELAPSED (stop_world_time, end_sw);
	max_pause_usec = MAX (usec, max_pause_usec);
	SGEN_LOG (2, "restarted %d thread(s) (pause time: %d usec, max: %d)", count, (int)usec, (int)max_pause_usec);
	mono_profiler_gc_event (MONO_GC_EVENT_POST_START_WORLD, generation);
	MONO_GC_WORLD_RESTART_END (generation);
	/*
	 * We must release the thread info suspend lock after doing
	 * the thread handshake. Otherwise, if the GC stops the world
	 * and a thread is in the process of starting up, but has not
	 * yet registered (it's not in the thread_list), it is
	 * possible that the thread does register while the world is
	 * stopped. When restarting the GC will then try to restart
	 * said thread, but since it never got the suspend signal, it
	 * cannot answer the restart signal, so a deadlock results.
	 */
	release_gc_locks ();
	sgen_try_free_some_memory = TRUE;
	/* Bridge processing runs with the world running; time it separately. */
	sgen_bridge_processing_finish (generation);
	TV_GETTIME (end_bridge);
	bridge_usec = TV_ELAPSED (end_sw, end_bridge);
	if (timing) {
		timing [0].stw_time = usec;
		timing [0].bridge_time = bridge_usec;
	}
	sgen_memgov_collection_end (generation, timing, timing ? 2 : 0);
	return count;
}
示例14: sgen_alloc_obj_pinned
/*
 * To be used for interned strings and possibly MonoThread, reflection handles.
 * We may want to explicitly free these objects.
 */
/*
 * Allocate a pinned object of `size` bytes for `vtable`.  Small requests
 * go through the major collector's pinned allocator; requests above
 * SGEN_MAX_SMALL_OBJ_SIZE are served by the LOS, which pins implicitly.
 * Returns NULL when the size cannot be aligned.
 */
GCObject*
sgen_alloc_obj_pinned (GCVTable vtable, size_t size)
{
	GCObject *obj;

	if (!SGEN_CAN_ALIGN_UP (size))
		return NULL;
	size = ALIGN_UP (size);

	LOCK_GC;

	if (size <= SGEN_MAX_SMALL_OBJ_SIZE) {
		SGEN_ASSERT (9, sgen_client_vtable_is_inited (vtable), "class %s:%s is not initialized", sgen_client_vtable_get_namespace (vtable), sgen_client_vtable_get_name (vtable));
		obj = sgen_major_collector.alloc_small_pinned_obj (vtable, size, SGEN_VTABLE_HAS_REFERENCES (vtable));
	} else {
		/* large objects are always pinned anyway */
		obj = (GCObject *)sgen_los_alloc_large_inner (vtable, size);
	}

	if (G_LIKELY (obj)) {
		SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", obj, vtable, sgen_client_vtable_get_name (vtable), size);
		sgen_binary_protocol_alloc_pinned (obj, vtable, size, sgen_client_get_provenance ());
	}

	UNLOCK_GC;
	return obj;
}
示例15: alloc_complex_descriptor
/*
 * Store `bitmap` (of `numbits` bits) in the global complex-descriptor
 * table, deduplicating against existing entries, and return its index.
 *
 * Table layout: each entry is `nwords` gsize slots — the first slot holds
 * `nwords` itself, the remaining `nwords - 1` slots hold the bitmap words.
 * Takes and releases the GC lock.
 */
static int
alloc_complex_descriptor (gsize *bitmap, int numbits)
{
	int nwords, res, i;

	/* Round up to whole bitmap words; +1 slot for the length header. */
	numbits = ALIGN_TO (numbits, GC_BITS_PER_WORD);
	nwords = numbits / GC_BITS_PER_WORD + 1;

	sgen_gc_lock ();
	res = complex_descriptors_next;
	/* linear search, so we don't have duplicates with domain load/unload
	 * this should not be performance critical or we'd have bigger issues
	 * (the number and size of complex descriptors should be small).
	 */
	for (i = 0; i < complex_descriptors_next; ) {
		if (complex_descriptors [i] == nwords) {
			int j, found = TRUE;
			for (j = 0; j < nwords - 1; ++j) {
				if (complex_descriptors [i + 1 + j] != bitmap [j]) {
					found = FALSE;
					break;
				}
			}
			if (found) {
				/* Identical descriptor already stored: reuse its index. */
				sgen_gc_unlock ();
				return i;
			}
		}
		/* Advance by this entry's stored length to the next entry. */
		i += (int)complex_descriptors [i];
	}
	/* Grow the table if the new entry doesn't fit. */
	if (complex_descriptors_next + nwords > complex_descriptors_size) {
		int new_size = complex_descriptors_size * 2 + nwords;

		complex_descriptors = (gsize *)g_realloc (complex_descriptors, new_size * sizeof (gsize));
		complex_descriptors_size = new_size;
	}
	SGEN_LOG (6, "Complex descriptor %d, size: %d (total desc memory: %d)", res, nwords, complex_descriptors_size);
	complex_descriptors_next += nwords;
	complex_descriptors [res] = nwords;
	for (i = 0; i < nwords - 1; ++i) {
		complex_descriptors [res + 1 + i] = bitmap [i];
		SGEN_LOG (6, "\tvalue: %p", (void*)complex_descriptors [res + 1 + i]);
	}
	sgen_gc_unlock ();
	return res;
}