

C++ GenCollectedHeap Class Code Examples

This article collects typical usage examples of the C++ class GenCollectedHeap. If you are wondering what GenCollectedHeap is for, how to use it, or what real-world usages look like, the curated class examples below should help.


The following presents 13 code examples of the GenCollectedHeap class, sorted by popularity by default.
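Every example below starts from the same access pattern: GenCollectedHeap is a singleton, obtained through the static GenCollectedHeap::heap() accessor, and everything else is a query or command on the returned pointer. The following is a minimal sketch of that pattern, illustrative only: it compiles only inside a HotSpot source tree, the header path and exact member set vary across the JDK versions quoted below, and probe_heap is a hypothetical name.

#include "memory/genCollectedHeap.hpp" // HotSpot-internal header; the path varies by JDK version

// Minimal sketch of the shared access pattern. Only members that appear
// verbatim in the examples below are used; probe_heap itself is hypothetical.
void probe_heap(HeapWord* addr) {
  GenCollectedHeap* gch = GenCollectedHeap::heap(); // static singleton accessor
  if (gch->is_in_reserved(addr)) {                  // is addr within the reserved heap range?
    size_t used = gch->used();                      // bytes in use across all generations
    GenRemSet* rs = gch->rem_set();                 // card table / remembered set (see Examples 3, 8)
    (void)used; (void)rs;                           // queries only; this sketch has no side effects
  }
}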

Example 1: assert

inline void ParScanClosure::do_oop_work(T* p,
                                        bool gc_barrier,
                                        bool root_scan) {
  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
          generation()->is_in_reserved(p))
         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
         "The gen must be right, and we must be doing the barrier "
         "in older generations.");
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
#ifndef PRODUCT
      if (_g->to()->is_in_reserved(obj)) {
        tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Space* sp = gch->space_containing(p);
        oop obj = oop(sp->block_start(p));
        assert((HeapWord*)obj < (HeapWord*)p, "Error");
        tty->print_cr("Object: " PTR_FORMAT, p2i((void *)obj));
        tty->print_cr("-------");
        obj->print();
        tty->print_cr("-----");
        tty->print_cr("Heap:");
        tty->print_cr("-----");
        gch->print();
        ShouldNotReachHere();
      }
#endif
      // OK, we need to ensure that it is copied.
      // We read the klass and mark in this order, so that we can reliably
      // get the size of the object: if the mark we read is not a
      // forwarding pointer, then the klass is valid: the klass is only
      // overwritten with an overflow next pointer after the object is
      // forwarded.
      Klass* objK = obj->klass();
      markOop m = obj->mark();
      oop new_obj;
      if (m->is_marked()) { // Contains forwarding pointer.
        new_obj = ParNewGeneration::real_forwardee(obj);
        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
        log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                        "forwarded ",
                                        new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
      } else {
        size_t obj_sz = obj->size_given_klass(objK);
        new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
        if (root_scan) {
          // This may have pushed an object.  If we have a root
          // category with a lot of roots, can't let the queue get too
          // full:
          (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
        }
      }
      if (is_scanning_a_klass()) {
        do_klass_barrier();
      } else if (gc_barrier) {
        // Now call parent closure
        par_do_barrier(p);
      }
    }
  }
}
Developer: mohlerm, Project: hotspot_cached_profiles, Lines: 64, Source: parOopClosures.inline.hpp

Example 2: assert

void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
                                       size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");
  CollectedHeap* heap = Universe::heap();
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");


  // We require eden and to space to be empty
  if ((!eden()->is_empty()) || (!to()->is_empty())) {
    return;
  }

  size_t cur_eden_size = eden()->capacity();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden()->bottom(),
                  eden()->end(),
                  pointer_delta(eden()->end(),
                                eden()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from()->bottom(),
                  from()->end(),
                  pointer_delta(from()->end(),
                                from()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to()->bottom(),
                  to()->end(),
                  pointer_delta(  to()->end(),
                                  to()->bottom(),
                                  sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to()->capacity() &&
      requested_survivor_size == from()->capacity() &&
      requested_eden_size == eden()->capacity()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)eden()->bottom();
  char* eden_end   = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end   = (char*)from()->end();
  char* to_start   = (char*)to()->bottom();
  char* to_end     = (char*)to()->end();

  const size_t alignment = os::vm_page_size();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_size = align_size_down(eden_size, alignment);
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

//......... rest of this example omitted .........
Developer: n/a, Project: n/a, Lines: 101, Source: n/a

Example 3: guarantee

void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
  guarantee(level == 1, "We always collect both old and young.");
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture heap size before collection for printing.
  size_t gch_prev_used = gch->used();

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level);

  allocate_stacks();

  mark_sweep_phase1(level, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3(level);

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated all generations younger than this
  // one, then we can clear the card table.  Otherwise, we must invalidate
  // it (consider all cards dirty).  In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && gch->get_gen(i)->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  Generation* old_gen = gch->get_gen(level);
  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (all_empty) {
    // We've evacuated all generations below us.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  // refs processing: clean slate
  _ref_processor = NULL;

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
//......... rest of this example omitted .........
Developer: MyProgrammingStyle, Project: hotspot, Lines: 101, Source: genMarkSweep.cpp

Example 4: mem_allocate_work

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      // NULL is returned below under several different circumstances.
      // In general gc_overhead_limit_was_exceeded should be false so
      // set it so here and reset it to true only if the gc time
      // limit is being exceeded as checked below.
      *gc_overhead_limit_was_exceeded = false;

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    // Allocation has failed and a collection is about
    // to be done.  If the gc time limit was exceeded the
    // last time a collection was done, return NULL so
    // that an out-of-memory will be thrown.  Clear
    // gc_time_limit_exceeded so that subsequent attempts
    // at a collection will be made.
    if (size_policy()->gc_time_limit_exceeded()) {
      *gc_overhead_limit_was_exceeded = true;
      size_policy()->set_gc_time_limit_exceeded(false);
      return NULL;
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
//......... rest of this example omitted .........
Developer: AllenWeb, Project: openjdk-1, Lines: 101, Source: collectorPolicy.cpp

Example 5: satisfy_failed_allocation

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail()) {
    // The gc_prologues have not executed yet.  The value
    // for incremental_collection_will_fail() is the remnant
    // of the last collection.
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
Developer: AllenWeb, Project: openjdk-1, Lines: 76, Source: collectorPolicy.cpp

Example 6: mem_allocate_work

HeapWord* TwoGenerationCollectorPolicy::mem_allocate_work(size_t size,
                                                          bool is_large_noref,
                                                          bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen0->par_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
    int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.  If jvmpi slow allocation
      // is enabled, allocate in later generations (since the
      // first generation is always full).
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, 
                                       is_large_noref, 
                                       is_tlab, 
                                       first_only);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }
      
    VM_GenCollectForAllocation op(size,
                                  is_large_noref,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || gch->is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
		  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
Developer: subxiang, Project: jdk-source-code, Lines: 69, Source: collectorPolicy.cpp

Example 7: satisfy_failed_allocation

HeapWord* TwoGenerationCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                                  bool   is_large_noref,
                                                                  bool   is_tlab,
                                                                  bool*  notify_ref_lock) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;
  
  // The gc_prologues have not executed yet.  The value
  // for incremental_collection_will_fail() is the remnant
  // of the last collection.
  if (!gch->incremental_collection_will_fail()) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  } else {
    // The incremental_collection_will_fail flag is set if the
    // next incremental collection will not succeed (e.g., the
    // DefNewGeneration didn't think it had space to promote all
    // its objects). However, that last incremental collection
    // continued, allowing all older generations to collect (and
    // perhaps change the state of the flag).
    // 
    // If we reach here, we know that an incremental collection of
    // all generations left us in the state where incremental collections
    // will fail, so we just try allocating the requested space. 
    // If the allocation fails everywhere, force a full collection.
    // We're probably very close to being out of memory, so forcing many
    // collections now probably won't help.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::satisfy_failed_allocation:"
                    " attempting allocation anywhere before full collection");
    }
    result = gch->attempt_allocation(size, 
                                     is_large_noref, 
                                     is_tlab, 
                                     false /* first_only */);
    if (result != NULL) {
      assert(gch->is_in(result), "result not in heap");
      return result;
    }

    // Allocation request hasn't yet been met; try a full collection.
    gch->do_collection(true             /* full */, 
                       false            /* clear_all_soft_refs */, 
                       size             /* size */, 
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */, 
                       notify_ref_lock  /* notify_ref_lock */);
  }
  
  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /*first_only*/);
  
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // OK, collection failed, try expansion.
  for (int i = number_of_generations() - 1 ; i>= 0; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen->expand_and_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
  }
  
  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // What else?  We might try synchronous finalization later.  If the total
//......... rest of this example omitted .........
Developer: subxiang, Project: jdk-source-code, Lines: 101, Source: collectorPolicy.cpp

Example 8: assert

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
			       size_t size,
                               bool   is_large_noref,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL, 
    "This must be the youngest gen, and not the only gen");
  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!full_promotion_would_succeed()) {
    gch->set_incremental_collection_will_fail();
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("DefNewGeneration::collect"
                    " contiguous_available: " SIZE_FORMAT " < used: " SIZE_FORMAT,
                    _next_gen->max_contiguous_available(), used());
    }
    return;
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0), 
	 "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
  NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
  COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());
      
  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();
  if (!cp->is_train_policy()) {
    FastScanClosure fsc_with_no_gc_barrier(this, false);
    FastScanClosure fsc_with_gc_barrier(this, true);

    FastEvacuateFollowersClosure evacuate_followers(gch, _level,
                                                    &fsc_with_no_gc_barrier, 
                                                    &fsc_with_gc_barrier);
    
    assert(gch->no_allocs_since_save_marks(0), 
           "save marks have not been newly set.");
    
    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                              // strong roots.
                              false,// not collecting permanent generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &fsc_with_gc_barrier, 
                              &fsc_with_no_gc_barrier);
    
    // "evacuate followers".
    evacuate_followers.do_void();
    
    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
  } else { // Train policy
    ScanClosure sc_with_no_gc_barrier(this, false);
    ScanClosure sc_with_gc_barrier(this, true);
    
    EvacuateFollowersClosure evacuate_followers(gch, _level,
                                                &sc_with_no_gc_barrier, 
                                                &sc_with_gc_barrier);
    
    gch->process_strong_roots(_level,
                              true, // Process younger gens, if any, as
                              // strong roots.
                              false,// not collecting perm generation.
                              GenCollectedHeap::CSO_AllClasses,
                              &sc_with_gc_barrier, 
                              &sc_with_no_gc_barrier);
    
    // "evacuate followers".
    evacuate_followers.do_void();

    TrainPolicyKeepAliveClosure keep_alive((TrainGeneration*)_next_gen, 
                                           &scan_weak_ref);
    ref_processor()->process_discovered_references(soft_ref_policy,
                                                   &is_alive,
                                                   &keep_alive,
                                                   &evacuate_followers);
//......... rest of this example omitted .........
Developer: fatman2021, Project: myforthprocessor, Lines: 101, Source: defNewGeneration.cpp

Example 9: mem_allocate_work

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation *young = gch->young_gen();
    assert(young->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = gch->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
//......... rest of this example omitted .........
Developer: netroby, Project: jdk9-dev, Lines: 101, Source: collectorPolicy.cpp

Example 10: satisfy_failed_allocation

/**
 * Handles one failed memory request from an application (Java-level) thread:
 *   1. Choose the GC type:
 *      1) If a GC has been triggered but cannot run yet, abandon this collection.
 *      2) If an incremental collection is safe, run a minor GC.
 *      3) Otherwise, a full GC is the only option.
 *   2. Try to allocate the block from each generation in turn (young, then old).
 *   3. Try to expand each generation's capacity (old, then young) and allocate from it.
 *   4. Run one thorough full GC (clearing all soft references).
 *   5. Make a final allocation attempt across the generations (young, then old).
 */
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");

  if (GC_locker::is_active_and_needs_gc()) {  // A GC has been triggered but cannot run yet

    if (!gch->is_maximal_no_gc()) { // Some generation can still grow; try to allocate by expanding it
      result = expand_heap_and_allocate(size, is_tlab);
    }

    return result;   // could be null if we are out of space

  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) { // An incremental collection is currently viable, so trigger only a minor GC
    // Incremental (minor) GC
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {  // Fall back to a full GC
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // After the GC, try once more to allocate the requested block from each generation in turn
  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // The GC may have left room to expand the generations' capacity,
  // so retry the allocation with expansion of the generations allowed.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    // One final, thorough GC: collect all generations and clear all soft references
    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  // After the thorough GC, make one last attempt to allocate the block from each generation in turn
  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(), "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
Developer: GregBowyer, Project: Hotspot, Lines: 95, Source: collectorPolicy.cpp

Example 11: mem_allocate_work

/**
 * Allocates a block of the given size using the generational (round-robin) policy:
 *   1. (Lock-free) fast-path allocation in the young generation.
 *   2. (Locked)
 *      1) Acquire the global heap lock.
 *      2) If the request exceeds the young generation's capacity, or a GC has been
 *         triggered but cannot run, or an incremental GC would fail, try the young
 *         and then the old generation in turn; otherwise allocate only from the
 *         young generation.
 *      3) If a GC has been triggered but cannot run yet:
 *         a) If some generation can still grow, try expand-and-allocate from the
 *            old generation down to the young one.
 *         b) Otherwise release the heap lock and wait for the GC to complete.
 *      4) Release the heap lock, request a GC, and wait for it to run or be abandoned.
 *      5) If the GC was abandoned or suppressed by the GC locker, go back to step 1.
 *      6) If the GC exceeded its time limit, return NULL; otherwise return the block.
 *
 * @param size     size of the requested memory block
 * @param is_tlab  false: allocate directly from the heap;
 *                 true:  allocate space for the current thread's local allocation buffer
 * @param gc_overhead_limit_was_exceeded  whether the full GC exceeded its time limit
 */
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, bool is_tlab,
		bool* gc_overhead_limit_was_exceeded) {

  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());

  // Make sure no GC is currently in progress in the JVM
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // The young generation must support lock-free concurrent (inline contiguous) allocation
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(), "Otherwise, must do alloc within heap lock");

    // Should the young generation be tried first for this allocation?
    if (gen0->should_allocate(size, is_tlab)) {
      // Try a fast-path allocation from the young generation
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }

    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }

      // Note that only large objects get a shot at being
      // allocated in later generations.
      // Should the allocation be attempted in the young generation only?
      bool first_only = ! should_try_older_generation_allocation(size);

      // Try to allocate from each generation of the heap in turn
      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {  // Another thread has already triggered a GC
        if (is_tlab) {
          // The current thread is requesting memory for a thread-local allocation
          // buffer (from which objects would then be allocated); return NULL so the
          // caller allocates the object directly from a generation instead.
          return NULL;
        }

        if (!gch->is_maximal_no_gc()) {  // Some generation in the heap can still grow
          // Retry the allocation across the generations with expansion allowed
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
//......... rest of this example omitted .........
Developer: GregBowyer, Project: Hotspot, Lines: 101, Source: collectorPolicy.cpp

Example 12: assert

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
//......... rest of this example omitted .........
Developer: BunnyWei, Project: truffle-llvmir, Lines: 101, Source: defNewGeneration.cpp

Example 13: x

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false,                     // full
                       false,                     // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true,                      // full
                       false,                     // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntXFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true,                      // full
                       true,                      // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
Developer: gaoxiaojun, Project: dync, Lines: 79, Source: collectorPolicy.cpp
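
The mem_allocate_work variants above (Examples 4, 6, 9, and 11) all share the same retry skeleton: a lock-free fast path in the young generation, a locked slow path across all generations, then a collection scheduled as a VM operation before looping. The condensed sketch below is a paraphrase for orientation, not code from any one JDK; it reuses only calls quoted in Example 9 and elides the should_allocate check, GC-locker stalls, overhead-limit checks, and retry warnings that bound the real loop.

// Condensed, hypothetical sketch of the allocation retry skeleton. Names are
// taken from Example 9 (JDK 9 era); it compiles only inside a HotSpot tree.
HeapWord* retry_skeleton_sketch(size_t size, bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  for (;;) {
    // 1. Lock-free fast path in the young generation.
    HeapWord* result = gch->young_gen()->par_allocate(size, is_tlab);
    if (result != NULL) return result;

    uint gc_count_before;
    {
      // 2. Locked slow path: try every generation while holding Heap_lock.
      MutexLocker ml(Heap_lock);
      result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
      if (result != NULL) return result;
      gc_count_before = gch->total_collections(); // read under the lock
    }

    // 3. Schedule a collection at a safepoint, then retry from the top.
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded() && op.result() != NULL) {
      return op.result();
    }
  }
}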


Note: The GenCollectedHeap class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.