You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@harmony.apache.org by Sian January <si...@googlemail.com> on 2008/10/29 10:22:15 UTC

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Hi Xiao-Feng,

This commit looks like quite a large new feature to me.  Since we're
in feature freeze this week for M8 I really think it should be backed
out until after the milestone, as we should be focussing on testing
and stability at the moment.

Thanks,

Sian


2008/10/29  <xl...@apache.org>:
> Author: xli
> Date: Tue Oct 28 20:01:01 2008
> New Revision: 708756
>
> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
> Log:
> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>
> Added:
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
> Removed:
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
> Modified:
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
> @@ -34,6 +34,7 @@
>     gc_heap_write_global_slot;
>     gc_heap_write_ref;
>     gc_heap_wrote_object;
> +    gc_heap_copy_object_array;
>     gc_init;
>     gc_is_object_pinned;
>     gc_iterate_heap;
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
> @@ -31,24 +31,16 @@
>   return;
>  }
>
> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
> -{
> -  if(gc_is_specify_con_gc()){
> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
> -  }
> -  return;
> -}
>
>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>  {
>   /*collection scheduler only schedules concurrent collection now.*/
>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>     assert(gc_is_specify_con_gc());
> -    return gc_sched_con_collection(gc, gc_cause);
> +    return gc_con_perform_collection( gc );
>   }else{
>     return FALSE;
>   }
>  }
>
>
> -
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
> @@ -26,12 +26,8 @@
>  void collection_scheduler_initialize(GC* gc);
>  void collection_scheduler_destruct(GC* gc);
>
> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>
>  #endif
>
>
> -
> -
> -
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
> @@ -22,7 +22,7 @@
>  #include "collection_scheduler.h"
>  #include "concurrent_collection_scheduler.h"
>  #include "gc_concurrent.h"
> -#include "../thread/marker.h"
> +#include "../thread/conclctor.h"
>  #include "../verify/verify_live_heap.h"
>
>  #define NUM_TRIAL_COLLECTION 2
> @@ -53,6 +53,7 @@
>  Boolean gc_use_space_scheduler()
>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>
> +
>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>
> @@ -75,6 +76,7 @@
>   STD_FREE(gc->collection_scheduler);
>  }
>
> +
>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>  {
>   string_to_upper(cc_scheduler);
> @@ -93,281 +95,248 @@
>   gc_enable_time_scheduler();
>  }
>
> -static Boolean time_to_start_mark(GC* gc)
> -{
> -  if(!gc_use_time_scheduler()) return FALSE;
> -
> -  int64 time_current = time_now();
> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
> -}
> -
> -static Boolean space_to_start_mark(GC* gc)
> -{
> -  if(!gc_use_space_scheduler()) return FALSE;
> +/*====================== new scheduler ===================*/
> +extern unsigned int NUM_CON_MARKERS;
> +extern unsigned int NUM_CON_SWEEPERS;
> +unsigned int gc_get_mutator_number(GC *gc);
> +
> +#define MOSTLY_CON_MARKER_DIVISION 0.5
> +unsigned int mostly_con_final_marker_num=1;
> +unsigned int mostly_con_long_marker_num=1;
> +
> +unsigned int gc_get_marker_number(GC* gc) {
> +  unsigned int mutator_num = gc_get_mutator_number(gc);
> +  unsigned int marker_specified = NUM_CON_MARKERS;
> +  if(marker_specified == 0) {
> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
> +    }
> +  }
>
> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
> -  return (size_new_obj > space_threshold_to_start_mark);
> +  assert(marker_specified);
> +  return marker_specified;
>  }
>
> -static Boolean gc_need_start_con_mark(GC* gc)
> -{
> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
> -
> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
> -    return TRUE;
> -  else
> -    return FALSE;
> +#define CON_SWEEPER_DIVISION 0.8
> +unsigned int gc_get_sweeper_numer(GC *gc) {
> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
> +  if(sweeper_specified == 0)
> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
> +  assert(sweeper_specified);
> +  return sweeper_specified;
>  }
>
> -static Boolean gc_need_start_con_sweep(GC* gc)
> -{
> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>
> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
> -    return TRUE;
> -  else
> -    return FALSE;
> -}
>
> -static Boolean gc_need_reset_after_con_collect(GC* gc)
> -{
> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
> -    return TRUE;
> -  else
> -    return FALSE;
> -}
>
> -static Boolean gc_need_start_con_enum(GC* gc)
> -{
> -  /*TODO: support on-the-fly root set enumeration.*/
> -  return FALSE;
> -}
> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>
> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
> -#define TIME_CORRECTION_OTF_MARK 0.65f
> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
> -
> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
> -{
> -  Space* space = NULL;
> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
> -
> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
> -  space = (Space*) gc_get_wspace(gc);
> -#endif
> -  if(!space) return;
> +/* for checking heap effcient*/
> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>
> -  Space_Statistics* space_stat = space->space_statistic;
> -
> -  unsigned int slot_index = cc_scheduler->last_window_index;
> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
> -
> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>
> -  cc_scheduler->last_mutator_time = time_mutator;
> -  cc_scheduler->last_collector_time = time_collection;
> -
> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
> -    return;
> -
> -  cc_scheduler->alloc_rate_window[slot_index]
> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
> +static int64 last_check_time_point = time_now();
> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>
> -  if(gc_mark_is_concurrent()){
> -    cc_scheduler->trace_rate_window[slot_index]
> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
> -  }else{
> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
> -  }
> -
> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
> +//just debugging
> +int64 get_last_check_point()
> +{
> +   return last_check_time_point;
>  }
>
> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
> -{
> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
> -    return;
> +static unsigned int alloc_space_threshold = 0;
>
> -  Space* space = NULL;
> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
> -  space = (Space*) gc_get_wspace(gc);
> -#endif
> -  if(!space) return;
> -
> -  Space_Statistics* space_stat = space->space_statistic;
> -
> -  float sum_alloc_rate = 0;
> -  float sum_trace_rate = 0;
> -  float sum_space_util_ratio = 0;
> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
> +static unsigned int space_check_stage_2; //BIG_DELTA
>
> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
> +{
>
> -  int64 time_this_collection_correction = 0;
> -#if 0
> -  float space_util_ratio = space_stat->space_utilization_ratio;
> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
> -    time_this_collection_correction = 0;
> -  }else{
> -    time_this_collection_correction
> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
> -  }
> -#endif
> -
> -  unsigned int i;
> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
> -  }
> -
> -  TRACE2("gc.con.cs","Allocation Rate: ");
> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
> -  }
> -
> -  TRACE2("gc.con.cs","Tracing Rate: ");
> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
> -  }
> -
> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
> -
> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
> -
> -  if(average_alloc_rate == 0 ){
> -    time_delay_to_start_mark = MIN_DELAY_TIME;
> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
> -  }else if(average_trace_rate == 0){
> -    time_delay_to_start_mark = MAX_DELAY_TIME;
> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
> -  }else{
> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
> -
> -    if(time_alloc_expected > time_trace_expected){
> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
> -      }
> -    }else{
> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
> +  float util_rate = con_collection_stat->heap_utilization_rate;
> +  unsigned int space_threshold = 0;
> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
> +
> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
> +    } else {  //use default
> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>     }
> -
> -    cc_scheduler->space_threshold_to_start_mark =
> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
> -
> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>   }
> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>
> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
> +  else
> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
> +
> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
> +  return space_threshold;
>  }
>
> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
> -{
> -  assert(gc_is_specify_con_gc());
> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
> -
> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
> -
> -  return;
> -}
> -
> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
> +/* this parameters are updated at end of GC */
> +void gc_update_scheduler_parameter( GC *gc )
>  {
> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
> -  vm_gc_lock_enum();
> -
> -  gc_try_finish_con_phase(gc);
> -
> -  if(gc_need_start_con_enum(gc)){
> -    /*TODO:Concurrent rootset enumeration.*/
> -    assert(0);
> -  }
> -
> -  if(gc_need_start_con_mark(gc)){
> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
> -    gc_start_con_mark(gc);
> -    vm_gc_unlock_enum();
> -    unlock(gc->lock_collect_sched);
> -    return TRUE;
> -  }
> -
> -  if(gc_need_start_con_sweep(gc)){
> -    gc->num_collections++;
> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
> -    gc_start_con_sweep(gc);
> -    vm_gc_unlock_enum();
> -    unlock(gc->lock_collect_sched);
> -    return TRUE;
> -  }
> -
> -  if(gc_need_reset_after_con_collect(gc)){
> -    int64 pause_start = time_now();
> -    int disable_count = vm_suspend_all_threads();
> -    gc_reset_after_con_collect(gc);
> -    gc_start_mutator_time_measure(gc);
> -    set_collection_end_time();
> -    vm_resume_all_threads(disable_count);
> -    vm_gc_unlock_enum();
> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
> -    unlock(gc->lock_collect_sched);
> -    return TRUE;
> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +   last_check_time_point = time_now();
> +
> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
> +
> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
> +   if(gc_is_specify_con_sweep()) {
> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
> +   }
> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
> +}
> +
> +void gc_force_update_scheduler_parameter( GC *gc )
> +{
> +    last_check_time_point = time_now();
> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
> +    check_delay_time = time_now();
> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
> +}
> +
> +
> +
> +static inline Boolean check_start_mark( GC *gc )
> +{
> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +   /*just debugging*/
> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
> +   if( alloc_space_threshold < new_object_occupied_size ) {
> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
> +       return TRUE;
> +   }
> +
> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
> +   int64 last_check_delay = check_delay_time;
> +
> +   if( free_space < space_check_stage_2 ) {
> +       check_delay_time = SMALL_DELTA;
> +   } else if( free_space < space_check_stage_1 ) {
> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
> +           unsigned int interval_space = new_object_occupied_size;
> +           con_collection_stat->alloc_rate = interval_space/interval_time;
> +       }
> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
> +   }
> +   last_check_time_point = time_now();
> +
> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
> +   return FALSE;
> +}
> +
> +static SpinLock check_lock;
> +static inline Boolean space_should_start_mark( GC *gc)
> +{
> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
> +      Boolean should_start = check_start_mark(gc);
> +      unlock(check_lock);
> +      return should_start;
>   }
> -  vm_gc_unlock_enum();
> -  unlock(gc->lock_collect_sched);
>   return FALSE;
>  }
>
> -extern unsigned int NUM_MARKERS;
> -
> -unsigned int gc_decide_marker_number(GC* gc)
> -{
> -  unsigned int num_active_marker;
> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
> +inline static Boolean gc_con_start_condition( GC* gc ) {
> +   return space_should_start_mark(gc);
> +}
>
> -  /*If the number of markers is specfied, just return the specified value.*/
> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>
> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
> -    /*Start trial cycle, collection set to 1 in trial cycle and */
> -    num_active_marker = 1;
> -  }else{
> -    num_active_marker = cc_scheduler->last_marker_num;
> -    int64 c_time = cc_scheduler->last_collector_time;
> -    int64 m_time = cc_scheduler->last_mutator_time;
> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
> -
> -    if(num_active_marker == 0) num_active_marker = 1;
> -
> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
> -      num_active_marker ++;
> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
> -    }else if((float)d_time > (m_time * 0.6)){
> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
> -      num_active_marker --;
> -      if(num_active_marker == 0)  num_active_marker = 1;
> -    }
> -
> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
> +void gc_reset_after_con_collection(GC *gc);
> +void gc_merge_free_list_global(GC *gc);
> +void gc_con_stat_information_out(GC *gc);
> +
> +unsigned int sub_time = 0;
> +int64 pause_time = 0;
> +/*
> +   concurrent collection entry function, it may start proper phase according to the current state.
> +*/
> +Boolean gc_con_perform_collection( GC* gc ) {
> +  int disable_count;
> +  int64 pause_start;
> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  switch( gc->gc_concurrent_status ) {
> +    case GC_CON_NIL :
> +      if( !gc_con_start_condition(gc) )
> +        return FALSE;
> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
> +        return FALSE;
> +
> +      gc->num_collections++;
> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
> +
> +      con_collection_stat->gc_start_time = time_now();
> +      disable_count = hythread_reset_suspend_disable();
> +
> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
> +      con_collection_stat->marking_start_time = time_now();
> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
> +      gc_start_con_marking(gc);
> +
> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
> +      vm_resume_threads_after();
> +      hythread_set_suspend_disable(disable_count);
> +      break;
> +
> +    case GC_CON_BEFORE_SWEEP :
> +      if(!gc_is_specify_con_sweep())
> +         return FALSE;
> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
> +         return FALSE;
> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
> +      break;
> +
> +
> +    case GC_CON_BEFORE_FINISH :
> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
> +                 return FALSE;
> +        /* thread should be suspended before the state transformation,
> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
> +        disable_count = vm_suspend_all_threads();
> +        pause_start = time_now();
> +
> +        gc_merge_free_list_global(gc);
> +        gc_reset_after_con_collection(gc);
> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
> +        pause_time = time_now()-pause_start;
> +
> +        vm_resume_all_threads(disable_count);
> +        gc_con_stat_information_out(gc);
> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
> +        break;
> +    default :
> +      return FALSE;
>   }
> -
> -  cc_scheduler->last_marker_num = num_active_marker;
> -  return num_active_marker;
> +  return TRUE;
>  }
>
> +
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
> @@ -20,6 +20,7 @@
>
>  #define STAT_SAMPLE_WINDOW_SIZE 5
>
> +struct GC_MS;
>  typedef struct Con_Collection_Scheduler {
>   /*common field*/
>   GC* gc;
> @@ -46,10 +47,17 @@
>  void con_collection_scheduler_initialize(GC* gc);
>  void con_collection_scheduler_destruct(GC* gc);
>
> +void gc_update_scheduler_parameter( GC *gc );
> +void gc_force_update_scheduler_parameter( GC *gc );
> +Boolean gc_con_perform_collection( GC* gc );
>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>
>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>  void gc_set_default_cc_scheduler_kind();
> +
> +extern unsigned int mostly_con_final_marker_num;
> +extern unsigned int mostly_con_long_marker_num;
> +
>  #endif
>
> +
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
> @@ -22,7 +22,7 @@
>  #include "gc_common.h"
>  #include "gc_metadata.h"
>  #include "../thread/mutator.h"
> -#include "../thread/marker.h"
> +#include "../thread/conclctor.h"
>  #include "../finalizer_weakref/finalizer_weakref.h"
>  #include "../gen/gen.h"
>  #include "../mark_sweep/gc_ms.h"
> @@ -74,11 +74,19 @@
>  static int64 collection_start_time = time_now();
>  static int64 collection_end_time = time_now();
>
> -int64 get_collection_end_time()
> +int64 get_gc_start_time()
> +{ return collection_start_time; }
> +
> +void set_gc_start_time()
> +{ collection_start_time = time_now(); }
> +
> +int64 get_gc_end_time()
>  { return collection_end_time; }
>
> -void set_collection_end_time()
> -{ collection_end_time = time_now(); }
> +void set_gc_end_time()
> +{
> +  collection_end_time = time_now();
> +}
>
>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>  {
> @@ -93,17 +101,17 @@
>
>  }
>
> -void gc_update_space_stat(GC_MS* gc)
> +void gc_update_space_stat(GC* gc)
>  {
>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
> -    gc_ms_update_space_stat((GC_MS*)gc);
> +      gc_ms_update_space_stat((GC_MS *)gc);
>  #endif
>  }
>
> -void gc_reset_space_stat(GC_MS* gc)
> +void gc_reset_space_stat(GC* gc)
>  {
>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
> -    gc_ms_reset_space_stat((GC_MS*)gc);
> +      gc_ms_reset_space_stat((GC_MS *)gc);
>  #endif
>  }
>
> @@ -118,7 +126,7 @@
>   gc_set_rootset(gc);
>  }
>
> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
> +void gc_reset_after_collection(GC* gc)
>  {
>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>
> @@ -139,11 +147,9 @@
>  #endif
>   }
>
> -  gc_update_space_stat((GC_MS*)gc);
> +  gc_update_space_stat(gc);
>
> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
> -
> -  gc_reset_space_stat((GC_MS*)gc);
> +  gc_reset_space_stat(gc);
>
>   gc_reset_collector_state(gc);
>
> @@ -154,23 +160,25 @@
>
>  }
>
> +void set_check_delay( int64 mutator_time );
> +
>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>  {
>   INFO2("gc.process", "\nGC: GC start ...\n");
>
> -  collection_start_time = time_now();
> -  int64 time_mutator = collection_start_time - collection_end_time;
> -
> -  gc->num_collections++;
>   gc->cause = gc_cause;
>
>   if(gc_is_specify_con_gc()){
> -    gc_finish_con_GC(gc, time_mutator);
> -    collection_end_time = time_now();
> +    gc_wait_con_finish(gc);
>     INFO2("gc.process", "GC: GC end\n");
>     return;
>   }
>
> +   set_gc_start_time();
> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
> +
> +  gc->num_collections++;
> +
>   /* FIXME:: before mutators suspended, the ops below should be very careful
>      to avoid racing with mutators. */
>
> @@ -207,16 +215,16 @@
>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>  #endif
>
> -  collection_end_time = time_now();
> +  set_gc_end_time();
>
> -  int64 time_collection = collection_end_time - collection_start_time;
> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>
>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>   gc_gen_space_verbose_info((GC_Gen*)gc);
>  #endif
>
> -  gc_reset_after_collection(gc, time_mutator, time_collection);
> +  gc_reset_after_collection(gc);
>
>   gc_assign_free_area_to_mutators(gc);
>
> @@ -230,6 +238,3 @@
>
>
>
> -
> -
> -
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
> @@ -39,7 +39,8 @@
>
>  #include "../common/gc_for_barrier.h"
>
> -/*
> +
> + /*
>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>  */
> @@ -336,19 +337,7 @@
>   return TRUE;
>  }
>
> -extern volatile Boolean obj_alloced_live;
> -inline Boolean is_obj_alloced_live()
> -{ return obj_alloced_live;  }
>
> -inline void gc_enable_alloc_obj_live()
> -{
> -  obj_alloced_live = TRUE;
> -}
> -
> -inline void gc_disable_alloc_obj_live()
> -{
> -  obj_alloced_live = FALSE;
> -}
>
>  /***************************************************************/
>
> @@ -391,7 +380,7 @@
>  /***************************************************************/
>
>  /* all GCs inherit this GC structure */
> -struct Marker;
> +struct Conclctor;
>  struct Mutator;
>  struct Collector;
>  struct GC_Metadata;
> @@ -421,9 +410,12 @@
>   unsigned int num_collectors;
>   unsigned int num_active_collectors; /* not all collectors are working */
>
> -  Marker** markers;
> -  unsigned int num_markers;
> +  /*concurrent markers and collectors*/
> +  Conclctor** conclctors;
> +  unsigned int num_conclctors;
> +  //unsigned int num_active_conclctors;
>   unsigned int num_active_markers;
> +  unsigned int num_active_sweepers;
>
>   /* metadata is the pool for rootset, tracestack, etc. */
>   GC_Metadata* metadata;
> @@ -443,7 +435,7 @@
>
>   Space_Tuner* tuner;
>
> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>   Collection_Scheduler* collection_scheduler;
>
>   SpinLock lock_con_mark;
> @@ -488,11 +480,15 @@
>
>  GC* gc_parse_options();
>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
> +void gc_relaim_heap_con_mode( GC *gc);
>  void gc_prepare_rootset(GC* gc);
>
>
> -int64 get_collection_end_time();
> -void set_collection_end_time();
> +int64 get_gc_start_time();
> +void set_gc_start_time();
> +
> +int64 get_gc_end_time();
> +void set_gc_end_time();
>
>  /* generational GC related */
>
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
> @@ -17,325 +17,582 @@
>  #include "gc_common.h"
>  #include "gc_metadata.h"
>  #include "../thread/mutator.h"
> -#include "../thread/marker.h"
> +#include "../thread/conclctor.h"
>  #include "../thread/collector.h"
>  #include "../finalizer_weakref/finalizer_weakref.h"
>  #include "../gen/gen.h"
>  #include "../mark_sweep/gc_ms.h"
> +#include "../mark_sweep/wspace_mark_sweep.h"
>  #include "interior_pointer.h"
>  #include "collection_scheduler.h"
>  #include "gc_concurrent.h"
>  #include "../common/gc_for_barrier.h"
> +#include "concurrent_collection_scheduler.h"
> +#include "../verify/verify_live_heap.h"
>
> -volatile Boolean concurrent_in_marking  = FALSE;
> -volatile Boolean concurrent_in_sweeping = FALSE;
> -volatile Boolean mark_is_concurrent     = FALSE;
> -volatile Boolean sweep_is_concurrent    = FALSE;
> +struct Con_Collection_Statistics;
>
>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>
> -static void gc_check_con_mark(GC* gc)
> +//just debugging
> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>  {
> -  if(!is_mark_finished(gc)){
> -    lock(gc->lock_con_mark);
> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
> -      //ignore.
> -    }
> -    unlock(gc->lock_con_mark);
> -  }
> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>  }
>
> -static void gc_wait_con_mark_finish(GC* gc)
> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>  {
> -  wait_mark_finish(gc);
> -  gc_set_barrier_function(WB_REM_NIL);
> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>  }
> +
> +volatile Boolean obj_alloced_live;
>
> -unsigned int gc_decide_marker_number(GC* gc);
> +void gc_enable_alloc_obj_live(GC *gc)
> +{
> +  gc_con_update_stat_before_enable_alloc_live(gc);
> +  obj_alloced_live = TRUE;
> +}
>
> -void gc_start_con_mark(GC* gc)
> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>  {
> -  int disable_count;
> -  unsigned int num_marker;
> -
> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
> -
> -  lock(gc->lock_enum);
> -  disable_count = hythread_reset_suspend_disable();
> -  int64 pause_start = time_now();
> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
> -  gc_prepare_rootset(gc);
> -
> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
> +  POINTER_SIZE_INT num_live_obj = 0;
> +  POINTER_SIZE_INT size_live_obj = 0;
> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>
> -  num_marker = gc_decide_marker_number(gc);
> -
> -  /*start concurrent mark*/
> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
> -    gc_set_barrier_function(WB_REM_OLD_VAR);
> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
> +  unsigned int num_conclctors = gc->num_conclctors;
> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
> +    Conclctor* conclctor = gc->conclctors[i];
> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
> +      continue;
> +    num_live_obj += conclctor->live_obj_num;
> +    size_live_obj += conclctor->live_obj_size;
> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
> +    conclctor->live_obj_num = 0;
> +    conclctor->live_obj_size = 0;
> +    conclctor->num_dirty_slots_traced = 0;
>   }
>
> -  unlock(gc->lock_enum);
> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
> -  vm_resume_threads_after();
> -  assert(hythread_is_suspend_enabled());
> -  hythread_set_suspend_disable(disable_count);
> -
> -  unlock(gc->lock_con_mark);
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  con_collection_stat->live_size_marked += size_live_obj;
> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
> +
>  }
>
> -void mostly_con_mark_terminate_reset();
> -void terminate_mostly_con_mark();
> -
> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
> +//called by the marker when it finishes
> +void gc_con_update_stat_after_marking(GC *gc)
>  {
> -  gc_check_con_mark(gc);
> -
> -  if(gc_is_kind(ALGO_CON_MOSTLY))
> -    terminate_mostly_con_mark();
> -
> -  gc_wait_con_mark_finish(gc);
> +  POINTER_SIZE_INT num_live_obj = 0;
> +  POINTER_SIZE_INT size_live_obj = 0;
> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>
> -  int disable_count;
> -  if(need_STW){
> -    /*suspend the mutators.*/
> -    lock(gc->lock_enum);
> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
> -      /*In mostly concurrent algorithm, there's a final marking pause.
> -            Prepare root set for final marking.*/
> -      disable_count = hythread_reset_suspend_disable();
> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
> -      gc_prepare_rootset(gc);
> -    }else{
> -      disable_count = vm_suspend_all_threads();
> -    }
> +  unsigned int num_conclctors = gc->num_conclctors;
> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
> +    Conclctor* conclctor = gc->conclctors[i];
> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
> +      continue;
> +    num_live_obj += conclctor->live_obj_num;
> +    size_live_obj += conclctor->live_obj_size;
> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
> +    conclctor->live_obj_num = 0;
> +    conclctor->live_obj_size = 0;
> +    conclctor->num_dirty_slots_traced = 0;
>   }
>
> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
> -    /*In mostly concurrent algorithm, there's a final marking pause.
> -          Suspend the mutators once again and finish the marking phase.*/
> -
> -    /*prepare dirty object*/
> -    gc_prepare_dirty_set(gc);
> -
> -    gc_set_weakref_sets(gc);
> -
> -    /*start STW mark*/
> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
> -
> -    mostly_con_mark_terminate_reset();
> -    gc_clear_dirty_set(gc);
> -  }
> -
> -  gc_reset_dirty_set(gc);
> -
> -  if(need_STW){
> -    unlock(gc->lock_enum);
> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
> -      vm_resume_threads_after();
> -      assert(hythread_is_suspend_enabled());
> -      hythread_set_suspend_disable(disable_count);
> -    }else{
> -      vm_resume_all_threads(disable_count);
> -    }
> -  }
> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>
> +   /*statistics information update (marking_end_time, trace_rate) */
> +  con_collection_stat->marking_end_time = time_now();
> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
> +
> +  unsigned int heap_size =
> +       con_collection_stat->surviving_size_at_gc_end +
> +       gc_get_mutator_new_obj_size(gc);
> +
> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
> +
> +
> +
> +  /*
> +  //statistics just for debugging
> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
> +  */
>  }
>
> -void gc_reset_con_mark(GC* gc)
> +void gc_PSTW_update_stat_after_marking(GC *gc)
>  {
> -  gc->num_active_markers = 0;
> -  gc_mark_unset_concurrent();
> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  con_collection_stat->live_size_marked = size_live_obj;
> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
> +
> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>  }
>
> -int64 gc_get_con_mark_time(GC* gc)
> +//Called only when heap is exhuaset
> +void gc_con_update_stat_heap_exhausted(GC* gc)
>  {
> -  int64 time_mark = 0;
> -  Marker** markers = gc->markers;
> -  unsigned int i;
> -  for(i = 0; i < gc->num_active_markers; i++){
> -    Marker* marker = markers[i];
> -    if(marker->time_mark > time_mark){
> -      time_mark = marker->time_mark;
> -    }
> -    marker->time_mark = 0;
> -  }
> -  return time_mark;
> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>  }
>
> -void gc_start_con_sweep(GC* gc)
> +
> +//just debugging
> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>  {
> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
> +  POINTER_SIZE_INT num_live_obj = 0;
> +  POINTER_SIZE_INT size_live_obj = 0;
>
> -  /*FIXME: enable finref*/
> -  if(!IGNORE_FINREF ){
> -    gc_set_obj_with_fin(gc);
> -    Collector* collector = gc->collectors[0];
> -    collector_identify_finref(collector);
> -#ifndef BUILD_IN_REFERENT
> -  }else{
> -    gc_set_weakref_sets(gc);
> -    gc_update_weakref_ignore_finref(gc);
> -#endif
> +  unsigned int num_collectors = gc->num_active_collectors;
> +  Collector** collectors = gc->collectors;
> +  unsigned int i;
> +  for(i = 0; i < num_collectors; i++){
> +    Collector* collector = collectors[i];
> +    num_live_obj += collector->live_obj_num;
> +    size_live_obj += collector->live_obj_size;
> +    collector->live_obj_num = 0;
> +    collector->live_obj_size = 0;
>   }
> +
> +  return size_live_obj;
> +}
>
> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
> +//Called when Con GC ends, must called in a STW period
> +void gc_reset_con_space_stat(GC *gc)
> +{
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>
> -  gc_set_weakref_sets(gc);
> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
> +  }
> +
> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>
> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
> -      So, when this assumption changed, we should modified the below function.*/
> -  gc_identify_dead_weak_roots(gc);
>
> -  /*start concurrent mark*/
> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
> +  /*
> +  //just debugging
> +  if( !gc_is_specify_con_sweep() ) {
> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
> +  }*/
>
> -  unlock(gc->lock_con_sweep);
> +  int64 current_time = time_now();
> +
> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
> +       unsigned int gc_interval_time = 0;
> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
> +       else
> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
> +       gc_update_scheduler_parameter(gc);
> +  } else {
> +     gc_force_update_scheduler_parameter(gc);
> +  }
> +
> +  con_collection_stat->gc_end_time = current_time;
> +
> +  con_collection_stat->live_size_marked = 0;
> +  con_collection_stat->live_alloc_size = 0;
> +  con_collection_stat->alloc_size_before_alloc_live = 0;
> +  con_collection_stat->marking_start_time = 0;
> +  con_collection_stat->marking_end_time = 0;
> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
> +  con_collection_stat->pause_start_time = 0;
> +  assert(con_collection_stat->heap_utilization_rate<1);
> +
>  }
>
> -void gc_reset_con_sweep(GC* gc)
> +void gc_con_stat_information_out(GC *gc)
>  {
> -  gc->num_active_collectors = 0;
> -  gc_sweep_unset_concurrent();
> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>  }
>
> -void gc_wait_con_sweep_finish(GC* gc)
> +void gc_reset_after_con_collection(GC* gc)
>  {
> -  wait_collection_finish(gc);
> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
> +  assert(gc_is_specify_con_gc());
> +  int64 reset_start = time_now();
> +  if(!IGNORE_FINREF ){
> +    INFO2("gc.process", "GC: finref process after collection ...\n");
> +    gc_put_finref_to_vm(gc);
> +    gc_reset_finref_metadata(gc);
> +    gc_activate_finref_threads((GC*)gc);
> +#ifndef BUILD_IN_REFERENT
> +  } else {
> +    gc_clear_weakref_pools(gc);
> +    gc_clear_finref_repset_pool(gc);
> +#endif
> +  }
> +  reset_start = time_now();
> +  gc_reset_con_space_stat(gc);
> +  gc_clear_conclctor_role(gc);
> +  vm_reclaim_native_objs();
>  }
>
> -void gc_finish_con_sweep(GC * gc)
> +
> +
> +void gc_set_default_con_algo()
>  {
> -  gc_wait_con_sweep_finish(gc);
> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>  }
>
> -void gc_try_finish_con_phase(GC * gc)
> +void gc_decide_con_algo(char* concurrent_algo)
>  {
> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
> -          here to guarantee this occasional case.*/
> -    if(try_lock(gc->lock_con_mark)){
> -      unlock(gc->lock_con_mark);
> -      gc_finish_con_mark(gc, TRUE);
> -    }
> -  }
> -
> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
> -    //The reason is same as concurrent mark above.
> -    if(try_lock(gc->lock_con_sweep)){
> -      unlock(gc->lock_con_sweep);
> -      gc_finish_con_sweep(gc);
> -    }
> +  string_to_upper(concurrent_algo);
> +  GC_PROP &= ~ALGO_CON_MASK;
> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
> +    GC_PROP |= ALGO_CON_OTF_OBJ;
> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
> +    GC_PROP |= ALGO_CON_MOSTLY;
> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
> +    GC_PROP |= ALGO_CON_OTF_REF;
>   }
>  }
>
> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>
> -void gc_reset_after_con_collect(GC* gc)
> +/*
> +    gc start enumeration phase, now, it is in a stop-the-world manner
> +*/
> +void gc_start_con_enumeration(GC * gc)
>  {
> -  assert(gc_is_specify_con_gc());
> -
> -  int64 time_mutator = gc_get_mutator_time(gc);
> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
> +  gc_prepare_rootset(gc);
> +}
>
> -  gc_reset_interior_pointer_table();
> +//unsigned int gc_decide_marker_number(GC* gc);
> +unsigned int gc_get_marker_number(GC* gc);
> +/*  gc start marking phase */
> +void gc_start_con_marking(GC *gc)
> +{
> +  unsigned int num_marker;
> +  num_marker = gc_get_marker_number(gc);
>
> -  gc_reset_after_collection(gc, time_mutator, time_collection);
> -
> -  if(gc_mark_is_concurrent()){
> -    gc_reset_con_mark(gc);
> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
> +    gc_enable_alloc_obj_live(gc);
> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
> +    gc_enable_alloc_obj_live(gc);
> +    gc_set_barrier_function(WB_REM_OLD_VAR);
> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>   }
> +}
> +
>
> -  if(gc_sweep_is_concurrent()){
> -    gc_reset_con_sweep(gc);
> +/*
> +    gc start sweeping phase
> +*/
> +void gc_prepare_sweeping(GC *gc) {
> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
> +  /*FIXME: enable finref*/
> +  if(!IGNORE_FINREF ){
> +    gc_set_obj_with_fin(gc);
> +    Collector* collector = gc->collectors[0];
> +    collector_identify_finref(collector);
> +  #ifndef BUILD_IN_REFERENT
> +  } else {
> +    conclctor_set_weakref_sets(gc);
> +    gc_update_weakref_ignore_finref(gc);
> +  #endif
>   }
> +  gc_identify_dead_weak_roots(gc);
>  }
>
> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
> -{
> +int64 get_last_check_point();
> +// for the case pure stop the world
> +static void gc_partial_con_PSTW( GC *gc) {
>   int64 time_collection_start = time_now();
> -
> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
> +  // stop the world enumeration
>   gc->num_collections++;
> -
> -  lock(gc->lock_enum);
> -
>   int disable_count = hythread_reset_suspend_disable();
>   gc_set_rootset_type(ROOTSET_IS_REF);
>   gc_prepare_rootset(gc);
> -  unlock(gc->lock_enum);
> -
> -  if(gc_sweep_is_concurrent()){
> -    if(gc_con_is_in_sweeping())
> -      gc_finish_con_sweep(gc);
> -  }else{
> -    if(gc_con_is_in_marking()){
> -      gc_finish_con_mark(gc, FALSE);
> -    }
> -    gc->in_collection = TRUE;
> -    gc_reset_mutator_context(gc);
> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
> -    gc_ms_reclaim_heap((GC_MS*)gc);
> -  }
> -
> -  int64 time_collection = 0;
> -  if(gc_mark_is_concurrent()){
> -    time_collection = gc_get_con_mark_time(gc);
> -    gc_reset_con_mark(gc);
> -  }else{
> -    time_collection = time_now()-time_collection_start;
> -  }
> +
> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
> +  }
> +
> +  //reclaim heap
> +  gc_reset_mutator_context(gc);
> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
> +  gc_ms_reclaim_heap((GC_MS*)gc);
> +
> +  //update live size
> +  gc_PSTW_update_stat_after_marking(gc);
> +
> +  // reset the collection and resume mutators
> +  gc_reset_after_con_collection(gc);
>
> -  if(gc_sweep_is_concurrent()){
> -    gc_reset_con_sweep(gc);
> -  }
> -
> -  gc_reset_after_collection(gc, time_mutator, time_collection);
> -
> -  gc_start_mutator_time_measure(gc);
> -
> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>   vm_resume_threads_after();
>   assert(hythread_is_suspend_enabled());
> -  hythread_set_suspend_disable(disable_count);
> -  int64 pause_time = time_now()-time_collection_start;
> -
> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
> -  }else{
> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
> -  }
> -  return;
> +  hythread_set_suspend_disable(disable_count);
>  }
>
> -void gc_set_default_con_algo()
> -{
> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
> -  GC_PROP |= ALGO_CON_OTF_OBJ;
> +void terminate_mostly_con_mark();
> +void wspace_mostly_con_final_mark( GC *gc );
> +
> +// for the case concurrent marking is not finished before heap is exhausted
> +static void gc_partial_con_PMSS(GC *gc) {
> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
> +  // wait concurrent marking finishes
> +  int64 wait_start = time_now();
> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
> +             gc->gc_concurrent_status == GC_CON_TRACING ||
> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
> +  {
> +      vm_thread_yield(); //let the unfinished marker run
> +  }
> +
> +  /*just debugging*/
> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
> +    int64 pause_time = time_now() - wait_start;
> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
> +
> +  // start STW reclaiming heap
> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
> +  gc_reset_mutator_context(gc);
> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
> +  gc_ms_reclaim_heap((GC_MS*)gc);
> +
> +  // reset after partial stop the world collection
> +  gc_reset_after_con_collection(gc);
> +  set_con_nil(gc);
> +}
> +
> +// only when current sweep is set to false
> +static void gc_partial_con_CMSS(GC *gc) {
> +
> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
> +
> +  /*just debugging*/
> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
> +
> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
> +
> +  // start reclaiming heap, it will skip the marking phase
> +  gc_reset_mutator_context(gc);
> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
> +  gc_ms_reclaim_heap((GC_MS*)gc);
> +
> +  // reset after partial stop the world collection
> +  gc_reset_after_con_collection(gc);
> +  set_con_nil(gc);
> +}
> +
> +void gc_merge_free_list_global(GC *gc);
> +//for the case concurrent marking and partial concurrent sweeping
> +static void gc_partial_con_CMPS( GC *gc ) {
> +
> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
> +      vm_thread_yield();  //let the unfinished sweeper run
> +  }
> +  gc_merge_free_list_global(gc);
> +  // reset after partial stop the world collection
> +  gc_reset_after_con_collection(gc);
> +  set_con_nil(gc);
> +}
> +
> +
> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
> +  switch( type ) {
> +    case GC_PARTIAL_PSTW :
> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
> +      break;
> +    case GC_PARTIAL_PMSS :
> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
> +      break;
> +    case GC_PARTIAL_CMPS :
> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
> +      break;
> +    case GC_PARTIAL_CMSS :
> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
> +      break;
> +    case GC_PARTIAL_FCSR :
> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
> +      break;
> +  }
> +}
> +
> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
> +{
> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
> +      vm_thread_yield();
> +   }
> +
> +   int64 final_start = time_now();
> +   int disable_count = hythread_reset_suspend_disable();
> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
> +   gc_prepare_rootset(gc);
> +
> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
> +
> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
> +      vm_thread_yield(); //let the unfinished marker run
> +   }
> +
> +   //final marking phase
> +   gc_clear_conclctor_role(gc);
> +   wspace_mostly_con_final_mark(gc);
> +
> +   /*just debugging*/
> +   int64 final_time = time_now() - final_start;
> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
> +
> +  // start STW reclaiming heap
> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
> +   gc_reset_mutator_context(gc);
> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
> +   gc_ms_reclaim_heap((GC_MS*)gc);
> +
> +   // reset after partial stop the world collection
> +   gc_reset_after_con_collection(gc);
> +   set_con_nil(gc);
> +
> +   vm_resume_threads_after();
> +   hythread_set_suspend_disable(disable_count);
> +   return GC_PARTIAL_PMSS;
> +
> +}
> +
> +static unsigned int gc_con_heap_full_otf( GC *gc )
> +{
> +   unsigned int partial_type; //for time measuring and debugging
> +   int disable_count = vm_suspend_all_threads();
> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +   con_collection_stat->pause_start_time = time_now();
> +   switch(gc->gc_concurrent_status) {
> +       case GC_CON_START_MARKERS :
> +       case GC_CON_TRACING :
> +       case GC_CON_TRACE_DONE :
> +         partial_type = GC_PARTIAL_PMSS;
> +         gc_partial_con_PMSS(gc);
> +         break;
> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
> +         partial_type = GC_PARTIAL_CMSS;
> +         gc_partial_con_CMSS(gc);
> +         break;
> +       case GC_CON_SWEEPING :
> +       case GC_CON_SWEEP_DONE :
> +         partial_type = GC_PARTIAL_CMPS;
> +         gc_partial_con_CMPS(gc);
> +         break;
> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
> +         partial_type = GC_PARTIAL_FCSR;
> +         gc_merge_free_list_global(gc);
> +         gc_reset_after_con_collection(gc);
> +         set_con_nil(gc);
> +         break;
> +       case GC_CON_RESET :
> +       case GC_CON_NIL :
> +       case GC_CON_STW_ENUM :
> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
> +         partial_type = GC_PARTIAL_FCSR;
> +         break;
> +       /* other state is illegal here */
> +       default:
> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
> +         RAISE_ERROR;
> +    }
> +    vm_resume_all_threads(disable_count);
> +    return partial_type;
>  }
>
> -void gc_decide_con_algo(char* concurrent_algo)
> -{
> -  string_to_upper(concurrent_algo);
> -  GC_PROP &= ~ALGO_CON_MASK;
> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
> -    GC_PROP |= ALGO_CON_OTF_OBJ;
> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
> -    GC_PROP |= ALGO_CON_MOSTLY;
> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
> -    GC_PROP |= ALGO_CON_OTF_REF;
> +void gc_con_stat_information_out(GC *gc);
> +/*
> +this method is called before STW gc start, there is a big lock outside
> +*/
> +void gc_wait_con_finish( GC* gc ) {
> +  int64 time_collection_start = time_now();
> +  unsigned int partial_type; //for time measuring and debugging
> +
> +   /* cocurrent gc is idle */
> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
> +        con_collection_stat->gc_start_time = time_now();
> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
> +        partial_type = GC_PARTIAL_PSTW;
> +        gc_partial_con_PSTW( gc );
> +   } else {
> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
> +          hythread_safe_point();
> +          vm_thread_yield();
> +       }
> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
> +         partial_type = gc_con_heap_full_mostly_con(gc);
> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
> +         partial_type = gc_con_heap_full_otf(gc);
> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
> +              hythread_safe_point();
> +              vm_thread_yield();
> +            }
> +         }
> +       }
> +       else
> +         RAISE_ERROR;
> +   }
> +
> +  int64 pause_time = time_now()-time_collection_start;
> +  gc_con_stat_information_out(gc);
> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
> +  } else {
> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>   }
>  }
> +
> +
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
> @@ -19,21 +19,69 @@
>  #define _GC_CONCURRENT_H_
>  #include "gc_common.h"
>
> -enum GC_CONCURRENT_STATUS{
> -  GC_CON_STATUS_NIL = 0x00,
> -  GC_CON_MARK_PHASE = 0x01,
> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
> -  GC_CON_SWEEP_PHASE = 0x02
> +
> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
> +inline unsigned int trans_time_unit(int64 x)
> +{
> +  int64 result = x>>10;
> +  if(result) return (unsigned int)result;
> +  return 1;
> +}
> +
> +#define RAISE_ERROR  assert(0);
> +/* concurrent collection states in new design */
> +enum GC_CONCURRENT_STATUS {
> +  GC_CON_NIL = 0x00,
> +  GC_CON_STW_ENUM = 0x01,
> +  GC_CON_START_MARKERS = 0x02,
> +  GC_CON_TRACING = 0x03,
> +  GC_CON_TRACE_DONE = 0x04,
> +  GC_CON_BEFORE_SWEEP = 0x05,
> +  GC_CON_SWEEPING = 0x06,
> +  GC_CON_SWEEP_DONE = 0x07,
> +  GC_CON_BEFORE_FINISH = 0x08,
> +  GC_CON_RESET = 0x09,
> +  GC_CON_DISABLE = 0x0A,
> +};
> +
> +// this type is just for debugging and time measuring
> +enum GC_PARTIAL_STW_TYPE {
> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>  };
>
>  enum HANDSHAKE_SINGAL{
>   HSIG_MUTATOR_SAFE = 0x0,
> -
>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>  };
>
> +typedef struct Con_Collection_Statistics {
> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
> +    POINTER_SIZE_INT live_alloc_size;
> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
> +
> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
> +
> +    float heap_utilization_rate;
> +
> +    int64 gc_start_time;
> +    int64 gc_end_time;
> +
> +    int64 marking_start_time;
> +    int64 marking_end_time;
> +
> +    int64 sweeping_time;
> +    int64 pause_start_time;
> +
> +} Con_Space_Statistics;
> +
>  inline void gc_set_con_gc(unsigned int con_phase)
>  { GC_PROP |= con_phase;  }
>
> @@ -58,107 +106,101 @@
>  inline Boolean gc_is_specify_con_sweep()
>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>
> -extern volatile Boolean concurrent_in_marking;
> -extern volatile Boolean concurrent_in_sweeping;
> -extern volatile Boolean mark_is_concurrent;
> -extern volatile Boolean sweep_is_concurrent;
>
> -inline Boolean gc_mark_is_concurrent()
> -{
> -  return mark_is_concurrent;
> -}
> +extern volatile Boolean obj_alloced_live;
>
> -inline void gc_mark_set_concurrent()
> -{
> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
> -    gc_enable_alloc_obj_live();
> -  mark_is_concurrent = TRUE;
> -}
> +inline Boolean is_obj_alloced_live()
> +{ return obj_alloced_live;  }
>
> -inline void gc_mark_unset_concurrent()
> -{
> -  gc_disable_alloc_obj_live();
> -  mark_is_concurrent = FALSE;
> +inline void gc_disable_alloc_obj_live(GC *gc)
> +{
> +  obj_alloced_live = FALSE;
>  }
>
> -inline Boolean gc_con_is_in_marking()
> +void gc_enable_alloc_obj_live(GC * gc);
> +
> +/*
> +    tranform the states across the collection process,
> +  which should be a atomic operation because there are several collector run parallel
> +*/
> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>  {
> -  return concurrent_in_marking;
> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
> +  if( old_state != from_state )
> +    return FALSE;
> +  else
> +    return TRUE;
>  }
>
> -inline Boolean gc_con_is_in_marking(GC* gc)
> -{
> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
> +/* set concurrent to idle,
> +    Or enable concurrent gc, called when STW gc finishes
> + */
> +inline void set_con_nil( GC *gc ) {
> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>  }
>
> -inline Boolean gc_sweep_is_concurrent()
> -{
> -  return sweep_is_concurrent;
> +
> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
> +void gc_start_con_enumeration(GC * gc);
> +
> +/* gc start marking phase */
> +void gc_start_con_marking(GC *gc);
> +
> +
> +/* prepare for sweeping */
> +void gc_prepare_sweeping(GC *gc);
> +
> +/* gc start sweeping phase */
> +void gc_start_con_sweeping(GC *gc);
> +
> +/* gc finish concurrent collection */
> +void gc_con_final_work(GC* gc);
> +
> +
> +/* gc wait cocurrent collection finishes */
> +void gc_wait_con_finish( GC* gc );
> +
> +/* is in gc marking phase */
> +inline Boolean in_con_marking_phase( GC *gc ) {
> +  unsigned int status = gc->gc_concurrent_status;
> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>  }
>
> -inline void gc_sweep_set_concurrent()
> -{
> -  sweep_is_concurrent = TRUE;
> +/* is in gc sweeping phase */
> +inline Boolean in_con_sweeping_phase( GC *gc ) {
> +  unsigned int status = gc->gc_concurrent_status;
> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>  }
>
> -inline void gc_sweep_unset_concurrent()
> -{
> -  sweep_is_concurrent = FALSE;
> +inline Boolean in_con_idle( GC *gc ) {
> +  return gc->gc_concurrent_status == GC_CON_NIL;
>  }
>
> -inline Boolean gc_con_is_in_sweeping()
> -{
> -  return concurrent_in_sweeping;
> +inline Boolean gc_con_is_in_STW( GC *gc ) {
> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>  }
>
> -inline Boolean gc_con_is_in_sweeping(GC* gc)
> -{
> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
> +/* is gc ready to sweeping */
> +inline Boolean in_con_ready_sweep( GC *gc ) {
> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>  }
>
> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
> -{
> -  /*Reset status*/
> -  concurrent_in_marking = FALSE;
> -  concurrent_in_sweeping = FALSE;
> -
> -  gc->gc_concurrent_status = status;
> -  switch(status){
> -    case GC_CON_MARK_PHASE:
> -      gc_mark_set_concurrent();
> -      concurrent_in_marking = TRUE;
> -      break;
> -    case GC_CON_SWEEP_PHASE:
> -      gc_sweep_set_concurrent();
> -      concurrent_in_sweeping = TRUE;
> -      break;
> -    default:
> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
> -  }
> +/* is gc sweeping */
> +inline Boolean in_con_sweep( GC *gc ) {
> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>
> -  return;
>  }
>
> -void gc_reset_con_mark(GC* gc);
> -void gc_start_con_mark(GC* gc);
> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
> -int64 gc_get_con_mark_time(GC* gc);
> -
> -void gc_start_con_sweep(GC* gc);
> -void gc_finish_con_sweep(GC * gc);
> +void gc_con_update_stat_after_marking( GC *gc );
>
> -void gc_reset_after_con_collect(GC* gc);
> -void gc_try_finish_con_phase(GC * gc);
>
>  void gc_decide_con_algo(char* concurrent_algo);
>  void gc_set_default_con_algo();
>
> -void gc_reset_con_sweep(GC* gc);
> -
> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>
>  extern volatile Boolean gc_sweep_global_normal_chunk;
>
> +
>  inline Boolean gc_is_sweep_global_normal_chunk()
>  { return gc_sweep_global_normal_chunk; }
>
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
> @@ -18,13 +18,17 @@
>  /**
>  * @author Xiao-Feng Li, 2006/10/05
>  */
> -
> +
> +#include <open/vm_class_info.h>
> +#include <open/vm_class_manipulation.h>
>  #include "../gen/gen.h"
>  #include "../thread/mutator.h"
>  #include "gc_for_barrier.h"
>  #include "../mark_sweep/wspace_mark_sweep.h"
>  #include "../common/gc_concurrent.h"
> +#include "../common/gc_common.h"
>  #include "../finalizer_weakref/finalizer_weakref.h"
> +#include "../verify/verify_live_heap.h"
>
>
>  /* All the write barrier interfaces need cleanup */
> @@ -117,10 +121,8 @@
>     Mutator *mutator = (Mutator *)gc_get_tls();
>
>     //FIXME: Release lock.
> -    lock(mutator->dirty_set_lock);
>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
> -    unlock(mutator->dirty_set_lock);
> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>   }
>  }
>
> @@ -204,7 +206,8 @@
>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>       }
>     }
> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>   }
>  }
> @@ -215,32 +218,141 @@
>   REF* p_obj_slot = (REF*) p_slot ;
>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>   if(p_obj && obj_need_remember_oldvar(p_obj)){
> +    mutator->dirty_obj_num++;
>     mutator_dirtyset_add_entry(mutator, p_obj);
>   }
>  }
>
> +/*
> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
> +{
> +  //Mutator *mutator = (Mutator *)gc_get_tls();
> +
> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
> +
> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
> +     analyze_bad_obj(src_obj);
> +     RAISE_ERROR;
> +  }
> +
> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
> +     analyze_bad_obj(sub_obj);
> +     INFO2("gc.verifier", "[source object]");
> +     analyze_bad_obj(src_obj);
> +     //RAISE_ERROR;
> +     return;
> +  }
> +
> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
> +     analyze_bad_obj(target_obj);
> +     RAISE_ERROR;
> +  }
> +
> +  *p_slot = p_target;
> +}
> +*/
>  //===========================================
>
>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>  {  assert(0); }
>
> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
> +
> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
> +{
> +
> +
> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
> +
> +    Class_Handle src_class = src_gcvt->gc_clss;
> +    Class_Handle dst_class = dst_gcvt->gc_clss;
> +
> +
> +       //element size of src should be same as element size of dst
> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
> +       unsigned int elem_size = src_gcvt->array_elem_size;
> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
> +       /*
> +       #ifdef COMPRESS_REFERENCE
> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
> +       #else
> +       #endif
> +       */
> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
> +
> +
> +       if(class_is_instanceof(src_class, dst_class)) {
> +         //rem obj before is for OTF GC barriers
> +         if(WB_REM_OLD_VAR == write_barrier_function) {
> +            for (unsigned int count = 0; count < length; count++) {
> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
> +            }
> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
> +            write_barrier_rem_obj_snapshot(dst_array);
> +         }
> +
> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
> +
> +       } else { //for the condition src is not the type of dst
> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
> +            write_barrier_rem_obj_snapshot(dst_array);
> +          }
> +
> +          for (unsigned int count = 0; count < length; count++) {
> +             // 1, null elements copy direct
> +             if (src_copy_body[count] == NULL) {
> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
> +                 }
> +                  dst_copy_body[count] = NULL;
> +                  continue;
> +               }
> +
> +             // 2, For non-null elements check if types are compatible.
> +/*
> +#ifdef COMPRESS_REFERENCE
> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
> +#else
> +#endif
> +*/
> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
> +
> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
> +                      write_barrier_rem_source_obj(dst_array);
> +                  }
> +                  return FALSE;
> +             }
> +
> +             if(WB_REM_OLD_VAR == write_barrier_function) {
> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
> +             }
> +              dst_copy_body[count] = src_copy_body[count];
> +        }
> +      }
> +
> +    //rem obj after is for mostly concurrent
> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
> +        write_barrier_rem_source_obj(dst_array);
> +    }
> +
> +    return TRUE;
> +}
> +
> +
> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>  {
> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
> -      we treat it as an new object. It has already been marked when dest object was created.
> -      We use WB_REM_SOURCE_OBJ function here to debug.
> -    */
> -
> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
> -    Mutator *mutator = (Mutator *)gc_get_tls();
> -    lock(mutator->dirty_set_lock);
> -
> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
> -
> -    unlock(mutator->dirty_set_lock);
> -  }
>
>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>     return;
> @@ -283,6 +395,13 @@
>       write_barrier_rem_slot_oldvar(p_slot);
>       *p_slot = p_target;
>       break;
> +    //just debugging
> +    /*
> +    case WB_CON_DEBUG:
> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
> +       //*p_slot = p_target;
> +       break;
> +    */
>     default:
>       assert(0);
>       return;
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
> @@ -32,7 +32,8 @@
>   WB_REM_SOURCE_REF    = 0x02,
>   WB_REM_OLD_VAR       = 0x03,
>   WB_REM_NEW_VAR       = 0x04,
> -  WB_REM_OBJ_SNAPSHOT  = 0x05
> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
> +  WB_CON_DEBUG = 0x06
>  };
>
>  inline void gc_set_barrier_function(unsigned int wb_function)
> @@ -43,4 +44,3 @@
>  #endif /* _GC_FOR_BARRIER_H_ */
>
>
> -
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
> @@ -203,4 +203,3 @@
>
>
>
> -
>
> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
> ==============================================================================
> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
> @@ -30,7 +30,7 @@
>  #include "../mark_sweep/gc_ms.h"
>  #include "../move_compact/gc_mc.h"
>  #include "interior_pointer.h"
> -#include "../thread/marker.h"
> +#include "../thread/conclctor.h"
>  #include "../thread/collector.h"
>  #include "../verify/verify_live_heap.h"
>  #include "../finalizer_weakref/finalizer_weakref.h"
> @@ -115,7 +115,10 @@
>   collection_scheduler_initialize(gc);
>
>   if(gc_is_specify_con_gc()){
> -    marker_initialize(gc);
> +     gc->gc_concurrent_status = GC_CON_NIL;
> +    conclctor_initialize(gc);
> +  } else {
> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>   }
>
>   collector_initialize(gc);
> @@ -134,6 +137,9 @@
>  {
>   INFO2("gc.process", "GC: call GC wrapup ....");
>   GC* gc =  p_global_gc;
> +  // destruct threads first, and then destruct data structures
> +  conclctor_destruct(gc);
> +  collector_destruct(gc);
>
>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>  gc_ms_destruct((GC_MS*)gc);
> @@ -148,8 +154,6 @@
>  #ifndef BUILD_IN_REFERENT
>   gc_finref_metadata_destruct(gc);
>  #endif
> -  collector_destruct(gc);
> -  marker_destruct(gc);
>
>   if( verify_live_heap ){
>     gc_terminate_heap_verification(gc);
> @@ -446,4 +450,3 @@
>
>
>
> -
>
>
>



-- 
Unless stated otherwise above:
IBM United Kingdom Limited - Registered in England and Wales with number 741598.
Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Posted by Tim Ellison <t....@gmail.com>.
Sian January wrote:
> The website says "During feature freeze new functionality, big changes
> and code redesign are forbidden; only bugs fixes and code tidy-up are
> allowed".
> 
> So I think we already agreed that "big changes" are also forbidden,
> but maybe we need to be a bit clearer about this when we do the freeze
> each time, or rename "feature-freeze" to something else to make it
> more obvious?
> 
> Or perhaps this discussion will be a good enough reminder to everyone :-)

+1  a heads-up would have been good, but I trust Xiao-Feng's assessment
that it will be safe to leave in now.

I don't think a byte count limit is necessary.  People intuitively know
what constitutes a big patch at this stage of the release.  And if there
is disagreement it is right that it provokes a discussion like this.

Regards,
Tim


> 2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
>> On Wed, Oct 29, 2008 at 6:33 PM, Sian January
>> <si...@googlemail.com> wrote:
>>> Thanks for your quick reply Xiao-Feng.
>>>
>>> I haven't studied the code that much, so if it's all disabled by
>>> default as you say then I think it's ok to leave it in.
>>>
>>> In future I do think it would be better practice to discuss it on the
>>> dev list and get some agreement before committing something this size
>>> during feature freeze week.  This is because with some large changes
>>> there can be unforeseen effects that can impact the code in ways that
>>> the original author hadn't realised.  Also if we had several major
>>> changes and then saw regressions it could be difficult to work out
>>> what had caused them and it could badly delay the release.
>> Agree. To discuss beforehand is a better practice. I will surely follow it.
>>
>> To improve the process, we can introduce a guideline that, say, any
>> patch bigger than 10KB (?) should be considered as a work of new
>> feature, hence not allowed for commit during feature-freeze period.
>> That could help to clarify the confusion on what a new feature is.
>>
>> Suggestion?
>>
>> Thanks,
>> xiaofeng
>>
>>> Does anyone else have a different opinion on either rolling back the
>>> code or on general practice during feature freeze?
>>>
>>> Thanks,
>>>
>>> Sian
>>>
>>>
>>> 2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
>>>> Sian, thanks for your notice.
>>>>
>>>> This patch is indeed big. Most of the code are guarded by a macro
>>>> USE_UNIQUE_MARK_SWEEP_GC and has no impact on the existing code base.
>>>> It is disabled by default, and I tested it before I committed it.
>>>>
>>>> Actually it is not a new feature, but a fix of existing concurrent GC
>>>> scheduler. Well, I admit it looks like a new feature since it changes
>>>> lot of code...
>>>>
>>>> If it has any impact on stability, I will roll back it immediately.
>>>> Thanks for your patience.
>>>>
>>>> Thanks,
>>>> xiaofeng
>>>>
>>>> On Wed, Oct 29, 2008 at 5:22 PM, Sian January
>>>> <si...@googlemail.com> wrote:
>>>>> Hi Xiao-Feng,
>>>>>
>>>>> This commit looks like quite a large new feature to me.  Since we're
>>>>> in feature freeze this week for M8 I really think it should be backed
>>>>> out until after the milestone, as we should be focussing on testing
>>>>> and stability at the moment.
>>>>>
>>>>> Thanks,
>>>>>
>>>>> Sian
>>>>>
>>>>>
>>>>> 2008/10/29  <xl...@apache.org>:
>>>>>> Author: xli
>>>>>> Date: Tue Oct 28 20:01:01 2008
>>>>>> New Revision: 708756
>>>>>>
>>>>>> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
>>>>>> Log:
>>>>>> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>>>>>>
>>>>>> Added:
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
>>>>>> Removed:
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
>>>>>> Modified:
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
>>>>>> @@ -34,6 +34,7 @@
>>>>>>     gc_heap_write_global_slot;
>>>>>>     gc_heap_write_ref;
>>>>>>     gc_heap_wrote_object;
>>>>>> +    gc_heap_copy_object_array;
>>>>>>     gc_init;
>>>>>>     gc_is_object_pinned;
>>>>>>     gc_iterate_heap;
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -31,24 +31,16 @@
>>>>>>   return;
>>>>>>  }
>>>>>>
>>>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>>>> -{
>>>>>> -  if(gc_is_specify_con_gc()){
>>>>>> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
>>>>>> -  }
>>>>>> -  return;
>>>>>> -}
>>>>>>
>>>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>>>>>>  {
>>>>>>   /*collection scheduler only schedules concurrent collection now.*/
>>>>>>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>>>>>>     assert(gc_is_specify_con_gc());
>>>>>> -    return gc_sched_con_collection(gc, gc_cause);
>>>>>> +    return gc_con_perform_collection( gc );
>>>>>>   }else{
>>>>>>     return FALSE;
>>>>>>   }
>>>>>>  }
>>>>>>
>>>>>>
>>>>>> -
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>>>> @@ -26,12 +26,8 @@
>>>>>>  void collection_scheduler_initialize(GC* gc);
>>>>>>  void collection_scheduler_destruct(GC* gc);
>>>>>>
>>>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>>>>>>
>>>>>>  #endif
>>>>>>
>>>>>>
>>>>>> -
>>>>>> -
>>>>>> -
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -22,7 +22,7 @@
>>>>>>  #include "collection_scheduler.h"
>>>>>>  #include "concurrent_collection_scheduler.h"
>>>>>>  #include "gc_concurrent.h"
>>>>>> -#include "../thread/marker.h"
>>>>>> +#include "../thread/conclctor.h"
>>>>>>  #include "../verify/verify_live_heap.h"
>>>>>>
>>>>>>  #define NUM_TRIAL_COLLECTION 2
>>>>>> @@ -53,6 +53,7 @@
>>>>>>  Boolean gc_use_space_scheduler()
>>>>>>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>>>>>>
>>>>>> +
>>>>>>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>>>>>>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>>>>
>>>>>> @@ -75,6 +76,7 @@
>>>>>>   STD_FREE(gc->collection_scheduler);
>>>>>>  }
>>>>>>
>>>>>> +
>>>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>>>>>>  {
>>>>>>   string_to_upper(cc_scheduler);
>>>>>> @@ -93,281 +95,248 @@
>>>>>>   gc_enable_time_scheduler();
>>>>>>  }
>>>>>>
>>>>>> -static Boolean time_to_start_mark(GC* gc)
>>>>>> -{
>>>>>> -  if(!gc_use_time_scheduler()) return FALSE;
>>>>>> -
>>>>>> -  int64 time_current = time_now();
>>>>>> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
>>>>>> -}
>>>>>> -
>>>>>> -static Boolean space_to_start_mark(GC* gc)
>>>>>> -{
>>>>>> -  if(!gc_use_space_scheduler()) return FALSE;
>>>>>> +/*====================== new scheduler ===================*/
>>>>>> +extern unsigned int NUM_CON_MARKERS;
>>>>>> +extern unsigned int NUM_CON_SWEEPERS;
>>>>>> +unsigned int gc_get_mutator_number(GC *gc);
>>>>>> +
>>>>>> +#define MOSTLY_CON_MARKER_DIVISION 0.5
>>>>>> +unsigned int mostly_con_final_marker_num=1;
>>>>>> +unsigned int mostly_con_long_marker_num=1;
>>>>>> +
>>>>>> +unsigned int gc_get_marker_number(GC* gc) {
>>>>>> +  unsigned int mutator_num = gc_get_mutator_number(gc);
>>>>>> +  unsigned int marker_specified = NUM_CON_MARKERS;
>>>>>> +  if(marker_specified == 0) {
>>>>>> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>>>> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
>>>>>> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>>>> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
>>>>>> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
>>>>>> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
>>>>>> +    }
>>>>>> +  }
>>>>>>
>>>>>> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
>>>>>> -  return (size_new_obj > space_threshold_to_start_mark);
>>>>>> +  assert(marker_specified);
>>>>>> +  return marker_specified;
>>>>>>  }
>>>>>>
>>>>>> -static Boolean gc_need_start_con_mark(GC* gc)
>>>>>> -{
>>>>>> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
>>>>>> -
>>>>>> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
>>>>>> -    return TRUE;
>>>>>> -  else
>>>>>> -    return FALSE;
>>>>>> +#define CON_SWEEPER_DIVISION 0.8
>>>>>> +unsigned int gc_get_sweeper_numer(GC *gc) {
>>>>>> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
>>>>>> +  if(sweeper_specified == 0)
>>>>>> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
>>>>>> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
>>>>>> +  assert(sweeper_specified);
>>>>>> +  return sweeper_specified;
>>>>>>  }
>>>>>>
>>>>>> -static Boolean gc_need_start_con_sweep(GC* gc)
>>>>>> -{
>>>>>> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>>>>>>
>>>>>> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
>>>>>> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
>>>>>> -    return TRUE;
>>>>>> -  else
>>>>>> -    return FALSE;
>>>>>> -}
>>>>>>
>>>>>> -static Boolean gc_need_reset_after_con_collect(GC* gc)
>>>>>> -{
>>>>>> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
>>>>>> -    return TRUE;
>>>>>> -  else
>>>>>> -    return FALSE;
>>>>>> -}
>>>>>>
>>>>>> -static Boolean gc_need_start_con_enum(GC* gc)
>>>>>> -{
>>>>>> -  /*TODO: support on-the-fly root set enumeration.*/
>>>>>> -  return FALSE;
>>>>>> -}
>>>>>> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
>>>>>> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
>>>>>> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>>>>>>
>>>>>> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
>>>>>> -#define TIME_CORRECTION_OTF_MARK 0.65f
>>>>>> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
>>>>>> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
>>>>>> -
>>>>>> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
>>>>>> -{
>>>>>> -  Space* space = NULL;
>>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>>> -
>>>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>>> -  space = (Space*) gc_get_wspace(gc);
>>>>>> -#endif
>>>>>> -  if(!space) return;
>>>>>> +/* for checking heap effcient*/
>>>>>> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
>>>>>> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
>>>>>> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>>>>>>
>>>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>>>> -
>>>>>> -  unsigned int slot_index = cc_scheduler->last_window_index;
>>>>>> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
>>>>>> -
>>>>>> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
>>>>>> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
>>>>>> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
>>>>>> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
>>>>>> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>>>>>>
>>>>>> -  cc_scheduler->last_mutator_time = time_mutator;
>>>>>> -  cc_scheduler->last_collector_time = time_collection;
>>>>>> -
>>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>>>> -    return;
>>>>>> -
>>>>>> -  cc_scheduler->alloc_rate_window[slot_index]
>>>>>> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
>>>>>> +static int64 last_check_time_point = time_now();
>>>>>> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>>>>>>
>>>>>> -  if(gc_mark_is_concurrent()){
>>>>>> -    cc_scheduler->trace_rate_window[slot_index]
>>>>>> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
>>>>>> -  }else{
>>>>>> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
>>>>>> -  }
>>>>>> -
>>>>>> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
>>>>>> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
>>>>>> +//just debugging
>>>>>> +int64 get_last_check_point()
>>>>>> +{
>>>>>> +   return last_check_time_point;
>>>>>>  }
>>>>>>
>>>>>> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
>>>>>> -{
>>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>>>> -    return;
>>>>>> +static unsigned int alloc_space_threshold = 0;
>>>>>>
>>>>>> -  Space* space = NULL;
>>>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>>> -  space = (Space*) gc_get_wspace(gc);
>>>>>> -#endif
>>>>>> -  if(!space) return;
>>>>>> -
>>>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>>>> -
>>>>>> -  float sum_alloc_rate = 0;
>>>>>> -  float sum_trace_rate = 0;
>>>>>> -  float sum_space_util_ratio = 0;
>>>>>> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
>>>>>> +static unsigned int space_check_stage_2; //BIG_DELTA
>>>>>>
>>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>>> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
>>>>>> +{
>>>>>>
>>>>>> -  int64 time_this_collection_correction = 0;
>>>>>> -#if 0
>>>>>> -  float space_util_ratio = space_stat->space_utilization_ratio;
>>>>>> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
>>>>>> -    time_this_collection_correction = 0;
>>>>>> -  }else{
>>>>>> -    time_this_collection_correction
>>>>>> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
>>>>>> -  }
>>>>>> -#endif
>>>>>> -
>>>>>> -  unsigned int i;
>>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>>> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
>>>>>> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
>>>>>> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
>>>>>> -  }
>>>>>> -
>>>>>> -  TRACE2("gc.con.cs","Allocation Rate: ");
>>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
>>>>>> -  }
>>>>>> -
>>>>>> -  TRACE2("gc.con.cs","Tracing Rate: ");
>>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
>>>>>> -  }
>>>>>> -
>>>>>> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
>>>>>> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
>>>>>> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
>>>>>> -
>>>>>> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
>>>>>> -
>>>>>> -  if(average_alloc_rate == 0 ){
>>>>>> -    time_delay_to_start_mark = MIN_DELAY_TIME;
>>>>>> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
>>>>>> -  }else if(average_trace_rate == 0){
>>>>>> -    time_delay_to_start_mark = MAX_DELAY_TIME;
>>>>>> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>>>> -  }else{
>>>>>> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
>>>>>> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
>>>>>> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
>>>>>> -
>>>>>> -    if(time_alloc_expected > time_trace_expected){
>>>>>> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
>>>>>> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
>>>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
>>>>>> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
>>>>>> -      }
>>>>>> -    }else{
>>>>>> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
>>>>>> +  float util_rate = con_collection_stat->heap_utilization_rate;
>>>>>> +  unsigned int space_threshold = 0;
>>>>>> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>>> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
>>>>>> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
>>>>>> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>>>> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
>>>>>> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
>>>>>> +
>>>>>> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
>>>>>> +    } else {  //use default
>>>>>> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>>>> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>>>     }
>>>>>> -
>>>>>> -    cc_scheduler->space_threshold_to_start_mark =
>>>>>> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
>>>>>> -
>>>>>> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
>>>>>> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
>>>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>>> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>>>> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>>>   }
>>>>>> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
>>>>>> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>>>>>>
>>>>>> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
>>>>>> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
>>>>>> +  else
>>>>>> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
>>>>>> +
>>>>>> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
>>>>>> +  return space_threshold;
>>>>>>  }
>>>>>>
>>>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>>>> -{
>>>>>> -  assert(gc_is_specify_con_gc());
>>>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
>>>>>> -
>>>>>> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
>>>>>> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
>>>>>> -
>>>>>> -  return;
>>>>>> -}
>>>>>> -
>>>>>> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
>>>>>> +/* this parameters are updated at end of GC */
>>>>>> +void gc_update_scheduler_parameter( GC *gc )
>>>>>>  {
>>>>>> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
>>>>>> -  vm_gc_lock_enum();
>>>>>> -
>>>>>> -  gc_try_finish_con_phase(gc);
>>>>>> -
>>>>>> -  if(gc_need_start_con_enum(gc)){
>>>>>> -    /*TODO:Concurrent rootset enumeration.*/
>>>>>> -    assert(0);
>>>>>> -  }
>>>>>> -
>>>>>> -  if(gc_need_start_con_mark(gc)){
>>>>>> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
>>>>>> -    gc_start_con_mark(gc);
>>>>>> -    vm_gc_unlock_enum();
>>>>>> -    unlock(gc->lock_collect_sched);
>>>>>> -    return TRUE;
>>>>>> -  }
>>>>>> -
>>>>>> -  if(gc_need_start_con_sweep(gc)){
>>>>>> -    gc->num_collections++;
>>>>>> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
>>>>>> -    gc_start_con_sweep(gc);
>>>>>> -    vm_gc_unlock_enum();
>>>>>> -    unlock(gc->lock_collect_sched);
>>>>>> -    return TRUE;
>>>>>> -  }
>>>>>> -
>>>>>> -  if(gc_need_reset_after_con_collect(gc)){
>>>>>> -    int64 pause_start = time_now();
>>>>>> -    int disable_count = vm_suspend_all_threads();
>>>>>> -    gc_reset_after_con_collect(gc);
>>>>>> -    gc_start_mutator_time_measure(gc);
>>>>>> -    set_collection_end_time();
>>>>>> -    vm_resume_all_threads(disable_count);
>>>>>> -    vm_gc_unlock_enum();
>>>>>> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>>>> -    unlock(gc->lock_collect_sched);
>>>>>> -    return TRUE;
>>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +   last_check_time_point = time_now();
>>>>>> +
>>>>>> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>>>> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
>>>>>> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
>>>>>> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
>>>>>> +
>>>>>> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
>>>>>> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>>>> +   if(gc_is_specify_con_sweep()) {
>>>>>> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
>>>>>> +   }
>>>>>> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
>>>>>> +}
>>>>>> +
>>>>>> +void gc_force_update_scheduler_parameter( GC *gc )
>>>>>> +{
>>>>>> +    last_check_time_point = time_now();
>>>>>> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
>>>>>> +    check_delay_time = time_now();
>>>>>> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
>>>>>> +}
>>>>>> +
>>>>>> +
>>>>>> +
>>>>>> +static inline Boolean check_start_mark( GC *gc )
>>>>>> +{
>>>>>> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
>>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +   /*just debugging*/
>>>>>> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
>>>>>> +   if( alloc_space_threshold < new_object_occupied_size ) {
>>>>>> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
>>>>>> +       return TRUE;
>>>>>> +   }
>>>>>> +
>>>>>> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
>>>>>> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
>>>>>> +   int64 last_check_delay = check_delay_time;
>>>>>> +
>>>>>> +   if( free_space < space_check_stage_2 ) {
>>>>>> +       check_delay_time = SMALL_DELTA;
>>>>>> +   } else if( free_space < space_check_stage_1 ) {
>>>>>> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
>>>>>> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
>>>>>> +           unsigned int interval_space = new_object_occupied_size;
>>>>>> +           con_collection_stat->alloc_rate = interval_space/interval_time;
>>>>>> +       }
>>>>>> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
>>>>>> +   }
>>>>>> +   last_check_time_point = time_now();
>>>>>> +
>>>>>> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
>>>>>> +   return FALSE;
>>>>>> +}
>>>>>> +
>>>>>> +static SpinLock check_lock;
>>>>>> +static inline Boolean space_should_start_mark( GC *gc)
>>>>>> +{
>>>>>> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
>>>>>> +      Boolean should_start = check_start_mark(gc);
>>>>>> +      unlock(check_lock);
>>>>>> +      return should_start;
>>>>>>   }
>>>>>> -  vm_gc_unlock_enum();
>>>>>> -  unlock(gc->lock_collect_sched);
>>>>>>   return FALSE;
>>>>>>  }
>>>>>>
>>>>>> -extern unsigned int NUM_MARKERS;
>>>>>> -
>>>>>> -unsigned int gc_decide_marker_number(GC* gc)
>>>>>> -{
>>>>>> -  unsigned int num_active_marker;
>>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>>> +inline static Boolean gc_con_start_condition( GC* gc ) {
>>>>>> +   return space_should_start_mark(gc);
>>>>>> +}
>>>>>>
>>>>>> -  /*If the number of markers is specfied, just return the specified value.*/
>>>>>> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>>>>>>
>>>>>> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
>>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
>>>>>> -    /*Start trial cycle, collection set to 1 in trial cycle and */
>>>>>> -    num_active_marker = 1;
>>>>>> -  }else{
>>>>>> -    num_active_marker = cc_scheduler->last_marker_num;
>>>>>> -    int64 c_time = cc_scheduler->last_collector_time;
>>>>>> -    int64 m_time = cc_scheduler->last_mutator_time;
>>>>>> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
>>>>>> -
>>>>>> -    if(num_active_marker == 0) num_active_marker = 1;
>>>>>> -
>>>>>> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
>>>>>> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
>>>>>> -      num_active_marker ++;
>>>>>> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
>>>>>> -    }else if((float)d_time > (m_time * 0.6)){
>>>>>> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
>>>>>> -      num_active_marker --;
>>>>>> -      if(num_active_marker == 0)  num_active_marker = 1;
>>>>>> -    }
>>>>>> -
>>>>>> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
>>>>>> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
>>>>>> +void gc_reset_after_con_collection(GC *gc);
>>>>>> +void gc_merge_free_list_global(GC *gc);
>>>>>> +void gc_con_stat_information_out(GC *gc);
>>>>>> +
>>>>>> +unsigned int sub_time = 0;
>>>>>> +int64 pause_time = 0;
>>>>>> +/*
>>>>>> +   concurrent collection entry function, it may start proper phase according to the current state.
>>>>>> +*/
>>>>>> +Boolean gc_con_perform_collection( GC* gc ) {
>>>>>> +  int disable_count;
>>>>>> +  int64 pause_start;
>>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  switch( gc->gc_concurrent_status ) {
>>>>>> +    case GC_CON_NIL :
>>>>>> +      if( !gc_con_start_condition(gc) )
>>>>>> +        return FALSE;
>>>>>> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
>>>>>> +        return FALSE;
>>>>>> +
>>>>>> +      gc->num_collections++;
>>>>>> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
>>>>>> +
>>>>>> +      con_collection_stat->gc_start_time = time_now();
>>>>>> +      disable_count = hythread_reset_suspend_disable();
>>>>>> +
>>>>>> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
>>>>>> +      con_collection_stat->marking_start_time = time_now();
>>>>>> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
>>>>>> +      gc_start_con_marking(gc);
>>>>>> +
>>>>>> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
>>>>>> +      vm_resume_threads_after();
>>>>>> +      hythread_set_suspend_disable(disable_count);
>>>>>> +      break;
>>>>>> +
>>>>>> +    case GC_CON_BEFORE_SWEEP :
>>>>>> +      if(!gc_is_specify_con_sweep())
>>>>>> +         return FALSE;
>>>>>> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
>>>>>> +         return FALSE;
>>>>>> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
>>>>>> +      break;
>>>>>> +
>>>>>> +
>>>>>> +    case GC_CON_BEFORE_FINISH :
>>>>>> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
>>>>>> +                 return FALSE;
>>>>>> +        /* thread should be suspended before the state transformation,
>>>>>> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
>>>>>> +        disable_count = vm_suspend_all_threads();
>>>>>> +        pause_start = time_now();
>>>>>> +
>>>>>> +        gc_merge_free_list_global(gc);
>>>>>> +        gc_reset_after_con_collection(gc);
>>>>>> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
>>>>>> +        pause_time = time_now()-pause_start;
>>>>>> +
>>>>>> +        vm_resume_all_threads(disable_count);
>>>>>> +        gc_con_stat_information_out(gc);
>>>>>> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
>>>>>> +        break;
>>>>>> +    default :
>>>>>> +      return FALSE;
>>>>>>   }
>>>>>> -
>>>>>> -  cc_scheduler->last_marker_num = num_active_marker;
>>>>>> -  return num_active_marker;
>>>>>> +  return TRUE;
>>>>>>  }
>>>>>>
>>>>>> +
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>>>> @@ -20,6 +20,7 @@
>>>>>>
>>>>>>  #define STAT_SAMPLE_WINDOW_SIZE 5
>>>>>>
>>>>>> +struct GC_MS;
>>>>>>  typedef struct Con_Collection_Scheduler {
>>>>>>   /*common field*/
>>>>>>   GC* gc;
>>>>>> @@ -46,10 +47,17 @@
>>>>>>  void con_collection_scheduler_initialize(GC* gc);
>>>>>>  void con_collection_scheduler_destruct(GC* gc);
>>>>>>
>>>>>> +void gc_update_scheduler_parameter( GC *gc );
>>>>>> +void gc_force_update_scheduler_parameter( GC *gc );
>>>>>> +Boolean gc_con_perform_collection( GC* gc );
>>>>>>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
>>>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>>
>>>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>>>>>>  void gc_set_default_cc_scheduler_kind();
>>>>>> +
>>>>>> +extern unsigned int mostly_con_final_marker_num;
>>>>>> +extern unsigned int mostly_con_long_marker_num;
>>>>>> +
>>>>>>  #endif
>>>>>>
>>>>>> +
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -22,7 +22,7 @@
>>>>>>  #include "gc_common.h"
>>>>>>  #include "gc_metadata.h"
>>>>>>  #include "../thread/mutator.h"
>>>>>> -#include "../thread/marker.h"
>>>>>> +#include "../thread/conclctor.h"
>>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>>  #include "../gen/gen.h"
>>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>>> @@ -74,11 +74,19 @@
>>>>>>  static int64 collection_start_time = time_now();
>>>>>>  static int64 collection_end_time = time_now();
>>>>>>
>>>>>> -int64 get_collection_end_time()
>>>>>> +int64 get_gc_start_time()
>>>>>> +{ return collection_start_time; }
>>>>>> +
>>>>>> +void set_gc_start_time()
>>>>>> +{ collection_start_time = time_now(); }
>>>>>> +
>>>>>> +int64 get_gc_end_time()
>>>>>>  { return collection_end_time; }
>>>>>>
>>>>>> -void set_collection_end_time()
>>>>>> -{ collection_end_time = time_now(); }
>>>>>> +void set_gc_end_time()
>>>>>> +{
>>>>>> +  collection_end_time = time_now();
>>>>>> +}
>>>>>>
>>>>>>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>>>>>>  {
>>>>>> @@ -93,17 +101,17 @@
>>>>>>
>>>>>>  }
>>>>>>
>>>>>> -void gc_update_space_stat(GC_MS* gc)
>>>>>> +void gc_update_space_stat(GC* gc)
>>>>>>  {
>>>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>>> -    gc_ms_update_space_stat((GC_MS*)gc);
>>>>>> +      gc_ms_update_space_stat((GC_MS *)gc);
>>>>>>  #endif
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_space_stat(GC_MS* gc)
>>>>>> +void gc_reset_space_stat(GC* gc)
>>>>>>  {
>>>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>>> -    gc_ms_reset_space_stat((GC_MS*)gc);
>>>>>> +      gc_ms_reset_space_stat((GC_MS *)gc);
>>>>>>  #endif
>>>>>>  }
>>>>>>
>>>>>> @@ -118,7 +126,7 @@
>>>>>>   gc_set_rootset(gc);
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
>>>>>> +void gc_reset_after_collection(GC* gc)
>>>>>>  {
>>>>>>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>>>>>>
>>>>>> @@ -139,11 +147,9 @@
>>>>>>  #endif
>>>>>>   }
>>>>>>
>>>>>> -  gc_update_space_stat((GC_MS*)gc);
>>>>>> +  gc_update_space_stat(gc);
>>>>>>
>>>>>> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
>>>>>> -
>>>>>> -  gc_reset_space_stat((GC_MS*)gc);
>>>>>> +  gc_reset_space_stat(gc);
>>>>>>
>>>>>>   gc_reset_collector_state(gc);
>>>>>>
>>>>>> @@ -154,23 +160,25 @@
>>>>>>
>>>>>>  }
>>>>>>
>>>>>> +void set_check_delay( int64 mutator_time );
>>>>>> +
>>>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>>>>>>  {
>>>>>>   INFO2("gc.process", "\nGC: GC start ...\n");
>>>>>>
>>>>>> -  collection_start_time = time_now();
>>>>>> -  int64 time_mutator = collection_start_time - collection_end_time;
>>>>>> -
>>>>>> -  gc->num_collections++;
>>>>>>   gc->cause = gc_cause;
>>>>>>
>>>>>>   if(gc_is_specify_con_gc()){
>>>>>> -    gc_finish_con_GC(gc, time_mutator);
>>>>>> -    collection_end_time = time_now();
>>>>>> +    gc_wait_con_finish(gc);
>>>>>>     INFO2("gc.process", "GC: GC end\n");
>>>>>>     return;
>>>>>>   }
>>>>>>
>>>>>> +   set_gc_start_time();
>>>>>> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
>>>>>> +
>>>>>> +  gc->num_collections++;
>>>>>> +
>>>>>>   /* FIXME:: before mutators suspended, the ops below should be very careful
>>>>>>      to avoid racing with mutators. */
>>>>>>
>>>>>> @@ -207,16 +215,16 @@
>>>>>>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>>>>>>  #endif
>>>>>>
>>>>>> -  collection_end_time = time_now();
>>>>>> +  set_gc_end_time();
>>>>>>
>>>>>> -  int64 time_collection = collection_end_time - collection_start_time;
>>>>>> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>>>>>>
>>>>>>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>>>>>>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>>>>>>   gc_gen_space_verbose_info((GC_Gen*)gc);
>>>>>>  #endif
>>>>>>
>>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>>> +  gc_reset_after_collection(gc);
>>>>>>
>>>>>>   gc_assign_free_area_to_mutators(gc);
>>>>>>
>>>>>> @@ -230,6 +238,3 @@
>>>>>>
>>>>>>
>>>>>>
>>>>>> -
>>>>>> -
>>>>>> -
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
>>>>>> @@ -39,7 +39,8 @@
>>>>>>
>>>>>>  #include "../common/gc_for_barrier.h"
>>>>>>
>>>>>> -/*
>>>>>> +
>>>>>> + /*
>>>>>>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>>>>>>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>>>>>>  */
>>>>>> @@ -336,19 +337,7 @@
>>>>>>   return TRUE;
>>>>>>  }
>>>>>>
>>>>>> -extern volatile Boolean obj_alloced_live;
>>>>>> -inline Boolean is_obj_alloced_live()
>>>>>> -{ return obj_alloced_live;  }
>>>>>>
>>>>>> -inline void gc_enable_alloc_obj_live()
>>>>>> -{
>>>>>> -  obj_alloced_live = TRUE;
>>>>>> -}
>>>>>> -
>>>>>> -inline void gc_disable_alloc_obj_live()
>>>>>> -{
>>>>>> -  obj_alloced_live = FALSE;
>>>>>> -}
>>>>>>
>>>>>>  /***************************************************************/
>>>>>>
>>>>>> @@ -391,7 +380,7 @@
>>>>>>  /***************************************************************/
>>>>>>
>>>>>>  /* all GCs inherit this GC structure */
>>>>>> -struct Marker;
>>>>>> +struct Conclctor;
>>>>>>  struct Mutator;
>>>>>>  struct Collector;
>>>>>>  struct GC_Metadata;
>>>>>> @@ -421,9 +410,12 @@
>>>>>>   unsigned int num_collectors;
>>>>>>   unsigned int num_active_collectors; /* not all collectors are working */
>>>>>>
>>>>>> -  Marker** markers;
>>>>>> -  unsigned int num_markers;
>>>>>> +  /*concurrent markers and collectors*/
>>>>>> +  Conclctor** conclctors;
>>>>>> +  unsigned int num_conclctors;
>>>>>> +  //unsigned int num_active_conclctors;
>>>>>>   unsigned int num_active_markers;
>>>>>> +  unsigned int num_active_sweepers;
>>>>>>
>>>>>>   /* metadata is the pool for rootset, tracestack, etc. */
>>>>>>   GC_Metadata* metadata;
>>>>>> @@ -443,7 +435,7 @@
>>>>>>
>>>>>>   Space_Tuner* tuner;
>>>>>>
>>>>>> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>>>> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>>>>   Collection_Scheduler* collection_scheduler;
>>>>>>
>>>>>>   SpinLock lock_con_mark;
>>>>>> @@ -488,11 +480,15 @@
>>>>>>
>>>>>>  GC* gc_parse_options();
>>>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
>>>>>> +void gc_relaim_heap_con_mode( GC *gc);
>>>>>>  void gc_prepare_rootset(GC* gc);
>>>>>>
>>>>>>
>>>>>> -int64 get_collection_end_time();
>>>>>> -void set_collection_end_time();
>>>>>> +int64 get_gc_start_time();
>>>>>> +void set_gc_start_time();
>>>>>> +
>>>>>> +int64 get_gc_end_time();
>>>>>> +void set_gc_end_time();
>>>>>>
>>>>>>  /* generational GC related */
>>>>>>
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -17,325 +17,582 @@
>>>>>>  #include "gc_common.h"
>>>>>>  #include "gc_metadata.h"
>>>>>>  #include "../thread/mutator.h"
>>>>>> -#include "../thread/marker.h"
>>>>>> +#include "../thread/conclctor.h"
>>>>>>  #include "../thread/collector.h"
>>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>>  #include "../gen/gen.h"
>>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>>> +#include "../mark_sweep/wspace_mark_sweep.h"
>>>>>>  #include "interior_pointer.h"
>>>>>>  #include "collection_scheduler.h"
>>>>>>  #include "gc_concurrent.h"
>>>>>>  #include "../common/gc_for_barrier.h"
>>>>>> +#include "concurrent_collection_scheduler.h"
>>>>>> +#include "../verify/verify_live_heap.h"
>>>>>>
>>>>>> -volatile Boolean concurrent_in_marking  = FALSE;
>>>>>> -volatile Boolean concurrent_in_sweeping = FALSE;
>>>>>> -volatile Boolean mark_is_concurrent     = FALSE;
>>>>>> -volatile Boolean sweep_is_concurrent    = FALSE;
>>>>>> +struct Con_Collection_Statistics;
>>>>>>
>>>>>>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>>>>>>
>>>>>> -static void gc_check_con_mark(GC* gc)
>>>>>> +//just debugging
>>>>>> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>>>>>>  {
>>>>>> -  if(!is_mark_finished(gc)){
>>>>>> -    lock(gc->lock_con_mark);
>>>>>> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>>> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>>> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -      //ignore.
>>>>>> -    }
>>>>>> -    unlock(gc->lock_con_mark);
>>>>>> -  }
>>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
>>>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
>>>>>> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
>>>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
>>>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>>>>>>  }
>>>>>>
>>>>>> -static void gc_wait_con_mark_finish(GC* gc)
>>>>>> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>>>>>>  {
>>>>>> -  wait_mark_finish(gc);
>>>>>> -  gc_set_barrier_function(WB_REM_NIL);
>>>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
>>>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>>>>  }
>>>>>> +
>>>>>> +volatile Boolean obj_alloced_live;
>>>>>>
>>>>>> -unsigned int gc_decide_marker_number(GC* gc);
>>>>>> +void gc_enable_alloc_obj_live(GC *gc)
>>>>>> +{
>>>>>> +  gc_con_update_stat_before_enable_alloc_live(gc);
>>>>>> +  obj_alloced_live = TRUE;
>>>>>> +}
>>>>>>
>>>>>> -void gc_start_con_mark(GC* gc)
>>>>>> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>>>>>>  {
>>>>>> -  int disable_count;
>>>>>> -  unsigned int num_marker;
>>>>>> -
>>>>>> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
>>>>>> -
>>>>>> -  lock(gc->lock_enum);
>>>>>> -  disable_count = hythread_reset_suspend_disable();
>>>>>> -  int64 pause_start = time_now();
>>>>>> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>>> -  gc_prepare_rootset(gc);
>>>>>> -
>>>>>> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
>>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>>>
>>>>>> -  num_marker = gc_decide_marker_number(gc);
>>>>>> -
>>>>>> -  /*start concurrent mark*/
>>>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>>>> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>>> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>>>> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>>>> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>>>> -    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>>>> +      continue;
>>>>>> +    num_live_obj += conclctor->live_obj_num;
>>>>>> +    size_live_obj += conclctor->live_obj_size;
>>>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>>>> +    conclctor->live_obj_num = 0;
>>>>>> +    conclctor->live_obj_size = 0;
>>>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>>>   }
>>>>>>
>>>>>> -  unlock(gc->lock_enum);
>>>>>> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>>>> -  vm_resume_threads_after();
>>>>>> -  assert(hythread_is_suspend_enabled());
>>>>>> -  hythread_set_suspend_disable(disable_count);
>>>>>> -
>>>>>> -  unlock(gc->lock_con_mark);
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  con_collection_stat->live_size_marked += size_live_obj;
>>>>>> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>>> +
>>>>>>  }
>>>>>>
>>>>>> -void mostly_con_mark_terminate_reset();
>>>>>> -void terminate_mostly_con_mark();
>>>>>> -
>>>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
>>>>>> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
>>>>>> +//called by the marker when it finishes
>>>>>> +void gc_con_update_stat_after_marking(GC *gc)
>>>>>>  {
>>>>>> -  gc_check_con_mark(gc);
>>>>>> -
>>>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY))
>>>>>> -    terminate_mostly_con_mark();
>>>>>> -
>>>>>> -  gc_wait_con_mark_finish(gc);
>>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>>>
>>>>>> -  int disable_count;
>>>>>> -  if(need_STW){
>>>>>> -    /*suspend the mutators.*/
>>>>>> -    lock(gc->lock_enum);
>>>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -      /*In mostly concurrent algorithm, there's a final marking pause.
>>>>>> -            Prepare root set for final marking.*/
>>>>>> -      disable_count = hythread_reset_suspend_disable();
>>>>>> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>>> -      gc_prepare_rootset(gc);
>>>>>> -    }else{
>>>>>> -      disable_count = vm_suspend_all_threads();
>>>>>> -    }
>>>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>>>> +      continue;
>>>>>> +    num_live_obj += conclctor->live_obj_num;
>>>>>> +    size_live_obj += conclctor->live_obj_size;
>>>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>>>> +    conclctor->live_obj_num = 0;
>>>>>> +    conclctor->live_obj_size = 0;
>>>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>>>   }
>>>>>>
>>>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -    /*In mostly concurrent algorithm, there's a final marking pause.
>>>>>> -          Suspend the mutators once again and finish the marking phase.*/
>>>>>> -
>>>>>> -    /*prepare dirty object*/
>>>>>> -    gc_prepare_dirty_set(gc);
>>>>>> -
>>>>>> -    gc_set_weakref_sets(gc);
>>>>>> -
>>>>>> -    /*start STW mark*/
>>>>>> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>>> -
>>>>>> -    mostly_con_mark_terminate_reset();
>>>>>> -    gc_clear_dirty_set(gc);
>>>>>> -  }
>>>>>> -
>>>>>> -  gc_reset_dirty_set(gc);
>>>>>> -
>>>>>> -  if(need_STW){
>>>>>> -    unlock(gc->lock_enum);
>>>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>>> -      vm_resume_threads_after();
>>>>>> -      assert(hythread_is_suspend_enabled());
>>>>>> -      hythread_set_suspend_disable(disable_count);
>>>>>> -    }else{
>>>>>> -      vm_resume_all_threads(disable_count);
>>>>>> -    }
>>>>>> -  }
>>>>>> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
>>>>>> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>>>
>>>>>> +   /*statistics information update (marking_end_time, trace_rate) */
>>>>>> +  con_collection_stat->marking_end_time = time_now();
>>>>>> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>>>> +
>>>>>> +  unsigned int heap_size =
>>>>>> +       con_collection_stat->surviving_size_at_gc_end +
>>>>>> +       gc_get_mutator_new_obj_size(gc);
>>>>>> +
>>>>>> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
>>>>>> +
>>>>>> +
>>>>>> +
>>>>>> +  /*
>>>>>> +  //statistics just for debugging
>>>>>> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
>>>>>> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
>>>>>> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
>>>>>> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
>>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
>>>>>> +  */
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_con_mark(GC* gc)
>>>>>> +void gc_PSTW_update_stat_after_marking(GC *gc)
>>>>>>  {
>>>>>> -  gc->num_active_markers = 0;
>>>>>> -  gc_mark_unset_concurrent();
>>>>>> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  con_collection_stat->live_size_marked = size_live_obj;
>>>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>>>> +
>>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>>>>  }
>>>>>>
>>>>>> -int64 gc_get_con_mark_time(GC* gc)
>>>>>> +//Called only when heap is exhuaset
>>>>>> +void gc_con_update_stat_heap_exhausted(GC* gc)
>>>>>>  {
>>>>>> -  int64 time_mark = 0;
>>>>>> -  Marker** markers = gc->markers;
>>>>>> -  unsigned int i;
>>>>>> -  for(i = 0; i < gc->num_active_markers; i++){
>>>>>> -    Marker* marker = markers[i];
>>>>>> -    if(marker->time_mark > time_mark){
>>>>>> -      time_mark = marker->time_mark;
>>>>>> -    }
>>>>>> -    marker->time_mark = 0;
>>>>>> -  }
>>>>>> -  return time_mark;
>>>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
>>>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>>>>>>  }
>>>>>>
>>>>>> -void gc_start_con_sweep(GC* gc)
>>>>>> +
>>>>>> +//just debugging
>>>>>> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>>>>>>  {
>>>>>> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
>>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>>>
>>>>>> -  /*FIXME: enable finref*/
>>>>>> -  if(!IGNORE_FINREF ){
>>>>>> -    gc_set_obj_with_fin(gc);
>>>>>> -    Collector* collector = gc->collectors[0];
>>>>>> -    collector_identify_finref(collector);
>>>>>> -#ifndef BUILD_IN_REFERENT
>>>>>> -  }else{
>>>>>> -    gc_set_weakref_sets(gc);
>>>>>> -    gc_update_weakref_ignore_finref(gc);
>>>>>> -#endif
>>>>>> +  unsigned int num_collectors = gc->num_active_collectors;
>>>>>> +  Collector** collectors = gc->collectors;
>>>>>> +  unsigned int i;
>>>>>> +  for(i = 0; i < num_collectors; i++){
>>>>>> +    Collector* collector = collectors[i];
>>>>>> +    num_live_obj += collector->live_obj_num;
>>>>>> +    size_live_obj += collector->live_obj_size;
>>>>>> +    collector->live_obj_num = 0;
>>>>>> +    collector->live_obj_size = 0;
>>>>>>   }
>>>>>> +
>>>>>> +  return size_live_obj;
>>>>>> +}
>>>>>>
>>>>>> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
>>>>>> +//Called when Con GC ends, must called in a STW period
>>>>>> +void gc_reset_con_space_stat(GC *gc)
>>>>>> +{
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>>>>>>
>>>>>> -  gc_set_weakref_sets(gc);
>>>>>> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
>>>>>> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
>>>>>> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
>>>>>> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
>>>>>> +  }
>>>>>> +
>>>>>> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
>>>>>> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
>>>>>> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
>>>>>> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>>>>>>
>>>>>> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
>>>>>> -      So, when this assumption changed, we should modified the below function.*/
>>>>>> -  gc_identify_dead_weak_roots(gc);
>>>>>>
>>>>>> -  /*start concurrent mark*/
>>>>>> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>>> +  /*
>>>>>> +  //just debugging
>>>>>> +  if( !gc_is_specify_con_sweep() ) {
>>>>>> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
>>>>>> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
>>>>>> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
>>>>>> +  }*/
>>>>>>
>>>>>> -  unlock(gc->lock_con_sweep);
>>>>>> +  int64 current_time = time_now();
>>>>>> +
>>>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>>>> +       unsigned int gc_interval_time = 0;
>>>>>> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
>>>>>> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
>>>>>> +       else
>>>>>> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
>>>>>> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
>>>>>> +       gc_update_scheduler_parameter(gc);
>>>>>> +  } else {
>>>>>> +     gc_force_update_scheduler_parameter(gc);
>>>>>> +  }
>>>>>> +
>>>>>> +  con_collection_stat->gc_end_time = current_time;
>>>>>> +
>>>>>> +  con_collection_stat->live_size_marked = 0;
>>>>>> +  con_collection_stat->live_alloc_size = 0;
>>>>>> +  con_collection_stat->alloc_size_before_alloc_live = 0;
>>>>>> +  con_collection_stat->marking_start_time = 0;
>>>>>> +  con_collection_stat->marking_end_time = 0;
>>>>>> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
>>>>>> +  con_collection_stat->pause_start_time = 0;
>>>>>> +  assert(con_collection_stat->heap_utilization_rate<1);
>>>>>> +
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_con_sweep(GC* gc)
>>>>>> +void gc_con_stat_information_out(GC *gc)
>>>>>>  {
>>>>>> -  gc->num_active_collectors = 0;
>>>>>> -  gc_sweep_unset_concurrent();
>>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
>>>>>> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
>>>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>>>>  }
>>>>>>
>>>>>> -void gc_wait_con_sweep_finish(GC* gc)
>>>>>> +void gc_reset_after_con_collection(GC* gc)
>>>>>>  {
>>>>>> -  wait_collection_finish(gc);
>>>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>>>> +  assert(gc_is_specify_con_gc());
>>>>>> +  int64 reset_start = time_now();
>>>>>> +  if(!IGNORE_FINREF ){
>>>>>> +    INFO2("gc.process", "GC: finref process after collection ...\n");
>>>>>> +    gc_put_finref_to_vm(gc);
>>>>>> +    gc_reset_finref_metadata(gc);
>>>>>> +    gc_activate_finref_threads((GC*)gc);
>>>>>> +#ifndef BUILD_IN_REFERENT
>>>>>> +  } else {
>>>>>> +    gc_clear_weakref_pools(gc);
>>>>>> +    gc_clear_finref_repset_pool(gc);
>>>>>> +#endif
>>>>>> +  }
>>>>>> +  reset_start = time_now();
>>>>>> +  gc_reset_con_space_stat(gc);
>>>>>> +  gc_clear_conclctor_role(gc);
>>>>>> +  vm_reclaim_native_objs();
>>>>>>  }
>>>>>>
>>>>>> -void gc_finish_con_sweep(GC * gc)
>>>>>> +
>>>>>> +
>>>>>> +void gc_set_default_con_algo()
>>>>>>  {
>>>>>> -  gc_wait_con_sweep_finish(gc);
>>>>>> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>>>> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>>>  }
>>>>>>
>>>>>> -void gc_try_finish_con_phase(GC * gc)
>>>>>> +void gc_decide_con_algo(char* concurrent_algo)
>>>>>>  {
>>>>>> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
>>>>>> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
>>>>>> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
>>>>>> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
>>>>>> -          here to guarantee this occasional case.*/
>>>>>> -    if(try_lock(gc->lock_con_mark)){
>>>>>> -      unlock(gc->lock_con_mark);
>>>>>> -      gc_finish_con_mark(gc, TRUE);
>>>>>> -    }
>>>>>> -  }
>>>>>> -
>>>>>> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
>>>>>> -    //The reason is same as concurrent mark above.
>>>>>> -    if(try_lock(gc->lock_con_sweep)){
>>>>>> -      unlock(gc->lock_con_sweep);
>>>>>> -      gc_finish_con_sweep(gc);
>>>>>> -    }
>>>>>> +  string_to_upper(concurrent_algo);
>>>>>> +  GC_PROP &= ~ALGO_CON_MASK;
>>>>>> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>>>> +    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>>> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>>>> +    GC_PROP |= ALGO_CON_MOSTLY;
>>>>>> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>>>> +    GC_PROP |= ALGO_CON_OTF_REF;
>>>>>>   }
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>>
>>>>>> -void gc_reset_after_con_collect(GC* gc)
>>>>>> +/*
>>>>>> +    gc start enumeration phase, now, it is in a stop-the-world manner
>>>>>> +*/
>>>>>> +void gc_start_con_enumeration(GC * gc)
>>>>>>  {
>>>>>> -  assert(gc_is_specify_con_gc());
>>>>>> -
>>>>>> -  int64 time_mutator = gc_get_mutator_time(gc);
>>>>>> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
>>>>>> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>>> +  gc_prepare_rootset(gc);
>>>>>> +}
>>>>>>
>>>>>> -  gc_reset_interior_pointer_table();
>>>>>> +//unsigned int gc_decide_marker_number(GC* gc);
>>>>>> +unsigned int gc_get_marker_number(GC* gc);
>>>>>> +/*  gc start marking phase */
>>>>>> +void gc_start_con_marking(GC *gc)
>>>>>> +{
>>>>>> +  unsigned int num_marker;
>>>>>> +  num_marker = gc_get_marker_number(gc);
>>>>>>
>>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>>> -
>>>>>> -  if(gc_mark_is_concurrent()){
>>>>>> -    gc_reset_con_mark(gc);
>>>>>> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
>>>>>> +    gc_enable_alloc_obj_live(gc);
>>>>>> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>>> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>>>> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>>>> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
>>>>>> +    gc_enable_alloc_obj_live(gc);
>>>>>> +    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>>>   }
>>>>>> +}
>>>>>> +
>>>>>>
>>>>>> -  if(gc_sweep_is_concurrent()){
>>>>>> -    gc_reset_con_sweep(gc);
>>>>>> +/*
>>>>>> +    gc start sweeping phase
>>>>>> +*/
>>>>>> +void gc_prepare_sweeping(GC *gc) {
>>>>>> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
>>>>>> +  /*FIXME: enable finref*/
>>>>>> +  if(!IGNORE_FINREF ){
>>>>>> +    gc_set_obj_with_fin(gc);
>>>>>> +    Collector* collector = gc->collectors[0];
>>>>>> +    collector_identify_finref(collector);
>>>>>> +  #ifndef BUILD_IN_REFERENT
>>>>>> +  } else {
>>>>>> +    conclctor_set_weakref_sets(gc);
>>>>>> +    gc_update_weakref_ignore_finref(gc);
>>>>>> +  #endif
>>>>>>   }
>>>>>> +  gc_identify_dead_weak_roots(gc);
>>>>>>  }
>>>>>>
>>>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
>>>>>> -{
>>>>>> +int64 get_last_check_point();
>>>>>> +// for the case pure stop the world
>>>>>> +static void gc_partial_con_PSTW( GC *gc) {
>>>>>>   int64 time_collection_start = time_now();
>>>>>> -
>>>>>> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
>>>>>> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
>>>>>> +  // stop the world enumeration
>>>>>>   gc->num_collections++;
>>>>>> -
>>>>>> -  lock(gc->lock_enum);
>>>>>> -
>>>>>>   int disable_count = hythread_reset_suspend_disable();
>>>>>>   gc_set_rootset_type(ROOTSET_IS_REF);
>>>>>>   gc_prepare_rootset(gc);
>>>>>> -  unlock(gc->lock_enum);
>>>>>> -
>>>>>> -  if(gc_sweep_is_concurrent()){
>>>>>> -    if(gc_con_is_in_sweeping())
>>>>>> -      gc_finish_con_sweep(gc);
>>>>>> -  }else{
>>>>>> -    if(gc_con_is_in_marking()){
>>>>>> -      gc_finish_con_mark(gc, FALSE);
>>>>>> -    }
>>>>>> -    gc->in_collection = TRUE;
>>>>>> -    gc_reset_mutator_context(gc);
>>>>>> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>>> -    gc_ms_reclaim_heap((GC_MS*)gc);
>>>>>> -  }
>>>>>> -
>>>>>> -  int64 time_collection = 0;
>>>>>> -  if(gc_mark_is_concurrent()){
>>>>>> -    time_collection = gc_get_con_mark_time(gc);
>>>>>> -    gc_reset_con_mark(gc);
>>>>>> -  }else{
>>>>>> -    time_collection = time_now()-time_collection_start;
>>>>>> -  }
>>>>>> +
>>>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>>>> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>>>> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>>>> +  }
>>>>>> +
>>>>>> +  //reclaim heap
>>>>>> +  gc_reset_mutator_context(gc);
>>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>>> +
>>>>>> +  //update live size
>>>>>> +  gc_PSTW_update_stat_after_marking(gc);
>>>>>> +
>>>>>> +  // reset the collection and resume mutators
>>>>>> +  gc_reset_after_con_collection(gc);
>>>>>>
>>>>>> -  if(gc_sweep_is_concurrent()){
>>>>>> -    gc_reset_con_sweep(gc);
>>>>>> -  }
>>>>>> -
>>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>>> -
>>>>>> -  gc_start_mutator_time_measure(gc);
>>>>>> -
>>>>>> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>>>>>>   vm_resume_threads_after();
>>>>>>   assert(hythread_is_suspend_enabled());
>>>>>> -  hythread_set_suspend_disable(disable_count);
>>>>>> -  int64 pause_time = time_now()-time_collection_start;
>>>>>> -
>>>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
>>>>>> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>>>> -  }else{
>>>>>> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>>>> -  }
>>>>>> -  return;
>>>>>> +  hythread_set_suspend_disable(disable_count);
>>>>>>  }
>>>>>>
>>>>>> -void gc_set_default_con_algo()
>>>>>> -{
>>>>>> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>>>> -  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>>> +void terminate_mostly_con_mark();
>>>>>> +void wspace_mostly_con_final_mark( GC *gc );
>>>>>> +
>>>>>> +// for the case concurrent marking is not finished before heap is exhausted
>>>>>> +static void gc_partial_con_PMSS(GC *gc) {
>>>>>> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>>>> +  // wait concurrent marking finishes
>>>>>> +  int64 wait_start = time_now();
>>>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>>>> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>>>> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
>>>>>> +             gc->gc_concurrent_status == GC_CON_TRACING ||
>>>>>> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
>>>>>> +  {
>>>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>>>> +  }
>>>>>> +
>>>>>> +  /*just debugging*/
>>>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>>> +    int64 pause_time = time_now() - wait_start;
>>>>>> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
>>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
>>>>>> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
>>>>>> +
>>>>>> +  // start STW reclaiming heap
>>>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>>> +  gc_reset_mutator_context(gc);
>>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>>> +
>>>>>> +  // reset after partial stop the world collection
>>>>>> +  gc_reset_after_con_collection(gc);
>>>>>> +  set_con_nil(gc);
>>>>>> +}
>>>>>> +
>>>>>> +// only when current sweep is set to false
>>>>>> +static void gc_partial_con_CMSS(GC *gc) {
>>>>>> +
>>>>>> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>>>> +
>>>>>> +  /*just debugging*/
>>>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
>>>>>> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
>>>>>> +
>>>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>>> +
>>>>>> +  // start reclaiming heap, it will skip the marking phase
>>>>>> +  gc_reset_mutator_context(gc);
>>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>>> +
>>>>>> +  // reset after partial stop the world collection
>>>>>> +  gc_reset_after_con_collection(gc);
>>>>>> +  set_con_nil(gc);
>>>>>> +}
>>>>>> +
>>>>>> +void gc_merge_free_list_global(GC *gc);
>>>>>> +//for the case concurrent marking and partial concurrent sweeping
>>>>>> +static void gc_partial_con_CMPS( GC *gc ) {
>>>>>> +
>>>>>> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
>>>>>> +      vm_thread_yield();  //let the unfinished sweeper run
>>>>>> +  }
>>>>>> +  gc_merge_free_list_global(gc);
>>>>>> +  // reset after partial stop the world collection
>>>>>> +  gc_reset_after_con_collection(gc);
>>>>>> +  set_con_nil(gc);
>>>>>> +}
>>>>>> +
>>>>>> +
>>>>>> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
>>>>>> +  switch( type ) {
>>>>>> +    case GC_PARTIAL_PSTW :
>>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
>>>>>> +      break;
>>>>>> +    case GC_PARTIAL_PMSS :
>>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
>>>>>> +      break;
>>>>>> +    case GC_PARTIAL_CMPS :
>>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
>>>>>> +      break;
>>>>>> +    case GC_PARTIAL_CMSS :
>>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
>>>>>> +      break;
>>>>>> +    case GC_PARTIAL_FCSR :
>>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
>>>>>> +      break;
>>>>>> +  }
>>>>>> +}
>>>>>> +
>>>>>> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
>>>>>> +{
>>>>>> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
>>>>>> +      vm_thread_yield();
>>>>>> +   }
>>>>>> +
>>>>>> +   int64 final_start = time_now();
>>>>>> +   int disable_count = hythread_reset_suspend_disable();
>>>>>> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>>> +   gc_prepare_rootset(gc);
>>>>>> +
>>>>>> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
>>>>>> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
>>>>>> +
>>>>>> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>>>> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
>>>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>>>> +   }
>>>>>> +
>>>>>> +   //final marking phase
>>>>>> +   gc_clear_conclctor_role(gc);
>>>>>> +   wspace_mostly_con_final_mark(gc);
>>>>>> +
>>>>>> +   /*just debugging*/
>>>>>> +   int64 final_time = time_now() - final_start;
>>>>>> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
>>>>>> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>>> +
>>>>>> +  // start STW reclaiming heap
>>>>>> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>>> +   gc_reset_mutator_context(gc);
>>>>>> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>>> +   gc_ms_reclaim_heap((GC_MS*)gc);
>>>>>> +
>>>>>> +   // reset after partial stop the world collection
>>>>>> +   gc_reset_after_con_collection(gc);
>>>>>> +   set_con_nil(gc);
>>>>>> +
>>>>>> +   vm_resume_threads_after();
>>>>>> +   hythread_set_suspend_disable(disable_count);
>>>>>> +   return GC_PARTIAL_PMSS;
>>>>>> +
>>>>>> +}
>>>>>> +
>>>>>> +static unsigned int gc_con_heap_full_otf( GC *gc )
>>>>>> +{
>>>>>> +   unsigned int partial_type; //for time measuring and debugging
>>>>>> +   int disable_count = vm_suspend_all_threads();
>>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +   con_collection_stat->pause_start_time = time_now();
>>>>>> +   switch(gc->gc_concurrent_status) {
>>>>>> +       case GC_CON_START_MARKERS :
>>>>>> +       case GC_CON_TRACING :
>>>>>> +       case GC_CON_TRACE_DONE :
>>>>>> +         partial_type = GC_PARTIAL_PMSS;
>>>>>> +         gc_partial_con_PMSS(gc);
>>>>>> +         break;
>>>>>> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
>>>>>> +         partial_type = GC_PARTIAL_CMSS;
>>>>>> +         gc_partial_con_CMSS(gc);
>>>>>> +         break;
>>>>>> +       case GC_CON_SWEEPING :
>>>>>> +       case GC_CON_SWEEP_DONE :
>>>>>> +         partial_type = GC_PARTIAL_CMPS;
>>>>>> +         gc_partial_con_CMPS(gc);
>>>>>> +         break;
>>>>>> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
>>>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>>>> +         gc_merge_free_list_global(gc);
>>>>>> +         gc_reset_after_con_collection(gc);
>>>>>> +         set_con_nil(gc);
>>>>>> +         break;
>>>>>> +       case GC_CON_RESET :
>>>>>> +       case GC_CON_NIL :
>>>>>> +       case GC_CON_STW_ENUM :
>>>>>> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
>>>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>>>> +         break;
>>>>>> +       /* other state is illegal here */
>>>>>> +       default:
>>>>>> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
>>>>>> +         RAISE_ERROR;
>>>>>> +    }
>>>>>> +    vm_resume_all_threads(disable_count);
>>>>>> +    return partial_type;
>>>>>>  }
>>>>>>
>>>>>> -void gc_decide_con_algo(char* concurrent_algo)
>>>>>> -{
>>>>>> -  string_to_upper(concurrent_algo);
>>>>>> -  GC_PROP &= ~ALGO_CON_MASK;
>>>>>> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>>>> -    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>>> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>>>> -    GC_PROP |= ALGO_CON_MOSTLY;
>>>>>> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>>>> -    GC_PROP |= ALGO_CON_OTF_REF;
>>>>>> +void gc_con_stat_information_out(GC *gc);
>>>>>> +/*
>>>>>> +this method is called before STW gc start, there is a big lock outside
>>>>>> +*/
>>>>>> +void gc_wait_con_finish( GC* gc ) {
>>>>>> +  int64 time_collection_start = time_now();
>>>>>> +  unsigned int partial_type; //for time measuring and debugging
>>>>>> +
>>>>>> +   /* cocurrent gc is idle */
>>>>>> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
>>>>>> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>>> +        con_collection_stat->gc_start_time = time_now();
>>>>>> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
>>>>>> +        partial_type = GC_PARTIAL_PSTW;
>>>>>> +        gc_partial_con_PSTW( gc );
>>>>>> +   } else {
>>>>>> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
>>>>>> +          hythread_safe_point();
>>>>>> +          vm_thread_yield();
>>>>>> +       }
>>>>>> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
>>>>>> +         partial_type = gc_con_heap_full_mostly_con(gc);
>>>>>> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>>> +         partial_type = gc_con_heap_full_otf(gc);
>>>>>> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
>>>>>> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
>>>>>> +              hythread_safe_point();
>>>>>> +              vm_thread_yield();
>>>>>> +            }
>>>>>> +         }
>>>>>> +       }
>>>>>> +       else
>>>>>> +         RAISE_ERROR;
>>>>>> +   }
>>>>>> +
>>>>>> +  int64 pause_time = time_now()-time_collection_start;
>>>>>> +  gc_con_stat_information_out(gc);
>>>>>> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
>>>>>> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
>>>>>> +  } else {
>>>>>> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>>>>>>   }
>>>>>>  }
>>>>>> +
>>>>>> +
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
>>>>>> @@ -19,21 +19,69 @@
>>>>>>  #define _GC_CONCURRENT_H_
>>>>>>  #include "gc_common.h"
>>>>>>
>>>>>> -enum GC_CONCURRENT_STATUS{
>>>>>> -  GC_CON_STATUS_NIL = 0x00,
>>>>>> -  GC_CON_MARK_PHASE = 0x01,
>>>>>> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
>>>>>> -  GC_CON_SWEEP_PHASE = 0x02
>>>>>> +
>>>>>> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
>>>>>> +inline unsigned int trans_time_unit(int64 x)
>>>>>> +{
>>>>>> +  int64 result = x>>10;
>>>>>> +  if(result) return (unsigned int)result;
>>>>>> +  return 1;
>>>>>> +}
>>>>>> +
>>>>>> +#define RAISE_ERROR  assert(0);
>>>>>> +/* concurrent collection states in new design */
>>>>>> +enum GC_CONCURRENT_STATUS {
>>>>>> +  GC_CON_NIL = 0x00,
>>>>>> +  GC_CON_STW_ENUM = 0x01,
>>>>>> +  GC_CON_START_MARKERS = 0x02,
>>>>>> +  GC_CON_TRACING = 0x03,
>>>>>> +  GC_CON_TRACE_DONE = 0x04,
>>>>>> +  GC_CON_BEFORE_SWEEP = 0x05,
>>>>>> +  GC_CON_SWEEPING = 0x06,
>>>>>> +  GC_CON_SWEEP_DONE = 0x07,
>>>>>> +  GC_CON_BEFORE_FINISH = 0x08,
>>>>>> +  GC_CON_RESET = 0x09,
>>>>>> +  GC_CON_DISABLE = 0x0A,
>>>>>> +};
>>>>>> +
>>>>>> +// this type is just for debugging and time measuring
>>>>>> +enum GC_PARTIAL_STW_TYPE {
>>>>>> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
>>>>>> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
>>>>>> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
>>>>>> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
>>>>>> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>>>>>>  };
>>>>>>
>>>>>>  enum HANDSHAKE_SINGAL{
>>>>>>   HSIG_MUTATOR_SAFE = 0x0,
>>>>>> -
>>>>>>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>>>>>>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>>>>>>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>>>>>>  };
>>>>>>
>>>>>> +typedef struct Con_Collection_Statistics {
>>>>>> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
>>>>>> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
>>>>>> +    POINTER_SIZE_INT live_alloc_size;
>>>>>> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
>>>>>> +
>>>>>> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
>>>>>> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
>>>>>> +
>>>>>> +    float heap_utilization_rate;
>>>>>> +
>>>>>> +    int64 gc_start_time;
>>>>>> +    int64 gc_end_time;
>>>>>> +
>>>>>> +    int64 marking_start_time;
>>>>>> +    int64 marking_end_time;
>>>>>> +
>>>>>> +    int64 sweeping_time;
>>>>>> +    int64 pause_start_time;
>>>>>> +
>>>>>> +} Con_Space_Statistics;
>>>>>> +
>>>>>>  inline void gc_set_con_gc(unsigned int con_phase)
>>>>>>  { GC_PROP |= con_phase;  }
>>>>>>
>>>>>> @@ -58,107 +106,101 @@
>>>>>>  inline Boolean gc_is_specify_con_sweep()
>>>>>>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>>>>>>
>>>>>> -extern volatile Boolean concurrent_in_marking;
>>>>>> -extern volatile Boolean concurrent_in_sweeping;
>>>>>> -extern volatile Boolean mark_is_concurrent;
>>>>>> -extern volatile Boolean sweep_is_concurrent;
>>>>>>
>>>>>> -inline Boolean gc_mark_is_concurrent()
>>>>>> -{
>>>>>> -  return mark_is_concurrent;
>>>>>> -}
>>>>>> +extern volatile Boolean obj_alloced_live;
>>>>>>
>>>>>> -inline void gc_mark_set_concurrent()
>>>>>> -{
>>>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
>>>>>> -    gc_enable_alloc_obj_live();
>>>>>> -  mark_is_concurrent = TRUE;
>>>>>> -}
>>>>>> +inline Boolean is_obj_alloced_live()
>>>>>> +{ return obj_alloced_live;  }
>>>>>>
>>>>>> -inline void gc_mark_unset_concurrent()
>>>>>> -{
>>>>>> -  gc_disable_alloc_obj_live();
>>>>>> -  mark_is_concurrent = FALSE;
>>>>>> +inline void gc_disable_alloc_obj_live(GC *gc)
>>>>>> +{
>>>>>> +  obj_alloced_live = FALSE;
>>>>>>  }
>>>>>>
>>>>>> -inline Boolean gc_con_is_in_marking()
>>>>>> +void gc_enable_alloc_obj_live(GC * gc);
>>>>>> +
>>>>>> +/*
>>>>>> +    tranform the states across the collection process,
>>>>>> +  which should be a atomic operation because there are several collector run parallel
>>>>>> +*/
>>>>>> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>>>>>>  {
>>>>>> -  return concurrent_in_marking;
>>>>>> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
>>>>>> +  if( old_state != from_state )
>>>>>> +    return FALSE;
>>>>>> +  else
>>>>>> +    return TRUE;
>>>>>>  }
>>>>>>
>>>>>> -inline Boolean gc_con_is_in_marking(GC* gc)
>>>>>> -{
>>>>>> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
>>>>>> +/* set concurrent to idle,
>>>>>> +    Or enable concurrent gc, called when STW gc finishes
>>>>>> + */
>>>>>> +inline void set_con_nil( GC *gc ) {
>>>>>> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>>>>>>  }
>>>>>>
>>>>>> -inline Boolean gc_sweep_is_concurrent()
>>>>>> -{
>>>>>> -  return sweep_is_concurrent;
>>>>>> +
>>>>>> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
>>>>>> +void gc_start_con_enumeration(GC * gc);
>>>>>> +
>>>>>> +/* gc start marking phase */
>>>>>> +void gc_start_con_marking(GC *gc);
>>>>>> +
>>>>>> +
>>>>>> +/* prepare for sweeping */
>>>>>> +void gc_prepare_sweeping(GC *gc);
>>>>>> +
>>>>>> +/* gc start sweeping phase */
>>>>>> +void gc_start_con_sweeping(GC *gc);
>>>>>> +
>>>>>> +/* gc finish concurrent collection */
>>>>>> +void gc_con_final_work(GC* gc);
>>>>>> +
>>>>>> +
>>>>>> +/* gc wait cocurrent collection finishes */
>>>>>> +void gc_wait_con_finish( GC* gc );
>>>>>> +
>>>>>> +/* is in gc marking phase */
>>>>>> +inline Boolean in_con_marking_phase( GC *gc ) {
>>>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>>>> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>>>>>>  }
>>>>>>
>>>>>> -inline void gc_sweep_set_concurrent()
>>>>>> -{
>>>>>> -  sweep_is_concurrent = TRUE;
>>>>>> +/* is in gc sweeping phase */
>>>>>> +inline Boolean in_con_sweeping_phase( GC *gc ) {
>>>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>>>> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>>>>>>  }
>>>>>>
>>>>>> -inline void gc_sweep_unset_concurrent()
>>>>>> -{
>>>>>> -  sweep_is_concurrent = FALSE;
>>>>>> +inline Boolean in_con_idle( GC *gc ) {
>>>>>> +  return gc->gc_concurrent_status == GC_CON_NIL;
>>>>>>  }
>>>>>>
>>>>>> -inline Boolean gc_con_is_in_sweeping()
>>>>>> -{
>>>>>> -  return concurrent_in_sweeping;
>>>>>> +inline Boolean gc_con_is_in_STW( GC *gc ) {
>>>>>> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>>>>>>  }
>>>>>>
>>>>>> -inline Boolean gc_con_is_in_sweeping(GC* gc)
>>>>>> -{
>>>>>> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
>>>>>> +/* is gc ready to sweeping */
>>>>>> +inline Boolean in_con_ready_sweep( GC *gc ) {
>>>>>> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>>>>>>  }
>>>>>>
>>>>>> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
>>>>>> -{
>>>>>> -  /*Reset status*/
>>>>>> -  concurrent_in_marking = FALSE;
>>>>>> -  concurrent_in_sweeping = FALSE;
>>>>>> -
>>>>>> -  gc->gc_concurrent_status = status;
>>>>>> -  switch(status){
>>>>>> -    case GC_CON_MARK_PHASE:
>>>>>> -      gc_mark_set_concurrent();
>>>>>> -      concurrent_in_marking = TRUE;
>>>>>> -      break;
>>>>>> -    case GC_CON_SWEEP_PHASE:
>>>>>> -      gc_sweep_set_concurrent();
>>>>>> -      concurrent_in_sweeping = TRUE;
>>>>>> -      break;
>>>>>> -    default:
>>>>>> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
>>>>>> -  }
>>>>>> +/* is gc sweeping */
>>>>>> +inline Boolean in_con_sweep( GC *gc ) {
>>>>>> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>>>>>>
>>>>>> -  return;
>>>>>>  }
>>>>>>
>>>>>> -void gc_reset_con_mark(GC* gc);
>>>>>> -void gc_start_con_mark(GC* gc);
>>>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
>>>>>> -int64 gc_get_con_mark_time(GC* gc);
>>>>>> -
>>>>>> -void gc_start_con_sweep(GC* gc);
>>>>>> -void gc_finish_con_sweep(GC * gc);
>>>>>> +void gc_con_update_stat_after_marking( GC *gc );
>>>>>>
>>>>>> -void gc_reset_after_con_collect(GC* gc);
>>>>>> -void gc_try_finish_con_phase(GC * gc);
>>>>>>
>>>>>>  void gc_decide_con_algo(char* concurrent_algo);
>>>>>>  void gc_set_default_con_algo();
>>>>>>
>>>>>> -void gc_reset_con_sweep(GC* gc);
>>>>>> -
>>>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>>>>>>
>>>>>>  extern volatile Boolean gc_sweep_global_normal_chunk;
>>>>>>
>>>>>> +
>>>>>>  inline Boolean gc_is_sweep_global_normal_chunk()
>>>>>>  { return gc_sweep_global_normal_chunk; }
>>>>>>
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -18,13 +18,17 @@
>>>>>>  /**
>>>>>>  * @author Xiao-Feng Li, 2006/10/05
>>>>>>  */
>>>>>> -
>>>>>> +
>>>>>> +#include <open/vm_class_info.h>
>>>>>> +#include <open/vm_class_manipulation.h>
>>>>>>  #include "../gen/gen.h"
>>>>>>  #include "../thread/mutator.h"
>>>>>>  #include "gc_for_barrier.h"
>>>>>>  #include "../mark_sweep/wspace_mark_sweep.h"
>>>>>>  #include "../common/gc_concurrent.h"
>>>>>> +#include "../common/gc_common.h"
>>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>> +#include "../verify/verify_live_heap.h"
>>>>>>
>>>>>>
>>>>>>  /* All the write barrier interfaces need cleanup */
>>>>>> @@ -117,10 +121,8 @@
>>>>>>     Mutator *mutator = (Mutator *)gc_get_tls();
>>>>>>
>>>>>>     //FIXME: Release lock.
>>>>>> -    lock(mutator->dirty_set_lock);
>>>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>>>> -    unlock(mutator->dirty_set_lock);
>>>>>> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>>>>   }
>>>>>>  }
>>>>>>
>>>>>> @@ -204,7 +206,8 @@
>>>>>>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>>>>>>       }
>>>>>>     }
>>>>>> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>>> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
>>>>>> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>>>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>>>   }
>>>>>>  }
>>>>>> @@ -215,32 +218,141 @@
>>>>>>   REF* p_obj_slot = (REF*) p_slot ;
>>>>>>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>>>>>>   if(p_obj && obj_need_remember_oldvar(p_obj)){
>>>>>> +    mutator->dirty_obj_num++;
>>>>>>     mutator_dirtyset_add_entry(mutator, p_obj);
>>>>>>   }
>>>>>>  }
>>>>>>
>>>>>> +/*
>>>>>> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
>>>>>> +{
>>>>>> +  //Mutator *mutator = (Mutator *)gc_get_tls();
>>>>>> +
>>>>>> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
>>>>>> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
>>>>>> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
>>>>>> +
>>>>>> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
>>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
>>>>>> +     analyze_bad_obj(src_obj);
>>>>>> +     RAISE_ERROR;
>>>>>> +  }
>>>>>> +
>>>>>> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
>>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
>>>>>> +     analyze_bad_obj(sub_obj);
>>>>>> +     INFO2("gc.verifier", "[source object]");
>>>>>> +     analyze_bad_obj(src_obj);
>>>>>> +     //RAISE_ERROR;
>>>>>> +     return;
>>>>>> +  }
>>>>>> +
>>>>>> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
>>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
>>>>>> +     analyze_bad_obj(target_obj);
>>>>>> +     RAISE_ERROR;
>>>>>> +  }
>>>>>> +
>>>>>> +  *p_slot = p_target;
>>>>>> +}
>>>>>> +*/
>>>>>>  //===========================================
>>>>>>
>>>>>>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>>>>>>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>>>>>>  {  assert(0); }
>>>>>>
>>>>>> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
>>>>>> +
>>>>>> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
>>>>>> +{
>>>>>> +
>>>>>> +
>>>>>> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
>>>>>> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
>>>>>> +
>>>>>> +    Class_Handle src_class = src_gcvt->gc_clss;
>>>>>> +    Class_Handle dst_class = dst_gcvt->gc_clss;
>>>>>> +
>>>>>> +
>>>>>> +       //element size of src should be same as element size of dst
>>>>>> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
>>>>>> +       unsigned int elem_size = src_gcvt->array_elem_size;
>>>>>> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
>>>>>> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
>>>>>> +       /*
>>>>>> +       #ifdef COMPRESS_REFERENCE
>>>>>> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>>>> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>>>> +       #else
>>>>>> +       #endif
>>>>>> +       */
>>>>>> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>>>> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>>>> +
>>>>>> +
>>>>>> +       if(class_is_instanceof(src_class, dst_class)) {
>>>>>> +         //rem obj before is for OTF GC barriers
>>>>>> +         if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>>> +            for (unsigned int count = 0; count < length; count++) {
>>>>>> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>>> +            }
>>>>>> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>>>> +         }
>>>>>> +
>>>>>> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
>>>>>> +
>>>>>> +       } else { //for the condition src is not the type of dst
>>>>>> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
>>>>>> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>>>> +          }
>>>>>> +
>>>>>> +          for (unsigned int count = 0; count < length; count++) {
>>>>>> +             // 1, null elements copy direct
>>>>>> +             if (src_copy_body[count] == NULL) {
>>>>>> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>>> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>>> +                 }
>>>>>> +                  dst_copy_body[count] = NULL;
>>>>>> +                  continue;
>>>>>> +               }
>>>>>> +
>>>>>> +             // 2, For non-null elements check if types are compatible.
>>>>>> +/*
>>>>>> +#ifdef COMPRESS_REFERENCE
>>>>>> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
>>>>>> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
>>>>>> +#else
>>>>>> +#endif
>>>>>> +*/
>>>>>> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
>>>>>> +
>>>>>> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
>>>>>> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>>>> +                      write_barrier_rem_source_obj(dst_array);
>>>>>> +                  }
>>>>>> +                  return FALSE;
>>>>>> +             }
>>>>>> +
>>>>>> +             if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>>> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>>> +             }
>>>>>> +              dst_copy_body[count] = src_copy_body[count];
>>>>>> +        }
>>>>>> +      }
>>>>>> +
>>>>>> +    //rem obj after is for mostly concurrent
>>>>>> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>>>> +        write_barrier_rem_source_obj(dst_array);
>>>>>> +    }
>>>>>> +
>>>>>> +    return TRUE;
>>>>>> +}
>>>>>> +
>>>>>> +
>>>>>> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>>>>>>  {
>>>>>> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
>>>>>> -      we treat it as an new object. It has already been marked when dest object was created.
>>>>>> -      We use WB_REM_SOURCE_OBJ function here to debug.
>>>>>> -    */
>>>>>> -
>>>>>> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
>>>>>> -    Mutator *mutator = (Mutator *)gc_get_tls();
>>>>>> -    lock(mutator->dirty_set_lock);
>>>>>> -
>>>>>> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
>>>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
>>>>>> -
>>>>>> -    unlock(mutator->dirty_set_lock);
>>>>>> -  }
>>>>>>
>>>>>>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>>>>>>     return;
>>>>>> @@ -283,6 +395,13 @@
>>>>>>       write_barrier_rem_slot_oldvar(p_slot);
>>>>>>       *p_slot = p_target;
>>>>>>       break;
>>>>>> +    //just debugging
>>>>>> +    /*
>>>>>> +    case WB_CON_DEBUG:
>>>>>> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
>>>>>> +       //*p_slot = p_target;
>>>>>> +       break;
>>>>>> +    */
>>>>>>     default:
>>>>>>       assert(0);
>>>>>>       return;
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
>>>>>> @@ -32,7 +32,8 @@
>>>>>>   WB_REM_SOURCE_REF    = 0x02,
>>>>>>   WB_REM_OLD_VAR       = 0x03,
>>>>>>   WB_REM_NEW_VAR       = 0x04,
>>>>>> -  WB_REM_OBJ_SNAPSHOT  = 0x05
>>>>>> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
>>>>>> +  WB_CON_DEBUG = 0x06
>>>>>>  };
>>>>>>
>>>>>>  inline void gc_set_barrier_function(unsigned int wb_function)
>>>>>> @@ -43,4 +44,3 @@
>>>>>>  #endif /* _GC_FOR_BARRIER_H_ */
>>>>>>
>>>>>>
>>>>>> -
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -203,4 +203,3 @@
>>>>>>
>>>>>>
>>>>>>
>>>>>> -
>>>>>>
>>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>>> ==============================================================================
>>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
>>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
>>>>>> @@ -30,7 +30,7 @@
>>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>>>  #include "../move_compact/gc_mc.h"
>>>>>>  #include "interior_pointer.h"
>>>>>> -#include "../thread/marker.h"
>>>>>> +#include "../thread/conclctor.h"
>>>>>>  #include "../thread/collector.h"
>>>>>>  #include "../verify/verify_live_heap.h"
>>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>> @@ -115,7 +115,10 @@
>>>>>>   collection_scheduler_initialize(gc);
>>>>>>
>>>>>>   if(gc_is_specify_con_gc()){
>>>>>> -    marker_initialize(gc);
>>>>>> +     gc->gc_concurrent_status = GC_CON_NIL;
>>>>>> +    conclctor_initialize(gc);
>>>>>> +  } else {
>>>>>> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>>>>>>   }
>>>>>>
>>>>>>   collector_initialize(gc);
>>>>>> @@ -134,6 +137,9 @@
>>>>>>  {
>>>>>>   INFO2("gc.process", "GC: call GC wrapup ....");
>>>>>>   GC* gc =  p_global_gc;
>>>>>> +  // destruct threads first, and then destruct data structures
>>>>>> +  conclctor_destruct(gc);
>>>>>> +  collector_destruct(gc);
>>>>>>
>>>>>>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>>>>>>  gc_ms_destruct((GC_MS*)gc);
>>>>>> @@ -148,8 +154,6 @@
>>>>>>  #ifndef BUILD_IN_REFERENT
>>>>>>   gc_finref_metadata_destruct(gc);
>>>>>>  #endif
>>>>>> -  collector_destruct(gc);
>>>>>> -  marker_destruct(gc);
>>>>>>
>>>>>>   if( verify_live_heap ){
>>>>>>     gc_terminate_heap_verification(gc);
>>>>>> @@ -446,4 +450,3 @@
>>>>>>
>>>>>>
>>>>>>
>>>>>> -
>>>>>>
>>>>>>
>>>>>>
>>>>>
>>>>>
>>>>> --
>>>>> Unless stated otherwise above:
>>>>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>>>>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>>>>
>>>>
>>>>
>>>> --
>>>> http://xiao-feng.blogspot.com
>>>>
>>>
>>>
>>> --
>>> Unless stated otherwise above:
>>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>>
>>
>>
>> --
>> http://xiao-feng.blogspot.com
>>
> 
> 
> 

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Posted by Sian January <si...@googlemail.com>.
The website says "During feature freeze new functionality, big changes
and code redesign are forbidden; only bugs fixes and code tidy-up are
allowed".

So I think we already agreed that "big changes" are also forbidden,
but maybe we need to be a bit clearer about this when we do the freeze
each time, or rename "feature-freeze" to something else to make it
more obvious?

Or perhaps this discussion will be a good enough reminder to everyone :-)


2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
> On Wed, Oct 29, 2008 at 6:33 PM, Sian January
> <si...@googlemail.com> wrote:
>> Thanks for your quick reply Xiao-Feng.
>>
>> I haven't studied the code that much, so if it's all disabled by
>> default as you say then I think it's ok to leave it in.
>>
>> In future I do think it would be better practice to discuss it on the
>> dev list and get some agreement before committing something this size
>> during feature freeze week.  This is because with some large changes
>> there can be unforeseen effects that can impact the code in ways that
>> the original author hadn't realised.  Also if we had several major
>> changes and then saw regressions it could be difficult to work out
>> what had caused them and it could badly delay the release.
>
> Agree. To discuss beforehand is a better practice. I will surely follow it.
>
> To improve the process, we can introduce a guideline that, say, any
> patch bigger than 10KB (?) should be considered as a work of new
> feature, hence not allowed for commit during feature-freeze period.
> That could help to clarify the confusion on what a new feature is.
>
> Suggestion?
>
> Thanks,
> xiaofeng
>
>> Does anyone else have a different opinion on either rolling back the
>> code or on general practice during feature freeze?
>>
>> Thanks,
>>
>> Sian
>>
>>
>> 2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
>>> Sian, thanks for your notice.
>>>
>>> This patch is indeed big. Most of the code are guarded by a macro
>>> USE_UNIQUE_MARK_SWEEP_GC and has no impact on the existing code base.
>>> It is disabled by default, and I tested it before I committed it.
>>>
>>> Actually it is not a new feature, but a fix of existing concurrent GC
>>> scheduler. Well, I admit it looks like a new feature since it changes
>>> lot of code...
>>>
>>> If it has any impact on stability, I will roll back it immediately.
>>> Thanks for your patience.
>>>
>>> Thanks,
>>> xiaofeng
>>>
>>> On Wed, Oct 29, 2008 at 5:22 PM, Sian January
>>> <si...@googlemail.com> wrote:
>>>> Hi Xiao-Feng,
>>>>
>>>> This commit looks like quite a large new feature to me.  Since we're
>>>> in feature freeze this week for M8 I really think it should be backed
>>>> out until after the milestone, as we should be focussing on testing
>>>> and stability at the moment.
>>>>
>>>> Thanks,
>>>>
>>>> Sian
>>>>
>>>>
>>>> 2008/10/29  <xl...@apache.org>:
>>>>> Author: xli
>>>>> Date: Tue Oct 28 20:01:01 2008
>>>>> New Revision: 708756
>>>>>
>>>>> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
>>>>> Log:
>>>>> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>>>>>
>>>>> Added:
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
>>>>> Removed:
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
>>>>> Modified:
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
>>>>> @@ -34,6 +34,7 @@
>>>>>     gc_heap_write_global_slot;
>>>>>     gc_heap_write_ref;
>>>>>     gc_heap_wrote_object;
>>>>> +    gc_heap_copy_object_array;
>>>>>     gc_init;
>>>>>     gc_is_object_pinned;
>>>>>     gc_iterate_heap;
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -31,24 +31,16 @@
>>>>>   return;
>>>>>  }
>>>>>
>>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>>> -{
>>>>> -  if(gc_is_specify_con_gc()){
>>>>> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
>>>>> -  }
>>>>> -  return;
>>>>> -}
>>>>>
>>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>>>>>  {
>>>>>   /*collection scheduler only schedules concurrent collection now.*/
>>>>>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>>>>>     assert(gc_is_specify_con_gc());
>>>>> -    return gc_sched_con_collection(gc, gc_cause);
>>>>> +    return gc_con_perform_collection( gc );
>>>>>   }else{
>>>>>     return FALSE;
>>>>>   }
>>>>>  }
>>>>>
>>>>>
>>>>> -
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>>> @@ -26,12 +26,8 @@
>>>>>  void collection_scheduler_initialize(GC* gc);
>>>>>  void collection_scheduler_destruct(GC* gc);
>>>>>
>>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>>>>>
>>>>>  #endif
>>>>>
>>>>>
>>>>> -
>>>>> -
>>>>> -
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -22,7 +22,7 @@
>>>>>  #include "collection_scheduler.h"
>>>>>  #include "concurrent_collection_scheduler.h"
>>>>>  #include "gc_concurrent.h"
>>>>> -#include "../thread/marker.h"
>>>>> +#include "../thread/conclctor.h"
>>>>>  #include "../verify/verify_live_heap.h"
>>>>>
>>>>>  #define NUM_TRIAL_COLLECTION 2
>>>>> @@ -53,6 +53,7 @@
>>>>>  Boolean gc_use_space_scheduler()
>>>>>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>>>>>
>>>>> +
>>>>>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>>>>>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>>>
>>>>> @@ -75,6 +76,7 @@
>>>>>   STD_FREE(gc->collection_scheduler);
>>>>>  }
>>>>>
>>>>> +
>>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>>>>>  {
>>>>>   string_to_upper(cc_scheduler);
>>>>> @@ -93,281 +95,248 @@
>>>>>   gc_enable_time_scheduler();
>>>>>  }
>>>>>
>>>>> -static Boolean time_to_start_mark(GC* gc)
>>>>> -{
>>>>> -  if(!gc_use_time_scheduler()) return FALSE;
>>>>> -
>>>>> -  int64 time_current = time_now();
>>>>> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
>>>>> -}
>>>>> -
>>>>> -static Boolean space_to_start_mark(GC* gc)
>>>>> -{
>>>>> -  if(!gc_use_space_scheduler()) return FALSE;
>>>>> +/*====================== new scheduler ===================*/
>>>>> +extern unsigned int NUM_CON_MARKERS;
>>>>> +extern unsigned int NUM_CON_SWEEPERS;
>>>>> +unsigned int gc_get_mutator_number(GC *gc);
>>>>> +
>>>>> +#define MOSTLY_CON_MARKER_DIVISION 0.5
>>>>> +unsigned int mostly_con_final_marker_num=1;
>>>>> +unsigned int mostly_con_long_marker_num=1;
>>>>> +
>>>>> +unsigned int gc_get_marker_number(GC* gc) {
>>>>> +  unsigned int mutator_num = gc_get_mutator_number(gc);
>>>>> +  unsigned int marker_specified = NUM_CON_MARKERS;
>>>>> +  if(marker_specified == 0) {
>>>>> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>>> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
>>>>> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>>> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
>>>>> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
>>>>> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
>>>>> +    }
>>>>> +  }
>>>>>
>>>>> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
>>>>> -  return (size_new_obj > space_threshold_to_start_mark);
>>>>> +  assert(marker_specified);
>>>>> +  return marker_specified;
>>>>>  }
>>>>>
>>>>> -static Boolean gc_need_start_con_mark(GC* gc)
>>>>> -{
>>>>> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
>>>>> -
>>>>> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
>>>>> -    return TRUE;
>>>>> -  else
>>>>> -    return FALSE;
>>>>> +#define CON_SWEEPER_DIVISION 0.8
>>>>> +unsigned int gc_get_sweeper_numer(GC *gc) {
>>>>> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
>>>>> +  if(sweeper_specified == 0)
>>>>> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
>>>>> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
>>>>> +  assert(sweeper_specified);
>>>>> +  return sweeper_specified;
>>>>>  }
>>>>>
>>>>> -static Boolean gc_need_start_con_sweep(GC* gc)
>>>>> -{
>>>>> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>>>>>
>>>>> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
>>>>> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
>>>>> -    return TRUE;
>>>>> -  else
>>>>> -    return FALSE;
>>>>> -}
>>>>>
>>>>> -static Boolean gc_need_reset_after_con_collect(GC* gc)
>>>>> -{
>>>>> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
>>>>> -    return TRUE;
>>>>> -  else
>>>>> -    return FALSE;
>>>>> -}
>>>>>
>>>>> -static Boolean gc_need_start_con_enum(GC* gc)
>>>>> -{
>>>>> -  /*TODO: support on-the-fly root set enumeration.*/
>>>>> -  return FALSE;
>>>>> -}
>>>>> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
>>>>> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
>>>>> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>>>>>
>>>>> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
>>>>> -#define TIME_CORRECTION_OTF_MARK 0.65f
>>>>> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
>>>>> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
>>>>> -
>>>>> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
>>>>> -{
>>>>> -  Space* space = NULL;
>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>> -
>>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>> -  space = (Space*) gc_get_wspace(gc);
>>>>> -#endif
>>>>> -  if(!space) return;
>>>>> +/* for checking heap effcient*/
>>>>> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
>>>>> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
>>>>> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>>>>>
>>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>>> -
>>>>> -  unsigned int slot_index = cc_scheduler->last_window_index;
>>>>> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
>>>>> -
>>>>> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
>>>>> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
>>>>> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
>>>>> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
>>>>> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>>>>>
>>>>> -  cc_scheduler->last_mutator_time = time_mutator;
>>>>> -  cc_scheduler->last_collector_time = time_collection;
>>>>> -
>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>>> -    return;
>>>>> -
>>>>> -  cc_scheduler->alloc_rate_window[slot_index]
>>>>> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
>>>>> +static int64 last_check_time_point = time_now();
>>>>> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>>>>>
>>>>> -  if(gc_mark_is_concurrent()){
>>>>> -    cc_scheduler->trace_rate_window[slot_index]
>>>>> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
>>>>> -  }else{
>>>>> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
>>>>> -  }
>>>>> -
>>>>> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
>>>>> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
>>>>> +//just debugging
>>>>> +int64 get_last_check_point()
>>>>> +{
>>>>> +   return last_check_time_point;
>>>>>  }
>>>>>
>>>>> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
>>>>> -{
>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>>> -    return;
>>>>> +static unsigned int alloc_space_threshold = 0;
>>>>>
>>>>> -  Space* space = NULL;
>>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>> -  space = (Space*) gc_get_wspace(gc);
>>>>> -#endif
>>>>> -  if(!space) return;
>>>>> -
>>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>>> -
>>>>> -  float sum_alloc_rate = 0;
>>>>> -  float sum_trace_rate = 0;
>>>>> -  float sum_space_util_ratio = 0;
>>>>> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
>>>>> +static unsigned int space_check_stage_2; //BIG_DELTA
>>>>>
>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
>>>>> +{
>>>>>
>>>>> -  int64 time_this_collection_correction = 0;
>>>>> -#if 0
>>>>> -  float space_util_ratio = space_stat->space_utilization_ratio;
>>>>> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
>>>>> -    time_this_collection_correction = 0;
>>>>> -  }else{
>>>>> -    time_this_collection_correction
>>>>> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
>>>>> -  }
>>>>> -#endif
>>>>> -
>>>>> -  unsigned int i;
>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
>>>>> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
>>>>> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
>>>>> -  }
>>>>> -
>>>>> -  TRACE2("gc.con.cs","Allocation Rate: ");
>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
>>>>> -  }
>>>>> -
>>>>> -  TRACE2("gc.con.cs","Tracing Rate: ");
>>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
>>>>> -  }
>>>>> -
>>>>> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
>>>>> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
>>>>> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
>>>>> -
>>>>> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
>>>>> -
>>>>> -  if(average_alloc_rate == 0 ){
>>>>> -    time_delay_to_start_mark = MIN_DELAY_TIME;
>>>>> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
>>>>> -  }else if(average_trace_rate == 0){
>>>>> -    time_delay_to_start_mark = MAX_DELAY_TIME;
>>>>> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>>> -  }else{
>>>>> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
>>>>> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
>>>>> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
>>>>> -
>>>>> -    if(time_alloc_expected > time_trace_expected){
>>>>> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
>>>>> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
>>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
>>>>> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
>>>>> -      }
>>>>> -    }else{
>>>>> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
>>>>> +  float util_rate = con_collection_stat->heap_utilization_rate;
>>>>> +  unsigned int space_threshold = 0;
>>>>> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
>>>>> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
>>>>> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>>> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
>>>>> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
>>>>> +
>>>>> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
>>>>> +    } else {  //use default
>>>>> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>>> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>>     }
>>>>> -
>>>>> -    cc_scheduler->space_threshold_to_start_mark =
>>>>> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
>>>>> -
>>>>> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
>>>>> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
>>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>>> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>>   }
>>>>> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
>>>>> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>>>>>
>>>>> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
>>>>> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
>>>>> +  else
>>>>> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
>>>>> +
>>>>> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
>>>>> +  return space_threshold;
>>>>>  }
>>>>>
>>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>>> -{
>>>>> -  assert(gc_is_specify_con_gc());
>>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
>>>>> -
>>>>> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
>>>>> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
>>>>> -
>>>>> -  return;
>>>>> -}
>>>>> -
>>>>> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
>>>>> +/* this parameters are updated at end of GC */
>>>>> +void gc_update_scheduler_parameter( GC *gc )
>>>>>  {
>>>>> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
>>>>> -  vm_gc_lock_enum();
>>>>> -
>>>>> -  gc_try_finish_con_phase(gc);
>>>>> -
>>>>> -  if(gc_need_start_con_enum(gc)){
>>>>> -    /*TODO:Concurrent rootset enumeration.*/
>>>>> -    assert(0);
>>>>> -  }
>>>>> -
>>>>> -  if(gc_need_start_con_mark(gc)){
>>>>> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
>>>>> -    gc_start_con_mark(gc);
>>>>> -    vm_gc_unlock_enum();
>>>>> -    unlock(gc->lock_collect_sched);
>>>>> -    return TRUE;
>>>>> -  }
>>>>> -
>>>>> -  if(gc_need_start_con_sweep(gc)){
>>>>> -    gc->num_collections++;
>>>>> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
>>>>> -    gc_start_con_sweep(gc);
>>>>> -    vm_gc_unlock_enum();
>>>>> -    unlock(gc->lock_collect_sched);
>>>>> -    return TRUE;
>>>>> -  }
>>>>> -
>>>>> -  if(gc_need_reset_after_con_collect(gc)){
>>>>> -    int64 pause_start = time_now();
>>>>> -    int disable_count = vm_suspend_all_threads();
>>>>> -    gc_reset_after_con_collect(gc);
>>>>> -    gc_start_mutator_time_measure(gc);
>>>>> -    set_collection_end_time();
>>>>> -    vm_resume_all_threads(disable_count);
>>>>> -    vm_gc_unlock_enum();
>>>>> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>>> -    unlock(gc->lock_collect_sched);
>>>>> -    return TRUE;
>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +   last_check_time_point = time_now();
>>>>> +
>>>>> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>>> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
>>>>> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
>>>>> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
>>>>> +
>>>>> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
>>>>> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>>> +   if(gc_is_specify_con_sweep()) {
>>>>> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
>>>>> +   }
>>>>> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
>>>>> +}
>>>>> +
>>>>> +void gc_force_update_scheduler_parameter( GC *gc )
>>>>> +{
>>>>> +    last_check_time_point = time_now();
>>>>> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
>>>>> +    check_delay_time = time_now();
>>>>> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
>>>>> +}
>>>>> +
>>>>> +
>>>>> +
>>>>> +static inline Boolean check_start_mark( GC *gc )
>>>>> +{
>>>>> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +   /*just debugging*/
>>>>> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
>>>>> +   if( alloc_space_threshold < new_object_occupied_size ) {
>>>>> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
>>>>> +       return TRUE;
>>>>> +   }
>>>>> +
>>>>> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
>>>>> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
>>>>> +   int64 last_check_delay = check_delay_time;
>>>>> +
>>>>> +   if( free_space < space_check_stage_2 ) {
>>>>> +       check_delay_time = SMALL_DELTA;
>>>>> +   } else if( free_space < space_check_stage_1 ) {
>>>>> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
>>>>> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
>>>>> +           unsigned int interval_space = new_object_occupied_size;
>>>>> +           con_collection_stat->alloc_rate = interval_space/interval_time;
>>>>> +       }
>>>>> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
>>>>> +   }
>>>>> +   last_check_time_point = time_now();
>>>>> +
>>>>> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
>>>>> +   return FALSE;
>>>>> +}
>>>>> +
>>>>> +static SpinLock check_lock;
>>>>> +static inline Boolean space_should_start_mark( GC *gc)
>>>>> +{
>>>>> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
>>>>> +      Boolean should_start = check_start_mark(gc);
>>>>> +      unlock(check_lock);
>>>>> +      return should_start;
>>>>>   }
>>>>> -  vm_gc_unlock_enum();
>>>>> -  unlock(gc->lock_collect_sched);
>>>>>   return FALSE;
>>>>>  }
>>>>>
>>>>> -extern unsigned int NUM_MARKERS;
>>>>> -
>>>>> -unsigned int gc_decide_marker_number(GC* gc)
>>>>> -{
>>>>> -  unsigned int num_active_marker;
>>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>>> +inline static Boolean gc_con_start_condition( GC* gc ) {
>>>>> +   return space_should_start_mark(gc);
>>>>> +}
>>>>>
>>>>> -  /*If the number of markers is specfied, just return the specified value.*/
>>>>> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>>>>>
>>>>> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
>>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
>>>>> -    /*Start trial cycle, collection set to 1 in trial cycle and */
>>>>> -    num_active_marker = 1;
>>>>> -  }else{
>>>>> -    num_active_marker = cc_scheduler->last_marker_num;
>>>>> -    int64 c_time = cc_scheduler->last_collector_time;
>>>>> -    int64 m_time = cc_scheduler->last_mutator_time;
>>>>> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
>>>>> -
>>>>> -    if(num_active_marker == 0) num_active_marker = 1;
>>>>> -
>>>>> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
>>>>> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
>>>>> -      num_active_marker ++;
>>>>> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
>>>>> -    }else if((float)d_time > (m_time * 0.6)){
>>>>> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
>>>>> -      num_active_marker --;
>>>>> -      if(num_active_marker == 0)  num_active_marker = 1;
>>>>> -    }
>>>>> -
>>>>> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
>>>>> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
>>>>> +void gc_reset_after_con_collection(GC *gc);
>>>>> +void gc_merge_free_list_global(GC *gc);
>>>>> +void gc_con_stat_information_out(GC *gc);
>>>>> +
>>>>> +unsigned int sub_time = 0;
>>>>> +int64 pause_time = 0;
>>>>> +/*
>>>>> +   concurrent collection entry function, it may start proper phase according to the current state.
>>>>> +*/
>>>>> +Boolean gc_con_perform_collection( GC* gc ) {
>>>>> +  int disable_count;
>>>>> +  int64 pause_start;
>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  switch( gc->gc_concurrent_status ) {
>>>>> +    case GC_CON_NIL :
>>>>> +      if( !gc_con_start_condition(gc) )
>>>>> +        return FALSE;
>>>>> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
>>>>> +        return FALSE;
>>>>> +
>>>>> +      gc->num_collections++;
>>>>> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
>>>>> +
>>>>> +      con_collection_stat->gc_start_time = time_now();
>>>>> +      disable_count = hythread_reset_suspend_disable();
>>>>> +
>>>>> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
>>>>> +      con_collection_stat->marking_start_time = time_now();
>>>>> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
>>>>> +      gc_start_con_marking(gc);
>>>>> +
>>>>> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
>>>>> +      vm_resume_threads_after();
>>>>> +      hythread_set_suspend_disable(disable_count);
>>>>> +      break;
>>>>> +
>>>>> +    case GC_CON_BEFORE_SWEEP :
>>>>> +      if(!gc_is_specify_con_sweep())
>>>>> +         return FALSE;
>>>>> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
>>>>> +         return FALSE;
>>>>> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
>>>>> +      break;
>>>>> +
>>>>> +
>>>>> +    case GC_CON_BEFORE_FINISH :
>>>>> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
>>>>> +                 return FALSE;
>>>>> +        /* thread should be suspended before the state transformation,
>>>>> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
>>>>> +        disable_count = vm_suspend_all_threads();
>>>>> +        pause_start = time_now();
>>>>> +
>>>>> +        gc_merge_free_list_global(gc);
>>>>> +        gc_reset_after_con_collection(gc);
>>>>> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
>>>>> +        pause_time = time_now()-pause_start;
>>>>> +
>>>>> +        vm_resume_all_threads(disable_count);
>>>>> +        gc_con_stat_information_out(gc);
>>>>> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
>>>>> +        break;
>>>>> +    default :
>>>>> +      return FALSE;
>>>>>   }
>>>>> -
>>>>> -  cc_scheduler->last_marker_num = num_active_marker;
>>>>> -  return num_active_marker;
>>>>> +  return TRUE;
>>>>>  }
>>>>>
>>>>> +
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>>> @@ -20,6 +20,7 @@
>>>>>
>>>>>  #define STAT_SAMPLE_WINDOW_SIZE 5
>>>>>
>>>>> +struct GC_MS;
>>>>>  typedef struct Con_Collection_Scheduler {
>>>>>   /*common field*/
>>>>>   GC* gc;
>>>>> @@ -46,10 +47,17 @@
>>>>>  void con_collection_scheduler_initialize(GC* gc);
>>>>>  void con_collection_scheduler_destruct(GC* gc);
>>>>>
>>>>> +void gc_update_scheduler_parameter( GC *gc );
>>>>> +void gc_force_update_scheduler_parameter( GC *gc );
>>>>> +Boolean gc_con_perform_collection( GC* gc );
>>>>>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
>>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>
>>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>>>>>  void gc_set_default_cc_scheduler_kind();
>>>>> +
>>>>> +extern unsigned int mostly_con_final_marker_num;
>>>>> +extern unsigned int mostly_con_long_marker_num;
>>>>> +
>>>>>  #endif
>>>>>
>>>>> +
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -22,7 +22,7 @@
>>>>>  #include "gc_common.h"
>>>>>  #include "gc_metadata.h"
>>>>>  #include "../thread/mutator.h"
>>>>> -#include "../thread/marker.h"
>>>>> +#include "../thread/conclctor.h"
>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>  #include "../gen/gen.h"
>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>> @@ -74,11 +74,19 @@
>>>>>  static int64 collection_start_time = time_now();
>>>>>  static int64 collection_end_time = time_now();
>>>>>
>>>>> -int64 get_collection_end_time()
>>>>> +int64 get_gc_start_time()
>>>>> +{ return collection_start_time; }
>>>>> +
>>>>> +void set_gc_start_time()
>>>>> +{ collection_start_time = time_now(); }
>>>>> +
>>>>> +int64 get_gc_end_time()
>>>>>  { return collection_end_time; }
>>>>>
>>>>> -void set_collection_end_time()
>>>>> -{ collection_end_time = time_now(); }
>>>>> +void set_gc_end_time()
>>>>> +{
>>>>> +  collection_end_time = time_now();
>>>>> +}
>>>>>
>>>>>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>>>>>  {
>>>>> @@ -93,17 +101,17 @@
>>>>>
>>>>>  }
>>>>>
>>>>> -void gc_update_space_stat(GC_MS* gc)
>>>>> +void gc_update_space_stat(GC* gc)
>>>>>  {
>>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>> -    gc_ms_update_space_stat((GC_MS*)gc);
>>>>> +      gc_ms_update_space_stat((GC_MS *)gc);
>>>>>  #endif
>>>>>  }
>>>>>
>>>>> -void gc_reset_space_stat(GC_MS* gc)
>>>>> +void gc_reset_space_stat(GC* gc)
>>>>>  {
>>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>>> -    gc_ms_reset_space_stat((GC_MS*)gc);
>>>>> +      gc_ms_reset_space_stat((GC_MS *)gc);
>>>>>  #endif
>>>>>  }
>>>>>
>>>>> @@ -118,7 +126,7 @@
>>>>>   gc_set_rootset(gc);
>>>>>  }
>>>>>
>>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
>>>>> +void gc_reset_after_collection(GC* gc)
>>>>>  {
>>>>>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>>>>>
>>>>> @@ -139,11 +147,9 @@
>>>>>  #endif
>>>>>   }
>>>>>
>>>>> -  gc_update_space_stat((GC_MS*)gc);
>>>>> +  gc_update_space_stat(gc);
>>>>>
>>>>> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
>>>>> -
>>>>> -  gc_reset_space_stat((GC_MS*)gc);
>>>>> +  gc_reset_space_stat(gc);
>>>>>
>>>>>   gc_reset_collector_state(gc);
>>>>>
>>>>> @@ -154,23 +160,25 @@
>>>>>
>>>>>  }
>>>>>
>>>>> +void set_check_delay( int64 mutator_time );
>>>>> +
>>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>>>>>  {
>>>>>   INFO2("gc.process", "\nGC: GC start ...\n");
>>>>>
>>>>> -  collection_start_time = time_now();
>>>>> -  int64 time_mutator = collection_start_time - collection_end_time;
>>>>> -
>>>>> -  gc->num_collections++;
>>>>>   gc->cause = gc_cause;
>>>>>
>>>>>   if(gc_is_specify_con_gc()){
>>>>> -    gc_finish_con_GC(gc, time_mutator);
>>>>> -    collection_end_time = time_now();
>>>>> +    gc_wait_con_finish(gc);
>>>>>     INFO2("gc.process", "GC: GC end\n");
>>>>>     return;
>>>>>   }
>>>>>
>>>>> +   set_gc_start_time();
>>>>> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
>>>>> +
>>>>> +  gc->num_collections++;
>>>>> +
>>>>>   /* FIXME:: before mutators suspended, the ops below should be very careful
>>>>>      to avoid racing with mutators. */
>>>>>
>>>>> @@ -207,16 +215,16 @@
>>>>>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>>>>>  #endif
>>>>>
>>>>> -  collection_end_time = time_now();
>>>>> +  set_gc_end_time();
>>>>>
>>>>> -  int64 time_collection = collection_end_time - collection_start_time;
>>>>> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>>>>>
>>>>>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>>>>>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>>>>>   gc_gen_space_verbose_info((GC_Gen*)gc);
>>>>>  #endif
>>>>>
>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>> +  gc_reset_after_collection(gc);
>>>>>
>>>>>   gc_assign_free_area_to_mutators(gc);
>>>>>
>>>>> @@ -230,6 +238,3 @@
>>>>>
>>>>>
>>>>>
>>>>> -
>>>>> -
>>>>> -
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
>>>>> @@ -39,7 +39,8 @@
>>>>>
>>>>>  #include "../common/gc_for_barrier.h"
>>>>>
>>>>> -/*
>>>>> +
>>>>> + /*
>>>>>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>>>>>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>>>>>  */
>>>>> @@ -336,19 +337,7 @@
>>>>>   return TRUE;
>>>>>  }
>>>>>
>>>>> -extern volatile Boolean obj_alloced_live;
>>>>> -inline Boolean is_obj_alloced_live()
>>>>> -{ return obj_alloced_live;  }
>>>>>
>>>>> -inline void gc_enable_alloc_obj_live()
>>>>> -{
>>>>> -  obj_alloced_live = TRUE;
>>>>> -}
>>>>> -
>>>>> -inline void gc_disable_alloc_obj_live()
>>>>> -{
>>>>> -  obj_alloced_live = FALSE;
>>>>> -}
>>>>>
>>>>>  /***************************************************************/
>>>>>
>>>>> @@ -391,7 +380,7 @@
>>>>>  /***************************************************************/
>>>>>
>>>>>  /* all GCs inherit this GC structure */
>>>>> -struct Marker;
>>>>> +struct Conclctor;
>>>>>  struct Mutator;
>>>>>  struct Collector;
>>>>>  struct GC_Metadata;
>>>>> @@ -421,9 +410,12 @@
>>>>>   unsigned int num_collectors;
>>>>>   unsigned int num_active_collectors; /* not all collectors are working */
>>>>>
>>>>> -  Marker** markers;
>>>>> -  unsigned int num_markers;
>>>>> +  /*concurrent markers and collectors*/
>>>>> +  Conclctor** conclctors;
>>>>> +  unsigned int num_conclctors;
>>>>> +  //unsigned int num_active_conclctors;
>>>>>   unsigned int num_active_markers;
>>>>> +  unsigned int num_active_sweepers;
>>>>>
>>>>>   /* metadata is the pool for rootset, tracestack, etc. */
>>>>>   GC_Metadata* metadata;
>>>>> @@ -443,7 +435,7 @@
>>>>>
>>>>>   Space_Tuner* tuner;
>>>>>
>>>>> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>>> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>>>   Collection_Scheduler* collection_scheduler;
>>>>>
>>>>>   SpinLock lock_con_mark;
>>>>> @@ -488,11 +480,15 @@
>>>>>
>>>>>  GC* gc_parse_options();
>>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
>>>>> +void gc_relaim_heap_con_mode( GC *gc);
>>>>>  void gc_prepare_rootset(GC* gc);
>>>>>
>>>>>
>>>>> -int64 get_collection_end_time();
>>>>> -void set_collection_end_time();
>>>>> +int64 get_gc_start_time();
>>>>> +void set_gc_start_time();
>>>>> +
>>>>> +int64 get_gc_end_time();
>>>>> +void set_gc_end_time();
>>>>>
>>>>>  /* generational GC related */
>>>>>
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -17,325 +17,582 @@
>>>>>  #include "gc_common.h"
>>>>>  #include "gc_metadata.h"
>>>>>  #include "../thread/mutator.h"
>>>>> -#include "../thread/marker.h"
>>>>> +#include "../thread/conclctor.h"
>>>>>  #include "../thread/collector.h"
>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>>  #include "../gen/gen.h"
>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>> +#include "../mark_sweep/wspace_mark_sweep.h"
>>>>>  #include "interior_pointer.h"
>>>>>  #include "collection_scheduler.h"
>>>>>  #include "gc_concurrent.h"
>>>>>  #include "../common/gc_for_barrier.h"
>>>>> +#include "concurrent_collection_scheduler.h"
>>>>> +#include "../verify/verify_live_heap.h"
>>>>>
>>>>> -volatile Boolean concurrent_in_marking  = FALSE;
>>>>> -volatile Boolean concurrent_in_sweeping = FALSE;
>>>>> -volatile Boolean mark_is_concurrent     = FALSE;
>>>>> -volatile Boolean sweep_is_concurrent    = FALSE;
>>>>> +struct Con_Collection_Statistics;
>>>>>
>>>>>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>>>>>
>>>>> -static void gc_check_con_mark(GC* gc)
>>>>> +//just debugging
>>>>> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>>>>>  {
>>>>> -  if(!is_mark_finished(gc)){
>>>>> -    lock(gc->lock_con_mark);
>>>>> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -      //ignore.
>>>>> -    }
>>>>> -    unlock(gc->lock_con_mark);
>>>>> -  }
>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
>>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
>>>>> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
>>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
>>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>>>>>  }
>>>>>
>>>>> -static void gc_wait_con_mark_finish(GC* gc)
>>>>> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>>>>>  {
>>>>> -  wait_mark_finish(gc);
>>>>> -  gc_set_barrier_function(WB_REM_NIL);
>>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
>>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>>>  }
>>>>> +
>>>>> +volatile Boolean obj_alloced_live;
>>>>>
>>>>> -unsigned int gc_decide_marker_number(GC* gc);
>>>>> +void gc_enable_alloc_obj_live(GC *gc)
>>>>> +{
>>>>> +  gc_con_update_stat_before_enable_alloc_live(gc);
>>>>> +  obj_alloced_live = TRUE;
>>>>> +}
>>>>>
>>>>> -void gc_start_con_mark(GC* gc)
>>>>> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>>>>>  {
>>>>> -  int disable_count;
>>>>> -  unsigned int num_marker;
>>>>> -
>>>>> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
>>>>> -
>>>>> -  lock(gc->lock_enum);
>>>>> -  disable_count = hythread_reset_suspend_disable();
>>>>> -  int64 pause_start = time_now();
>>>>> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>> -  gc_prepare_rootset(gc);
>>>>> -
>>>>> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>>
>>>>> -  num_marker = gc_decide_marker_number(gc);
>>>>> -
>>>>> -  /*start concurrent mark*/
>>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>>> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>>> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>>> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>>> -    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>>> +      continue;
>>>>> +    num_live_obj += conclctor->live_obj_num;
>>>>> +    size_live_obj += conclctor->live_obj_size;
>>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>>> +    conclctor->live_obj_num = 0;
>>>>> +    conclctor->live_obj_size = 0;
>>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>>   }
>>>>>
>>>>> -  unlock(gc->lock_enum);
>>>>> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>>> -  vm_resume_threads_after();
>>>>> -  assert(hythread_is_suspend_enabled());
>>>>> -  hythread_set_suspend_disable(disable_count);
>>>>> -
>>>>> -  unlock(gc->lock_con_mark);
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  con_collection_stat->live_size_marked += size_live_obj;
>>>>> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>> +
>>>>>  }
>>>>>
>>>>> -void mostly_con_mark_terminate_reset();
>>>>> -void terminate_mostly_con_mark();
>>>>> -
>>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
>>>>> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
>>>>> +//called by the marker when it finishes
>>>>> +void gc_con_update_stat_after_marking(GC *gc)
>>>>>  {
>>>>> -  gc_check_con_mark(gc);
>>>>> -
>>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY))
>>>>> -    terminate_mostly_con_mark();
>>>>> -
>>>>> -  gc_wait_con_mark_finish(gc);
>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>>
>>>>> -  int disable_count;
>>>>> -  if(need_STW){
>>>>> -    /*suspend the mutators.*/
>>>>> -    lock(gc->lock_enum);
>>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -      /*In mostly concurrent algorithm, there's a final marking pause.
>>>>> -            Prepare root set for final marking.*/
>>>>> -      disable_count = hythread_reset_suspend_disable();
>>>>> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>> -      gc_prepare_rootset(gc);
>>>>> -    }else{
>>>>> -      disable_count = vm_suspend_all_threads();
>>>>> -    }
>>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>>> +      continue;
>>>>> +    num_live_obj += conclctor->live_obj_num;
>>>>> +    size_live_obj += conclctor->live_obj_size;
>>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>>> +    conclctor->live_obj_num = 0;
>>>>> +    conclctor->live_obj_size = 0;
>>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>>   }
>>>>>
>>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -    /*In mostly concurrent algorithm, there's a final marking pause.
>>>>> -          Suspend the mutators once again and finish the marking phase.*/
>>>>> -
>>>>> -    /*prepare dirty object*/
>>>>> -    gc_prepare_dirty_set(gc);
>>>>> -
>>>>> -    gc_set_weakref_sets(gc);
>>>>> -
>>>>> -    /*start STW mark*/
>>>>> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>> -
>>>>> -    mostly_con_mark_terminate_reset();
>>>>> -    gc_clear_dirty_set(gc);
>>>>> -  }
>>>>> -
>>>>> -  gc_reset_dirty_set(gc);
>>>>> -
>>>>> -  if(need_STW){
>>>>> -    unlock(gc->lock_enum);
>>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>>> -      vm_resume_threads_after();
>>>>> -      assert(hythread_is_suspend_enabled());
>>>>> -      hythread_set_suspend_disable(disable_count);
>>>>> -    }else{
>>>>> -      vm_resume_all_threads(disable_count);
>>>>> -    }
>>>>> -  }
>>>>> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
>>>>> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>>
>>>>> +   /*statistics information update (marking_end_time, trace_rate) */
>>>>> +  con_collection_stat->marking_end_time = time_now();
>>>>> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>>> +
>>>>> +  unsigned int heap_size =
>>>>> +       con_collection_stat->surviving_size_at_gc_end +
>>>>> +       gc_get_mutator_new_obj_size(gc);
>>>>> +
>>>>> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
>>>>> +
>>>>> +
>>>>> +
>>>>> +  /*
>>>>> +  //statistics just for debugging
>>>>> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
>>>>> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
>>>>> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
>>>>> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
>>>>> +  */
>>>>>  }
>>>>>
>>>>> -void gc_reset_con_mark(GC* gc)
>>>>> +void gc_PSTW_update_stat_after_marking(GC *gc)
>>>>>  {
>>>>> -  gc->num_active_markers = 0;
>>>>> -  gc_mark_unset_concurrent();
>>>>> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  con_collection_stat->live_size_marked = size_live_obj;
>>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>>> +
>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>>>  }
>>>>>
>>>>> -int64 gc_get_con_mark_time(GC* gc)
>>>>> +//Called only when heap is exhuaset
>>>>> +void gc_con_update_stat_heap_exhausted(GC* gc)
>>>>>  {
>>>>> -  int64 time_mark = 0;
>>>>> -  Marker** markers = gc->markers;
>>>>> -  unsigned int i;
>>>>> -  for(i = 0; i < gc->num_active_markers; i++){
>>>>> -    Marker* marker = markers[i];
>>>>> -    if(marker->time_mark > time_mark){
>>>>> -      time_mark = marker->time_mark;
>>>>> -    }
>>>>> -    marker->time_mark = 0;
>>>>> -  }
>>>>> -  return time_mark;
>>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
>>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>>>>>  }
>>>>>
>>>>> -void gc_start_con_sweep(GC* gc)
>>>>> +
>>>>> +//just debugging
>>>>> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>>>>>  {
>>>>> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
>>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>>
>>>>> -  /*FIXME: enable finref*/
>>>>> -  if(!IGNORE_FINREF ){
>>>>> -    gc_set_obj_with_fin(gc);
>>>>> -    Collector* collector = gc->collectors[0];
>>>>> -    collector_identify_finref(collector);
>>>>> -#ifndef BUILD_IN_REFERENT
>>>>> -  }else{
>>>>> -    gc_set_weakref_sets(gc);
>>>>> -    gc_update_weakref_ignore_finref(gc);
>>>>> -#endif
>>>>> +  unsigned int num_collectors = gc->num_active_collectors;
>>>>> +  Collector** collectors = gc->collectors;
>>>>> +  unsigned int i;
>>>>> +  for(i = 0; i < num_collectors; i++){
>>>>> +    Collector* collector = collectors[i];
>>>>> +    num_live_obj += collector->live_obj_num;
>>>>> +    size_live_obj += collector->live_obj_size;
>>>>> +    collector->live_obj_num = 0;
>>>>> +    collector->live_obj_size = 0;
>>>>>   }
>>>>> +
>>>>> +  return size_live_obj;
>>>>> +}
>>>>>
>>>>> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
>>>>> +//Called when Con GC ends, must called in a STW period
>>>>> +void gc_reset_con_space_stat(GC *gc)
>>>>> +{
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>>>>>
>>>>> -  gc_set_weakref_sets(gc);
>>>>> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
>>>>> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
>>>>> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
>>>>> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
>>>>> +  }
>>>>> +
>>>>> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
>>>>> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
>>>>> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
>>>>> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>>>>>
>>>>> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
>>>>> -      So, when this assumption changed, we should modified the below function.*/
>>>>> -  gc_identify_dead_weak_roots(gc);
>>>>>
>>>>> -  /*start concurrent mark*/
>>>>> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
>>>>> +  /*
>>>>> +  //just debugging
>>>>> +  if( !gc_is_specify_con_sweep() ) {
>>>>> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
>>>>> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
>>>>> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
>>>>> +  }*/
>>>>>
>>>>> -  unlock(gc->lock_con_sweep);
>>>>> +  int64 current_time = time_now();
>>>>> +
>>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>>> +       unsigned int gc_interval_time = 0;
>>>>> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
>>>>> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
>>>>> +       else
>>>>> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
>>>>> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
>>>>> +       gc_update_scheduler_parameter(gc);
>>>>> +  } else {
>>>>> +     gc_force_update_scheduler_parameter(gc);
>>>>> +  }
>>>>> +
>>>>> +  con_collection_stat->gc_end_time = current_time;
>>>>> +
>>>>> +  con_collection_stat->live_size_marked = 0;
>>>>> +  con_collection_stat->live_alloc_size = 0;
>>>>> +  con_collection_stat->alloc_size_before_alloc_live = 0;
>>>>> +  con_collection_stat->marking_start_time = 0;
>>>>> +  con_collection_stat->marking_end_time = 0;
>>>>> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
>>>>> +  con_collection_stat->pause_start_time = 0;
>>>>> +  assert(con_collection_stat->heap_utilization_rate<1);
>>>>> +
>>>>>  }
>>>>>
>>>>> -void gc_reset_con_sweep(GC* gc)
>>>>> +void gc_con_stat_information_out(GC *gc)
>>>>>  {
>>>>> -  gc->num_active_collectors = 0;
>>>>> -  gc_sweep_unset_concurrent();
>>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>>> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
>>>>> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>>> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
>>>>> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>>> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
>>>>> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
>>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>>>  }
>>>>>
>>>>> -void gc_wait_con_sweep_finish(GC* gc)
>>>>> +void gc_reset_after_con_collection(GC* gc)
>>>>>  {
>>>>> -  wait_collection_finish(gc);
>>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>>> +  assert(gc_is_specify_con_gc());
>>>>> +  int64 reset_start = time_now();
>>>>> +  if(!IGNORE_FINREF ){
>>>>> +    INFO2("gc.process", "GC: finref process after collection ...\n");
>>>>> +    gc_put_finref_to_vm(gc);
>>>>> +    gc_reset_finref_metadata(gc);
>>>>> +    gc_activate_finref_threads((GC*)gc);
>>>>> +#ifndef BUILD_IN_REFERENT
>>>>> +  } else {
>>>>> +    gc_clear_weakref_pools(gc);
>>>>> +    gc_clear_finref_repset_pool(gc);
>>>>> +#endif
>>>>> +  }
>>>>> +  reset_start = time_now();
>>>>> +  gc_reset_con_space_stat(gc);
>>>>> +  gc_clear_conclctor_role(gc);
>>>>> +  vm_reclaim_native_objs();
>>>>>  }
>>>>>
>>>>> -void gc_finish_con_sweep(GC * gc)
>>>>> +
>>>>> +
>>>>> +void gc_set_default_con_algo()
>>>>>  {
>>>>> -  gc_wait_con_sweep_finish(gc);
>>>>> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>>> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>>  }
>>>>>
>>>>> -void gc_try_finish_con_phase(GC * gc)
>>>>> +void gc_decide_con_algo(char* concurrent_algo)
>>>>>  {
>>>>> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
>>>>> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
>>>>> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
>>>>> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
>>>>> -          here to guarantee this occasional case.*/
>>>>> -    if(try_lock(gc->lock_con_mark)){
>>>>> -      unlock(gc->lock_con_mark);
>>>>> -      gc_finish_con_mark(gc, TRUE);
>>>>> -    }
>>>>> -  }
>>>>> -
>>>>> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
>>>>> -    //The reason is same as concurrent mark above.
>>>>> -    if(try_lock(gc->lock_con_sweep)){
>>>>> -      unlock(gc->lock_con_sweep);
>>>>> -      gc_finish_con_sweep(gc);
>>>>> -    }
>>>>> +  string_to_upper(concurrent_algo);
>>>>> +  GC_PROP &= ~ALGO_CON_MASK;
>>>>> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>>> +    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>>> +    GC_PROP |= ALGO_CON_MOSTLY;
>>>>> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>>> +    GC_PROP |= ALGO_CON_OTF_REF;
>>>>>   }
>>>>>  }
>>>>>
>>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>>>>>
>>>>> -void gc_reset_after_con_collect(GC* gc)
>>>>> +/*
>>>>> +    gc start enumeration phase, now, it is in a stop-the-world manner
>>>>> +*/
>>>>> +void gc_start_con_enumeration(GC * gc)
>>>>>  {
>>>>> -  assert(gc_is_specify_con_gc());
>>>>> -
>>>>> -  int64 time_mutator = gc_get_mutator_time(gc);
>>>>> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
>>>>> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>> +  gc_prepare_rootset(gc);
>>>>> +}
>>>>>
>>>>> -  gc_reset_interior_pointer_table();
>>>>> +//unsigned int gc_decide_marker_number(GC* gc);
>>>>> +unsigned int gc_get_marker_number(GC* gc);
>>>>> +/*  gc start marking phase */
>>>>> +void gc_start_con_marking(GC *gc)
>>>>> +{
>>>>> +  unsigned int num_marker;
>>>>> +  num_marker = gc_get_marker_number(gc);
>>>>>
>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>> -
>>>>> -  if(gc_mark_is_concurrent()){
>>>>> -    gc_reset_con_mark(gc);
>>>>> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
>>>>> +    gc_enable_alloc_obj_live(gc);
>>>>> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>>> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>>> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>>> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
>>>>> +    gc_enable_alloc_obj_live(gc);
>>>>> +    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>>   }
>>>>> +}
>>>>> +
>>>>>
>>>>> -  if(gc_sweep_is_concurrent()){
>>>>> -    gc_reset_con_sweep(gc);
>>>>> +/*
>>>>> +    gc start sweeping phase
>>>>> +*/
>>>>> +void gc_prepare_sweeping(GC *gc) {
>>>>> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
>>>>> +  /*FIXME: enable finref*/
>>>>> +  if(!IGNORE_FINREF ){
>>>>> +    gc_set_obj_with_fin(gc);
>>>>> +    Collector* collector = gc->collectors[0];
>>>>> +    collector_identify_finref(collector);
>>>>> +  #ifndef BUILD_IN_REFERENT
>>>>> +  } else {
>>>>> +    conclctor_set_weakref_sets(gc);
>>>>> +    gc_update_weakref_ignore_finref(gc);
>>>>> +  #endif
>>>>>   }
>>>>> +  gc_identify_dead_weak_roots(gc);
>>>>>  }
>>>>>
>>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
>>>>> -{
>>>>> +int64 get_last_check_point();
>>>>> +// for the case pure stop the world
>>>>> +static void gc_partial_con_PSTW( GC *gc) {
>>>>>   int64 time_collection_start = time_now();
>>>>> -
>>>>> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
>>>>> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
>>>>> +  // stop the world enumeration
>>>>>   gc->num_collections++;
>>>>> -
>>>>> -  lock(gc->lock_enum);
>>>>> -
>>>>>   int disable_count = hythread_reset_suspend_disable();
>>>>>   gc_set_rootset_type(ROOTSET_IS_REF);
>>>>>   gc_prepare_rootset(gc);
>>>>> -  unlock(gc->lock_enum);
>>>>> -
>>>>> -  if(gc_sweep_is_concurrent()){
>>>>> -    if(gc_con_is_in_sweeping())
>>>>> -      gc_finish_con_sweep(gc);
>>>>> -  }else{
>>>>> -    if(gc_con_is_in_marking()){
>>>>> -      gc_finish_con_mark(gc, FALSE);
>>>>> -    }
>>>>> -    gc->in_collection = TRUE;
>>>>> -    gc_reset_mutator_context(gc);
>>>>> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>> -    gc_ms_reclaim_heap((GC_MS*)gc);
>>>>> -  }
>>>>> -
>>>>> -  int64 time_collection = 0;
>>>>> -  if(gc_mark_is_concurrent()){
>>>>> -    time_collection = gc_get_con_mark_time(gc);
>>>>> -    gc_reset_con_mark(gc);
>>>>> -  }else{
>>>>> -    time_collection = time_now()-time_collection_start;
>>>>> -  }
>>>>> +
>>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>>> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>>> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>>> +  }
>>>>> +
>>>>> +  //reclaim heap
>>>>> +  gc_reset_mutator_context(gc);
>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>> +
>>>>> +  //update live size
>>>>> +  gc_PSTW_update_stat_after_marking(gc);
>>>>> +
>>>>> +  // reset the collection and resume mutators
>>>>> +  gc_reset_after_con_collection(gc);
>>>>>
>>>>> -  if(gc_sweep_is_concurrent()){
>>>>> -    gc_reset_con_sweep(gc);
>>>>> -  }
>>>>> -
>>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>>> -
>>>>> -  gc_start_mutator_time_measure(gc);
>>>>> -
>>>>> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>>>>>   vm_resume_threads_after();
>>>>>   assert(hythread_is_suspend_enabled());
>>>>> -  hythread_set_suspend_disable(disable_count);
>>>>> -  int64 pause_time = time_now()-time_collection_start;
>>>>> -
>>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
>>>>> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>>> -  }else{
>>>>> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>>> -  }
>>>>> -  return;
>>>>> +  hythread_set_suspend_disable(disable_count);
>>>>>  }
>>>>>
>>>>> -void gc_set_default_con_algo()
>>>>> -{
>>>>> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>>> -  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>> +void terminate_mostly_con_mark();
>>>>> +void wspace_mostly_con_final_mark( GC *gc );
>>>>> +
>>>>> +// for the case concurrent marking is not finished before heap is exhausted
>>>>> +static void gc_partial_con_PMSS(GC *gc) {
>>>>> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>>> +  // wait concurrent marking finishes
>>>>> +  int64 wait_start = time_now();
>>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>>> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>>> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
>>>>> +             gc->gc_concurrent_status == GC_CON_TRACING ||
>>>>> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
>>>>> +  {
>>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>>> +  }
>>>>> +
>>>>> +  /*just debugging*/
>>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>> +    int64 pause_time = time_now() - wait_start;
>>>>> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
>>>>> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
>>>>> +
>>>>> +  // start STW reclaiming heap
>>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>> +  gc_reset_mutator_context(gc);
>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>> +
>>>>> +  // reset after partial stop the world collection
>>>>> +  gc_reset_after_con_collection(gc);
>>>>> +  set_con_nil(gc);
>>>>> +}
>>>>> +
>>>>> +// only when current sweep is set to false
>>>>> +static void gc_partial_con_CMSS(GC *gc) {
>>>>> +
>>>>> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>>> +
>>>>> +  /*just debugging*/
>>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
>>>>> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
>>>>> +
>>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>> +
>>>>> +  // start reclaiming heap, it will skip the marking phase
>>>>> +  gc_reset_mutator_context(gc);
>>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>>> +
>>>>> +  // reset after partial stop the world collection
>>>>> +  gc_reset_after_con_collection(gc);
>>>>> +  set_con_nil(gc);
>>>>> +}
>>>>> +
>>>>> +void gc_merge_free_list_global(GC *gc);
>>>>> +//for the case concurrent marking and partial concurrent sweeping
>>>>> +static void gc_partial_con_CMPS( GC *gc ) {
>>>>> +
>>>>> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
>>>>> +      vm_thread_yield();  //let the unfinished sweeper run
>>>>> +  }
>>>>> +  gc_merge_free_list_global(gc);
>>>>> +  // reset after partial stop the world collection
>>>>> +  gc_reset_after_con_collection(gc);
>>>>> +  set_con_nil(gc);
>>>>> +}
>>>>> +
>>>>> +
>>>>> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
>>>>> +  switch( type ) {
>>>>> +    case GC_PARTIAL_PSTW :
>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
>>>>> +      break;
>>>>> +    case GC_PARTIAL_PMSS :
>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
>>>>> +      break;
>>>>> +    case GC_PARTIAL_CMPS :
>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
>>>>> +      break;
>>>>> +    case GC_PARTIAL_CMSS :
>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
>>>>> +      break;
>>>>> +    case GC_PARTIAL_FCSR :
>>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
>>>>> +      break;
>>>>> +  }
>>>>> +}
>>>>> +
>>>>> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
>>>>> +{
>>>>> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
>>>>> +      vm_thread_yield();
>>>>> +   }
>>>>> +
>>>>> +   int64 final_start = time_now();
>>>>> +   int disable_count = hythread_reset_suspend_disable();
>>>>> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>>> +   gc_prepare_rootset(gc);
>>>>> +
>>>>> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
>>>>> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
>>>>> +
>>>>> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>>> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
>>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>>> +   }
>>>>> +
>>>>> +   //final marking phase
>>>>> +   gc_clear_conclctor_role(gc);
>>>>> +   wspace_mostly_con_final_mark(gc);
>>>>> +
>>>>> +   /*just debugging*/
>>>>> +   int64 final_time = time_now() - final_start;
>>>>> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
>>>>> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>>> +
>>>>> +  // start STW reclaiming heap
>>>>> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>>> +   gc_reset_mutator_context(gc);
>>>>> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>>> +   gc_ms_reclaim_heap((GC_MS*)gc);
>>>>> +
>>>>> +   // reset after partial stop the world collection
>>>>> +   gc_reset_after_con_collection(gc);
>>>>> +   set_con_nil(gc);
>>>>> +
>>>>> +   vm_resume_threads_after();
>>>>> +   hythread_set_suspend_disable(disable_count);
>>>>> +   return GC_PARTIAL_PMSS;
>>>>> +
>>>>> +}
>>>>> +
>>>>> +static unsigned int gc_con_heap_full_otf( GC *gc )
>>>>> +{
>>>>> +   unsigned int partial_type; //for time measuring and debugging
>>>>> +   int disable_count = vm_suspend_all_threads();
>>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +   con_collection_stat->pause_start_time = time_now();
>>>>> +   switch(gc->gc_concurrent_status) {
>>>>> +       case GC_CON_START_MARKERS :
>>>>> +       case GC_CON_TRACING :
>>>>> +       case GC_CON_TRACE_DONE :
>>>>> +         partial_type = GC_PARTIAL_PMSS;
>>>>> +         gc_partial_con_PMSS(gc);
>>>>> +         break;
>>>>> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
>>>>> +         partial_type = GC_PARTIAL_CMSS;
>>>>> +         gc_partial_con_CMSS(gc);
>>>>> +         break;
>>>>> +       case GC_CON_SWEEPING :
>>>>> +       case GC_CON_SWEEP_DONE :
>>>>> +         partial_type = GC_PARTIAL_CMPS;
>>>>> +         gc_partial_con_CMPS(gc);
>>>>> +         break;
>>>>> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
>>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>>> +         gc_merge_free_list_global(gc);
>>>>> +         gc_reset_after_con_collection(gc);
>>>>> +         set_con_nil(gc);
>>>>> +         break;
>>>>> +       case GC_CON_RESET :
>>>>> +       case GC_CON_NIL :
>>>>> +       case GC_CON_STW_ENUM :
>>>>> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
>>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>>> +         break;
>>>>> +       /* other state is illegal here */
>>>>> +       default:
>>>>> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
>>>>> +         RAISE_ERROR;
>>>>> +    }
>>>>> +    vm_resume_all_threads(disable_count);
>>>>> +    return partial_type;
>>>>>  }
>>>>>
>>>>> -void gc_decide_con_algo(char* concurrent_algo)
>>>>> -{
>>>>> -  string_to_upper(concurrent_algo);
>>>>> -  GC_PROP &= ~ALGO_CON_MASK;
>>>>> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>>> -    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>>> -    GC_PROP |= ALGO_CON_MOSTLY;
>>>>> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>>> -    GC_PROP |= ALGO_CON_OTF_REF;
>>>>> +void gc_con_stat_information_out(GC *gc);
>>>>> +/*
>>>>> +this method is called before STW gc start, there is a big lock outside
>>>>> +*/
>>>>> +void gc_wait_con_finish( GC* gc ) {
>>>>> +  int64 time_collection_start = time_now();
>>>>> +  unsigned int partial_type; //for time measuring and debugging
>>>>> +
>>>>> +   /* cocurrent gc is idle */
>>>>> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
>>>>> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>>> +        con_collection_stat->gc_start_time = time_now();
>>>>> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
>>>>> +        partial_type = GC_PARTIAL_PSTW;
>>>>> +        gc_partial_con_PSTW( gc );
>>>>> +   } else {
>>>>> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
>>>>> +          hythread_safe_point();
>>>>> +          vm_thread_yield();
>>>>> +       }
>>>>> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
>>>>> +         partial_type = gc_con_heap_full_mostly_con(gc);
>>>>> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>>> +         partial_type = gc_con_heap_full_otf(gc);
>>>>> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
>>>>> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
>>>>> +              hythread_safe_point();
>>>>> +              vm_thread_yield();
>>>>> +            }
>>>>> +         }
>>>>> +       }
>>>>> +       else
>>>>> +         RAISE_ERROR;
>>>>> +   }
>>>>> +
>>>>> +  int64 pause_time = time_now()-time_collection_start;
>>>>> +  gc_con_stat_information_out(gc);
>>>>> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
>>>>> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
>>>>> +  } else {
>>>>> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>>>>>   }
>>>>>  }
>>>>> +
>>>>> +
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
>>>>> @@ -19,21 +19,69 @@
>>>>>  #define _GC_CONCURRENT_H_
>>>>>  #include "gc_common.h"
>>>>>
>>>>> -enum GC_CONCURRENT_STATUS{
>>>>> -  GC_CON_STATUS_NIL = 0x00,
>>>>> -  GC_CON_MARK_PHASE = 0x01,
>>>>> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
>>>>> -  GC_CON_SWEEP_PHASE = 0x02
>>>>> +
>>>>> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
>>>>> +inline unsigned int trans_time_unit(int64 x)
>>>>> +{
>>>>> +  int64 result = x>>10;
>>>>> +  if(result) return (unsigned int)result;
>>>>> +  return 1;
>>>>> +}
>>>>> +
>>>>> +#define RAISE_ERROR  assert(0);
>>>>> +/* concurrent collection states in new design */
>>>>> +enum GC_CONCURRENT_STATUS {
>>>>> +  GC_CON_NIL = 0x00,
>>>>> +  GC_CON_STW_ENUM = 0x01,
>>>>> +  GC_CON_START_MARKERS = 0x02,
>>>>> +  GC_CON_TRACING = 0x03,
>>>>> +  GC_CON_TRACE_DONE = 0x04,
>>>>> +  GC_CON_BEFORE_SWEEP = 0x05,
>>>>> +  GC_CON_SWEEPING = 0x06,
>>>>> +  GC_CON_SWEEP_DONE = 0x07,
>>>>> +  GC_CON_BEFORE_FINISH = 0x08,
>>>>> +  GC_CON_RESET = 0x09,
>>>>> +  GC_CON_DISABLE = 0x0A,
>>>>> +};
>>>>> +
>>>>> +// this type is just for debugging and time measuring
>>>>> +enum GC_PARTIAL_STW_TYPE {
>>>>> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
>>>>> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
>>>>> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
>>>>> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
>>>>> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>>>>>  };
>>>>>
>>>>>  enum HANDSHAKE_SINGAL{
>>>>>   HSIG_MUTATOR_SAFE = 0x0,
>>>>> -
>>>>>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>>>>>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>>>>>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>>>>>  };
>>>>>
>>>>> +typedef struct Con_Collection_Statistics {
>>>>> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
>>>>> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
>>>>> +    POINTER_SIZE_INT live_alloc_size;
>>>>> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
>>>>> +
>>>>> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
>>>>> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
>>>>> +
>>>>> +    float heap_utilization_rate;
>>>>> +
>>>>> +    int64 gc_start_time;
>>>>> +    int64 gc_end_time;
>>>>> +
>>>>> +    int64 marking_start_time;
>>>>> +    int64 marking_end_time;
>>>>> +
>>>>> +    int64 sweeping_time;
>>>>> +    int64 pause_start_time;
>>>>> +
>>>>> +} Con_Space_Statistics;
>>>>> +
>>>>>  inline void gc_set_con_gc(unsigned int con_phase)
>>>>>  { GC_PROP |= con_phase;  }
>>>>>
>>>>> @@ -58,107 +106,101 @@
>>>>>  inline Boolean gc_is_specify_con_sweep()
>>>>>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>>>>>
>>>>> -extern volatile Boolean concurrent_in_marking;
>>>>> -extern volatile Boolean concurrent_in_sweeping;
>>>>> -extern volatile Boolean mark_is_concurrent;
>>>>> -extern volatile Boolean sweep_is_concurrent;
>>>>>
>>>>> -inline Boolean gc_mark_is_concurrent()
>>>>> -{
>>>>> -  return mark_is_concurrent;
>>>>> -}
>>>>> +extern volatile Boolean obj_alloced_live;
>>>>>
>>>>> -inline void gc_mark_set_concurrent()
>>>>> -{
>>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
>>>>> -    gc_enable_alloc_obj_live();
>>>>> -  mark_is_concurrent = TRUE;
>>>>> -}
>>>>> +inline Boolean is_obj_alloced_live()
>>>>> +{ return obj_alloced_live;  }
>>>>>
>>>>> -inline void gc_mark_unset_concurrent()
>>>>> -{
>>>>> -  gc_disable_alloc_obj_live();
>>>>> -  mark_is_concurrent = FALSE;
>>>>> +inline void gc_disable_alloc_obj_live(GC *gc)
>>>>> +{
>>>>> +  obj_alloced_live = FALSE;
>>>>>  }
>>>>>
>>>>> -inline Boolean gc_con_is_in_marking()
>>>>> +void gc_enable_alloc_obj_live(GC * gc);
>>>>> +
>>>>> +/*
>>>>> +    tranform the states across the collection process,
>>>>> +  which should be a atomic operation because there are several collector run parallel
>>>>> +*/
>>>>> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>>>>>  {
>>>>> -  return concurrent_in_marking;
>>>>> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
>>>>> +  if( old_state != from_state )
>>>>> +    return FALSE;
>>>>> +  else
>>>>> +    return TRUE;
>>>>>  }
>>>>>
>>>>> -inline Boolean gc_con_is_in_marking(GC* gc)
>>>>> -{
>>>>> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
>>>>> +/* set concurrent to idle,
>>>>> +    Or enable concurrent gc, called when STW gc finishes
>>>>> + */
>>>>> +inline void set_con_nil( GC *gc ) {
>>>>> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>>>>>  }
>>>>>
>>>>> -inline Boolean gc_sweep_is_concurrent()
>>>>> -{
>>>>> -  return sweep_is_concurrent;
>>>>> +
>>>>> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
>>>>> +void gc_start_con_enumeration(GC * gc);
>>>>> +
>>>>> +/* gc start marking phase */
>>>>> +void gc_start_con_marking(GC *gc);
>>>>> +
>>>>> +
>>>>> +/* prepare for sweeping */
>>>>> +void gc_prepare_sweeping(GC *gc);
>>>>> +
>>>>> +/* gc start sweeping phase */
>>>>> +void gc_start_con_sweeping(GC *gc);
>>>>> +
>>>>> +/* gc finish concurrent collection */
>>>>> +void gc_con_final_work(GC* gc);
>>>>> +
>>>>> +
>>>>> +/* gc wait cocurrent collection finishes */
>>>>> +void gc_wait_con_finish( GC* gc );
>>>>> +
>>>>> +/* is in gc marking phase */
>>>>> +inline Boolean in_con_marking_phase( GC *gc ) {
>>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>>> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>>>>>  }
>>>>>
>>>>> -inline void gc_sweep_set_concurrent()
>>>>> -{
>>>>> -  sweep_is_concurrent = TRUE;
>>>>> +/* is in gc sweeping phase */
>>>>> +inline Boolean in_con_sweeping_phase( GC *gc ) {
>>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>>> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>>>>>  }
>>>>>
>>>>> -inline void gc_sweep_unset_concurrent()
>>>>> -{
>>>>> -  sweep_is_concurrent = FALSE;
>>>>> +inline Boolean in_con_idle( GC *gc ) {
>>>>> +  return gc->gc_concurrent_status == GC_CON_NIL;
>>>>>  }
>>>>>
>>>>> -inline Boolean gc_con_is_in_sweeping()
>>>>> -{
>>>>> -  return concurrent_in_sweeping;
>>>>> +inline Boolean gc_con_is_in_STW( GC *gc ) {
>>>>> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>>>>>  }
>>>>>
>>>>> -inline Boolean gc_con_is_in_sweeping(GC* gc)
>>>>> -{
>>>>> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
>>>>> +/* is gc ready to sweeping */
>>>>> +inline Boolean in_con_ready_sweep( GC *gc ) {
>>>>> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>>>>>  }
>>>>>
>>>>> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
>>>>> -{
>>>>> -  /*Reset status*/
>>>>> -  concurrent_in_marking = FALSE;
>>>>> -  concurrent_in_sweeping = FALSE;
>>>>> -
>>>>> -  gc->gc_concurrent_status = status;
>>>>> -  switch(status){
>>>>> -    case GC_CON_MARK_PHASE:
>>>>> -      gc_mark_set_concurrent();
>>>>> -      concurrent_in_marking = TRUE;
>>>>> -      break;
>>>>> -    case GC_CON_SWEEP_PHASE:
>>>>> -      gc_sweep_set_concurrent();
>>>>> -      concurrent_in_sweeping = TRUE;
>>>>> -      break;
>>>>> -    default:
>>>>> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
>>>>> -  }
>>>>> +/* is gc sweeping */
>>>>> +inline Boolean in_con_sweep( GC *gc ) {
>>>>> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>>>>>
>>>>> -  return;
>>>>>  }
>>>>>
>>>>> -void gc_reset_con_mark(GC* gc);
>>>>> -void gc_start_con_mark(GC* gc);
>>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
>>>>> -int64 gc_get_con_mark_time(GC* gc);
>>>>> -
>>>>> -void gc_start_con_sweep(GC* gc);
>>>>> -void gc_finish_con_sweep(GC * gc);
>>>>> +void gc_con_update_stat_after_marking( GC *gc );
>>>>>
>>>>> -void gc_reset_after_con_collect(GC* gc);
>>>>> -void gc_try_finish_con_phase(GC * gc);
>>>>>
>>>>>  void gc_decide_con_algo(char* concurrent_algo);
>>>>>  void gc_set_default_con_algo();
>>>>>
>>>>> -void gc_reset_con_sweep(GC* gc);
>>>>> -
>>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>>>>>
>>>>>  extern volatile Boolean gc_sweep_global_normal_chunk;
>>>>>
>>>>> +
>>>>>  inline Boolean gc_is_sweep_global_normal_chunk()
>>>>>  { return gc_sweep_global_normal_chunk; }
>>>>>
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -18,13 +18,17 @@
>>>>>  /**
>>>>>  * @author Xiao-Feng Li, 2006/10/05
>>>>>  */
>>>>> -
>>>>> +
>>>>> +#include <open/vm_class_info.h>
>>>>> +#include <open/vm_class_manipulation.h>
>>>>>  #include "../gen/gen.h"
>>>>>  #include "../thread/mutator.h"
>>>>>  #include "gc_for_barrier.h"
>>>>>  #include "../mark_sweep/wspace_mark_sweep.h"
>>>>>  #include "../common/gc_concurrent.h"
>>>>> +#include "../common/gc_common.h"
>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>> +#include "../verify/verify_live_heap.h"
>>>>>
>>>>>
>>>>>  /* All the write barrier interfaces need cleanup */
>>>>> @@ -117,10 +121,8 @@
>>>>>     Mutator *mutator = (Mutator *)gc_get_tls();
>>>>>
>>>>>     //FIXME: Release lock.
>>>>> -    lock(mutator->dirty_set_lock);
>>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>>> -    unlock(mutator->dirty_set_lock);
>>>>> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>>>   }
>>>>>  }
>>>>>
>>>>> @@ -204,7 +206,8 @@
>>>>>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>>>>>       }
>>>>>     }
>>>>> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
>>>>> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>>   }
>>>>>  }
>>>>> @@ -215,32 +218,141 @@
>>>>>   REF* p_obj_slot = (REF*) p_slot ;
>>>>>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>>>>>   if(p_obj && obj_need_remember_oldvar(p_obj)){
>>>>> +    mutator->dirty_obj_num++;
>>>>>     mutator_dirtyset_add_entry(mutator, p_obj);
>>>>>   }
>>>>>  }
>>>>>
>>>>> +/*
>>>>> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
>>>>> +{
>>>>> +  //Mutator *mutator = (Mutator *)gc_get_tls();
>>>>> +
>>>>> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
>>>>> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
>>>>> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
>>>>> +
>>>>> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
>>>>> +     analyze_bad_obj(src_obj);
>>>>> +     RAISE_ERROR;
>>>>> +  }
>>>>> +
>>>>> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
>>>>> +     analyze_bad_obj(sub_obj);
>>>>> +     INFO2("gc.verifier", "[source object]");
>>>>> +     analyze_bad_obj(src_obj);
>>>>> +     //RAISE_ERROR;
>>>>> +     return;
>>>>> +  }
>>>>> +
>>>>> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
>>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
>>>>> +     analyze_bad_obj(target_obj);
>>>>> +     RAISE_ERROR;
>>>>> +  }
>>>>> +
>>>>> +  *p_slot = p_target;
>>>>> +}
>>>>> +*/
>>>>>  //===========================================
>>>>>
>>>>>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>>>>>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>>>>>  {  assert(0); }
>>>>>
>>>>> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
>>>>> +
>>>>> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
>>>>> +{
>>>>> +
>>>>> +
>>>>> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
>>>>> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
>>>>> +
>>>>> +    Class_Handle src_class = src_gcvt->gc_clss;
>>>>> +    Class_Handle dst_class = dst_gcvt->gc_clss;
>>>>> +
>>>>> +
>>>>> +       //element size of src should be same as element size of dst
>>>>> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
>>>>> +       unsigned int elem_size = src_gcvt->array_elem_size;
>>>>> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
>>>>> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
>>>>> +       /*
>>>>> +       #ifdef COMPRESS_REFERENCE
>>>>> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>>> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>>> +       #else
>>>>> +       #endif
>>>>> +       */
>>>>> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>>> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>>> +
>>>>> +
>>>>> +       if(class_is_instanceof(src_class, dst_class)) {
>>>>> +         //rem obj before is for OTF GC barriers
>>>>> +         if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>> +            for (unsigned int count = 0; count < length; count++) {
>>>>> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>> +            }
>>>>> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>>> +         }
>>>>> +
>>>>> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
>>>>> +
>>>>> +       } else { //for the condition src is not the type of dst
>>>>> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
>>>>> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>>> +          }
>>>>> +
>>>>> +          for (unsigned int count = 0; count < length; count++) {
>>>>> +             // 1, null elements copy direct
>>>>> +             if (src_copy_body[count] == NULL) {
>>>>> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>> +                 }
>>>>> +                  dst_copy_body[count] = NULL;
>>>>> +                  continue;
>>>>> +               }
>>>>> +
>>>>> +             // 2, For non-null elements check if types are compatible.
>>>>> +/*
>>>>> +#ifdef COMPRESS_REFERENCE
>>>>> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
>>>>> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
>>>>> +#else
>>>>> +#endif
>>>>> +*/
>>>>> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
>>>>> +
>>>>> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
>>>>> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>>> +                      write_barrier_rem_source_obj(dst_array);
>>>>> +                  }
>>>>> +                  return FALSE;
>>>>> +             }
>>>>> +
>>>>> +             if(WB_REM_OLD_VAR == write_barrier_function) {
>>>>> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>>> +             }
>>>>> +              dst_copy_body[count] = src_copy_body[count];
>>>>> +        }
>>>>> +      }
>>>>> +
>>>>> +    //rem obj after is for mostly concurrent
>>>>> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>>> +        write_barrier_rem_source_obj(dst_array);
>>>>> +    }
>>>>> +
>>>>> +    return TRUE;
>>>>> +}
>>>>> +
>>>>> +
>>>>> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>>>>>  {
>>>>> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
>>>>> -      we treat it as an new object. It has already been marked when dest object was created.
>>>>> -      We use WB_REM_SOURCE_OBJ function here to debug.
>>>>> -    */
>>>>> -
>>>>> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
>>>>> -    Mutator *mutator = (Mutator *)gc_get_tls();
>>>>> -    lock(mutator->dirty_set_lock);
>>>>> -
>>>>> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
>>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
>>>>> -
>>>>> -    unlock(mutator->dirty_set_lock);
>>>>> -  }
>>>>>
>>>>>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>>>>>     return;
>>>>> @@ -283,6 +395,13 @@
>>>>>       write_barrier_rem_slot_oldvar(p_slot);
>>>>>       *p_slot = p_target;
>>>>>       break;
>>>>> +    //just debugging
>>>>> +    /*
>>>>> +    case WB_CON_DEBUG:
>>>>> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
>>>>> +       //*p_slot = p_target;
>>>>> +       break;
>>>>> +    */
>>>>>     default:
>>>>>       assert(0);
>>>>>       return;
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
>>>>> @@ -32,7 +32,8 @@
>>>>>   WB_REM_SOURCE_REF    = 0x02,
>>>>>   WB_REM_OLD_VAR       = 0x03,
>>>>>   WB_REM_NEW_VAR       = 0x04,
>>>>> -  WB_REM_OBJ_SNAPSHOT  = 0x05
>>>>> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
>>>>> +  WB_CON_DEBUG = 0x06
>>>>>  };
>>>>>
>>>>>  inline void gc_set_barrier_function(unsigned int wb_function)
>>>>> @@ -43,4 +44,3 @@
>>>>>  #endif /* _GC_FOR_BARRIER_H_ */
>>>>>
>>>>>
>>>>> -
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -203,4 +203,3 @@
>>>>>
>>>>>
>>>>>
>>>>> -
>>>>>
>>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>>> ==============================================================================
>>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
>>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
>>>>> @@ -30,7 +30,7 @@
>>>>>  #include "../mark_sweep/gc_ms.h"
>>>>>  #include "../move_compact/gc_mc.h"
>>>>>  #include "interior_pointer.h"
>>>>> -#include "../thread/marker.h"
>>>>> +#include "../thread/conclctor.h"
>>>>>  #include "../thread/collector.h"
>>>>>  #include "../verify/verify_live_heap.h"
>>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>> @@ -115,7 +115,10 @@
>>>>>   collection_scheduler_initialize(gc);
>>>>>
>>>>>   if(gc_is_specify_con_gc()){
>>>>> -    marker_initialize(gc);
>>>>> +     gc->gc_concurrent_status = GC_CON_NIL;
>>>>> +    conclctor_initialize(gc);
>>>>> +  } else {
>>>>> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>>>>>   }
>>>>>
>>>>>   collector_initialize(gc);
>>>>> @@ -134,6 +137,9 @@
>>>>>  {
>>>>>   INFO2("gc.process", "GC: call GC wrapup ....");
>>>>>   GC* gc =  p_global_gc;
>>>>> +  // destruct threads first, and then destruct data structures
>>>>> +  conclctor_destruct(gc);
>>>>> +  collector_destruct(gc);
>>>>>
>>>>>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>>>>>  gc_ms_destruct((GC_MS*)gc);
>>>>> @@ -148,8 +154,6 @@
>>>>>  #ifndef BUILD_IN_REFERENT
>>>>>   gc_finref_metadata_destruct(gc);
>>>>>  #endif
>>>>> -  collector_destruct(gc);
>>>>> -  marker_destruct(gc);
>>>>>
>>>>>   if( verify_live_heap ){
>>>>>     gc_terminate_heap_verification(gc);
>>>>> @@ -446,4 +450,3 @@
>>>>>
>>>>>
>>>>>
>>>>> -
>>>>>
>>>>>
>>>>>
>>>>
>>>>
>>>>
>>>> --
>>>> Unless stated otherwise above:
>>>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>>>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>>>
>>>
>>>
>>>
>>> --
>>> http://xiao-feng.blogspot.com
>>>
>>
>>
>>
>> --
>> Unless stated otherwise above:
>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>
>
>
>
> --
> http://xiao-feng.blogspot.com
>



-- 
Unless stated otherwise above:
IBM United Kingdom Limited - Registered in England and Wales with number 741598.
Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Posted by Xiao-Feng Li <xi...@gmail.com>.
On Wed, Oct 29, 2008 at 6:33 PM, Sian January
<si...@googlemail.com> wrote:
> Thanks for your quick reply Xiao-Feng.
>
> I haven't studied the code that much, so if it's all disabled by
> default as you say then I think it's ok to leave it in.
>
> In future I do think it would be better practice to discuss it on the
> dev list and get some agreement before committing something this size
> during feature freeze week.  This is because with some large changes
> there can be unforeseen effects that can impact the code in ways that
> the original author hadn't realised.  Also if we had several major
> changes and then saw regressions it could be difficult to work out
> what had caused them and it could badly delay the release.

Agree. To discuss beforehand is a better practice. I will surely follow it.

To improve the process, we can introduce a guideline that, say, any
patch bigger than 10KB (?) should be considered as a work of new
feature, hence not allowed for commit during feature-freeze period.
That could help to clarify the confusion on what a new feature is.

Suggestion?

Thanks,
xiaofeng

> Does anyone else have a different opinion on either rolling back the
> code or on general practice during feature freeze?
>
> Thanks,
>
> Sian
>
>
> 2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
>> Sian, thanks for your notice.
>>
>> This patch is indeed big. Most of the code are guarded by a macro
>> USE_UNIQUE_MARK_SWEEP_GC and has no impact on the existing code base.
>> It is disabled by default, and I tested it before I committed it.
>>
>> Actually it is not a new feature, but a fix of existing concurrent GC
>> scheduler. Well, I admit it looks like a new feature since it changes
>> lot of code...
>>
>> If it has any impact on stability, I will roll back it immediately.
>> Thanks for your patience.
>>
>> Thanks,
>> xiaofeng
>>
>> On Wed, Oct 29, 2008 at 5:22 PM, Sian January
>> <si...@googlemail.com> wrote:
>>> Hi Xiao-Feng,
>>>
>>> This commit looks like quite a large new feature to me.  Since we're
>>> in feature freeze this week for M8 I really think it should be backed
>>> out until after the milestone, as we should be focussing on testing
>>> and stability at the moment.
>>>
>>> Thanks,
>>>
>>> Sian
>>>
>>>
>>> 2008/10/29  <xl...@apache.org>:
>>>> Author: xli
>>>> Date: Tue Oct 28 20:01:01 2008
>>>> New Revision: 708756
>>>>
>>>> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
>>>> Log:
>>>> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>>>>
>>>> Added:
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
>>>> Removed:
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
>>>> Modified:
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
>>>> @@ -34,6 +34,7 @@
>>>>     gc_heap_write_global_slot;
>>>>     gc_heap_write_ref;
>>>>     gc_heap_wrote_object;
>>>> +    gc_heap_copy_object_array;
>>>>     gc_init;
>>>>     gc_is_object_pinned;
>>>>     gc_iterate_heap;
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -31,24 +31,16 @@
>>>>   return;
>>>>  }
>>>>
>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>> -{
>>>> -  if(gc_is_specify_con_gc()){
>>>> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
>>>> -  }
>>>> -  return;
>>>> -}
>>>>
>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>>>>  {
>>>>   /*collection scheduler only schedules concurrent collection now.*/
>>>>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>>>>     assert(gc_is_specify_con_gc());
>>>> -    return gc_sched_con_collection(gc, gc_cause);
>>>> +    return gc_con_perform_collection( gc );
>>>>   }else{
>>>>     return FALSE;
>>>>   }
>>>>  }
>>>>
>>>>
>>>> -
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>> @@ -26,12 +26,8 @@
>>>>  void collection_scheduler_initialize(GC* gc);
>>>>  void collection_scheduler_destruct(GC* gc);
>>>>
>>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>>>>
>>>>  #endif
>>>>
>>>>
>>>> -
>>>> -
>>>> -
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -22,7 +22,7 @@
>>>>  #include "collection_scheduler.h"
>>>>  #include "concurrent_collection_scheduler.h"
>>>>  #include "gc_concurrent.h"
>>>> -#include "../thread/marker.h"
>>>> +#include "../thread/conclctor.h"
>>>>  #include "../verify/verify_live_heap.h"
>>>>
>>>>  #define NUM_TRIAL_COLLECTION 2
>>>> @@ -53,6 +53,7 @@
>>>>  Boolean gc_use_space_scheduler()
>>>>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>>>>
>>>> +
>>>>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>>>>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>>
>>>> @@ -75,6 +76,7 @@
>>>>   STD_FREE(gc->collection_scheduler);
>>>>  }
>>>>
>>>> +
>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>>>>  {
>>>>   string_to_upper(cc_scheduler);
>>>> @@ -93,281 +95,248 @@
>>>>   gc_enable_time_scheduler();
>>>>  }
>>>>
>>>> -static Boolean time_to_start_mark(GC* gc)
>>>> -{
>>>> -  if(!gc_use_time_scheduler()) return FALSE;
>>>> -
>>>> -  int64 time_current = time_now();
>>>> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
>>>> -}
>>>> -
>>>> -static Boolean space_to_start_mark(GC* gc)
>>>> -{
>>>> -  if(!gc_use_space_scheduler()) return FALSE;
>>>> +/*====================== new scheduler ===================*/
>>>> +extern unsigned int NUM_CON_MARKERS;
>>>> +extern unsigned int NUM_CON_SWEEPERS;
>>>> +unsigned int gc_get_mutator_number(GC *gc);
>>>> +
>>>> +#define MOSTLY_CON_MARKER_DIVISION 0.5
>>>> +unsigned int mostly_con_final_marker_num=1;
>>>> +unsigned int mostly_con_long_marker_num=1;
>>>> +
>>>> +unsigned int gc_get_marker_number(GC* gc) {
>>>> +  unsigned int mutator_num = gc_get_mutator_number(gc);
>>>> +  unsigned int marker_specified = NUM_CON_MARKERS;
>>>> +  if(marker_specified == 0) {
>>>> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
>>>> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>>> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
>>>> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
>>>> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
>>>> +    }
>>>> +  }
>>>>
>>>> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
>>>> -  return (size_new_obj > space_threshold_to_start_mark);
>>>> +  assert(marker_specified);
>>>> +  return marker_specified;
>>>>  }
>>>>
>>>> -static Boolean gc_need_start_con_mark(GC* gc)
>>>> -{
>>>> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
>>>> -
>>>> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
>>>> -    return TRUE;
>>>> -  else
>>>> -    return FALSE;
>>>> +#define CON_SWEEPER_DIVISION 0.8
>>>> +unsigned int gc_get_sweeper_numer(GC *gc) {
>>>> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
>>>> +  if(sweeper_specified == 0)
>>>> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
>>>> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
>>>> +  assert(sweeper_specified);
>>>> +  return sweeper_specified;
>>>>  }
>>>>
>>>> -static Boolean gc_need_start_con_sweep(GC* gc)
>>>> -{
>>>> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>>>>
>>>> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
>>>> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
>>>> -    return TRUE;
>>>> -  else
>>>> -    return FALSE;
>>>> -}
>>>>
>>>> -static Boolean gc_need_reset_after_con_collect(GC* gc)
>>>> -{
>>>> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
>>>> -    return TRUE;
>>>> -  else
>>>> -    return FALSE;
>>>> -}
>>>>
>>>> -static Boolean gc_need_start_con_enum(GC* gc)
>>>> -{
>>>> -  /*TODO: support on-the-fly root set enumeration.*/
>>>> -  return FALSE;
>>>> -}
>>>> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
>>>> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
>>>> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>>>>
>>>> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
>>>> -#define TIME_CORRECTION_OTF_MARK 0.65f
>>>> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
>>>> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
>>>> -
>>>> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
>>>> -{
>>>> -  Space* space = NULL;
>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>> -
>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>> -  space = (Space*) gc_get_wspace(gc);
>>>> -#endif
>>>> -  if(!space) return;
>>>> +/* for checking heap effcient*/
>>>> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
>>>> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
>>>> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>>>>
>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>> -
>>>> -  unsigned int slot_index = cc_scheduler->last_window_index;
>>>> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
>>>> -
>>>> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
>>>> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
>>>> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
>>>> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
>>>> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>>>>
>>>> -  cc_scheduler->last_mutator_time = time_mutator;
>>>> -  cc_scheduler->last_collector_time = time_collection;
>>>> -
>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>> -    return;
>>>> -
>>>> -  cc_scheduler->alloc_rate_window[slot_index]
>>>> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
>>>> +static int64 last_check_time_point = time_now();
>>>> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>>>>
>>>> -  if(gc_mark_is_concurrent()){
>>>> -    cc_scheduler->trace_rate_window[slot_index]
>>>> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
>>>> -  }else{
>>>> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
>>>> -  }
>>>> -
>>>> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
>>>> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
>>>> +//just debugging
>>>> +int64 get_last_check_point()
>>>> +{
>>>> +   return last_check_time_point;
>>>>  }
>>>>
>>>> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
>>>> -{
>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>>> -    return;
>>>> +static unsigned int alloc_space_threshold = 0;
>>>>
>>>> -  Space* space = NULL;
>>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>> -  space = (Space*) gc_get_wspace(gc);
>>>> -#endif
>>>> -  if(!space) return;
>>>> -
>>>> -  Space_Statistics* space_stat = space->space_statistic;
>>>> -
>>>> -  float sum_alloc_rate = 0;
>>>> -  float sum_trace_rate = 0;
>>>> -  float sum_space_util_ratio = 0;
>>>> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
>>>> +static unsigned int space_check_stage_2; //BIG_DELTA
>>>>
>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
>>>> +{
>>>>
>>>> -  int64 time_this_collection_correction = 0;
>>>> -#if 0
>>>> -  float space_util_ratio = space_stat->space_utilization_ratio;
>>>> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
>>>> -    time_this_collection_correction = 0;
>>>> -  }else{
>>>> -    time_this_collection_correction
>>>> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
>>>> -  }
>>>> -#endif
>>>> -
>>>> -  unsigned int i;
>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
>>>> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
>>>> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
>>>> -  }
>>>> -
>>>> -  TRACE2("gc.con.cs","Allocation Rate: ");
>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
>>>> -  }
>>>> -
>>>> -  TRACE2("gc.con.cs","Tracing Rate: ");
>>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
>>>> -  }
>>>> -
>>>> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
>>>> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
>>>> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
>>>> -
>>>> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
>>>> -
>>>> -  if(average_alloc_rate == 0 ){
>>>> -    time_delay_to_start_mark = MIN_DELAY_TIME;
>>>> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
>>>> -  }else if(average_trace_rate == 0){
>>>> -    time_delay_to_start_mark = MAX_DELAY_TIME;
>>>> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>> -  }else{
>>>> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
>>>> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
>>>> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
>>>> -
>>>> -    if(time_alloc_expected > time_trace_expected){
>>>> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
>>>> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
>>>> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
>>>> -      }
>>>> -    }else{
>>>> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
>>>> +  float util_rate = con_collection_stat->heap_utilization_rate;
>>>> +  unsigned int space_threshold = 0;
>>>> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
>>>> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
>>>> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
>>>> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
>>>> +
>>>> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
>>>> +    } else {  //use default
>>>> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>     }
>>>> -
>>>> -    cc_scheduler->space_threshold_to_start_mark =
>>>> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
>>>> -
>>>> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
>>>> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>>> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>>   }
>>>> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
>>>> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>>>>
>>>> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
>>>> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
>>>> +  else
>>>> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
>>>> +
>>>> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
>>>> +  return space_threshold;
>>>>  }
>>>>
>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>>> -{
>>>> -  assert(gc_is_specify_con_gc());
>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
>>>> -
>>>> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
>>>> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
>>>> -
>>>> -  return;
>>>> -}
>>>> -
>>>> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
>>>> +/* this parameters are updated at end of GC */
>>>> +void gc_update_scheduler_parameter( GC *gc )
>>>>  {
>>>> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
>>>> -  vm_gc_lock_enum();
>>>> -
>>>> -  gc_try_finish_con_phase(gc);
>>>> -
>>>> -  if(gc_need_start_con_enum(gc)){
>>>> -    /*TODO:Concurrent rootset enumeration.*/
>>>> -    assert(0);
>>>> -  }
>>>> -
>>>> -  if(gc_need_start_con_mark(gc)){
>>>> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
>>>> -    gc_start_con_mark(gc);
>>>> -    vm_gc_unlock_enum();
>>>> -    unlock(gc->lock_collect_sched);
>>>> -    return TRUE;
>>>> -  }
>>>> -
>>>> -  if(gc_need_start_con_sweep(gc)){
>>>> -    gc->num_collections++;
>>>> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
>>>> -    gc_start_con_sweep(gc);
>>>> -    vm_gc_unlock_enum();
>>>> -    unlock(gc->lock_collect_sched);
>>>> -    return TRUE;
>>>> -  }
>>>> -
>>>> -  if(gc_need_reset_after_con_collect(gc)){
>>>> -    int64 pause_start = time_now();
>>>> -    int disable_count = vm_suspend_all_threads();
>>>> -    gc_reset_after_con_collect(gc);
>>>> -    gc_start_mutator_time_measure(gc);
>>>> -    set_collection_end_time();
>>>> -    vm_resume_all_threads(disable_count);
>>>> -    vm_gc_unlock_enum();
>>>> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>> -    unlock(gc->lock_collect_sched);
>>>> -    return TRUE;
>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +   last_check_time_point = time_now();
>>>> +
>>>> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>>> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
>>>> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
>>>> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
>>>> +
>>>> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
>>>> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>> +   if(gc_is_specify_con_sweep()) {
>>>> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
>>>> +   }
>>>> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
>>>> +}
>>>> +
>>>> +void gc_force_update_scheduler_parameter( GC *gc )
>>>> +{
>>>> +    last_check_time_point = time_now();
>>>> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
>>>> +    check_delay_time = time_now();
>>>> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
>>>> +}
>>>> +
>>>> +
>>>> +
>>>> +static inline Boolean check_start_mark( GC *gc )
>>>> +{
>>>> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +   /*just debugging*/
>>>> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
>>>> +   if( alloc_space_threshold < new_object_occupied_size ) {
>>>> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
>>>> +       return TRUE;
>>>> +   }
>>>> +
>>>> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
>>>> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
>>>> +   int64 last_check_delay = check_delay_time;
>>>> +
>>>> +   if( free_space < space_check_stage_2 ) {
>>>> +       check_delay_time = SMALL_DELTA;
>>>> +   } else if( free_space < space_check_stage_1 ) {
>>>> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
>>>> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
>>>> +           unsigned int interval_space = new_object_occupied_size;
>>>> +           con_collection_stat->alloc_rate = interval_space/interval_time;
>>>> +       }
>>>> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
>>>> +   }
>>>> +   last_check_time_point = time_now();
>>>> +
>>>> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
>>>> +   return FALSE;
>>>> +}
>>>> +
>>>> +static SpinLock check_lock;
>>>> +static inline Boolean space_should_start_mark( GC *gc)
>>>> +{
>>>> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
>>>> +      Boolean should_start = check_start_mark(gc);
>>>> +      unlock(check_lock);
>>>> +      return should_start;
>>>>   }
>>>> -  vm_gc_unlock_enum();
>>>> -  unlock(gc->lock_collect_sched);
>>>>   return FALSE;
>>>>  }
>>>>
>>>> -extern unsigned int NUM_MARKERS;
>>>> -
>>>> -unsigned int gc_decide_marker_number(GC* gc)
>>>> -{
>>>> -  unsigned int num_active_marker;
>>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>>> +inline static Boolean gc_con_start_condition( GC* gc ) {
>>>> +   return space_should_start_mark(gc);
>>>> +}
>>>>
>>>> -  /*If the number of markers is specfied, just return the specified value.*/
>>>> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>>>>
>>>> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
>>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
>>>> -    /*Start trial cycle, collection set to 1 in trial cycle and */
>>>> -    num_active_marker = 1;
>>>> -  }else{
>>>> -    num_active_marker = cc_scheduler->last_marker_num;
>>>> -    int64 c_time = cc_scheduler->last_collector_time;
>>>> -    int64 m_time = cc_scheduler->last_mutator_time;
>>>> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
>>>> -
>>>> -    if(num_active_marker == 0) num_active_marker = 1;
>>>> -
>>>> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
>>>> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
>>>> -      num_active_marker ++;
>>>> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
>>>> -    }else if((float)d_time > (m_time * 0.6)){
>>>> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
>>>> -      num_active_marker --;
>>>> -      if(num_active_marker == 0)  num_active_marker = 1;
>>>> -    }
>>>> -
>>>> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
>>>> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
>>>> +void gc_reset_after_con_collection(GC *gc);
>>>> +void gc_merge_free_list_global(GC *gc);
>>>> +void gc_con_stat_information_out(GC *gc);
>>>> +
>>>> +unsigned int sub_time = 0;
>>>> +int64 pause_time = 0;
>>>> +/*
>>>> +   concurrent collection entry function, it may start proper phase according to the current state.
>>>> +*/
>>>> +Boolean gc_con_perform_collection( GC* gc ) {
>>>> +  int disable_count;
>>>> +  int64 pause_start;
>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  switch( gc->gc_concurrent_status ) {
>>>> +    case GC_CON_NIL :
>>>> +      if( !gc_con_start_condition(gc) )
>>>> +        return FALSE;
>>>> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
>>>> +        return FALSE;
>>>> +
>>>> +      gc->num_collections++;
>>>> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
>>>> +
>>>> +      con_collection_stat->gc_start_time = time_now();
>>>> +      disable_count = hythread_reset_suspend_disable();
>>>> +
>>>> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
>>>> +      con_collection_stat->marking_start_time = time_now();
>>>> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
>>>> +      gc_start_con_marking(gc);
>>>> +
>>>> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
>>>> +      vm_resume_threads_after();
>>>> +      hythread_set_suspend_disable(disable_count);
>>>> +      break;
>>>> +
>>>> +    case GC_CON_BEFORE_SWEEP :
>>>> +      if(!gc_is_specify_con_sweep())
>>>> +         return FALSE;
>>>> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
>>>> +         return FALSE;
>>>> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
>>>> +      break;
>>>> +
>>>> +
>>>> +    case GC_CON_BEFORE_FINISH :
>>>> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
>>>> +                 return FALSE;
>>>> +        /* thread should be suspended before the state transformation,
>>>> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
>>>> +        disable_count = vm_suspend_all_threads();
>>>> +        pause_start = time_now();
>>>> +
>>>> +        gc_merge_free_list_global(gc);
>>>> +        gc_reset_after_con_collection(gc);
>>>> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
>>>> +        pause_time = time_now()-pause_start;
>>>> +
>>>> +        vm_resume_all_threads(disable_count);
>>>> +        gc_con_stat_information_out(gc);
>>>> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
>>>> +        break;
>>>> +    default :
>>>> +      return FALSE;
>>>>   }
>>>> -
>>>> -  cc_scheduler->last_marker_num = num_active_marker;
>>>> -  return num_active_marker;
>>>> +  return TRUE;
>>>>  }
>>>>
>>>> +
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
>>>> @@ -20,6 +20,7 @@
>>>>
>>>>  #define STAT_SAMPLE_WINDOW_SIZE 5
>>>>
>>>> +struct GC_MS;
>>>>  typedef struct Con_Collection_Scheduler {
>>>>   /*common field*/
>>>>   GC* gc;
>>>> @@ -46,10 +47,17 @@
>>>>  void con_collection_scheduler_initialize(GC* gc);
>>>>  void con_collection_scheduler_destruct(GC* gc);
>>>>
>>>> +void gc_update_scheduler_parameter( GC *gc );
>>>> +void gc_force_update_scheduler_parameter( GC *gc );
>>>> +Boolean gc_con_perform_collection( GC* gc );
>>>>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
>>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>>
>>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>>>>  void gc_set_default_cc_scheduler_kind();
>>>> +
>>>> +extern unsigned int mostly_con_final_marker_num;
>>>> +extern unsigned int mostly_con_long_marker_num;
>>>> +
>>>>  #endif
>>>>
>>>> +
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -22,7 +22,7 @@
>>>>  #include "gc_common.h"
>>>>  #include "gc_metadata.h"
>>>>  #include "../thread/mutator.h"
>>>> -#include "../thread/marker.h"
>>>> +#include "../thread/conclctor.h"
>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>  #include "../gen/gen.h"
>>>>  #include "../mark_sweep/gc_ms.h"
>>>> @@ -74,11 +74,19 @@
>>>>  static int64 collection_start_time = time_now();
>>>>  static int64 collection_end_time = time_now();
>>>>
>>>> -int64 get_collection_end_time()
>>>> +int64 get_gc_start_time()
>>>> +{ return collection_start_time; }
>>>> +
>>>> +void set_gc_start_time()
>>>> +{ collection_start_time = time_now(); }
>>>> +
>>>> +int64 get_gc_end_time()
>>>>  { return collection_end_time; }
>>>>
>>>> -void set_collection_end_time()
>>>> -{ collection_end_time = time_now(); }
>>>> +void set_gc_end_time()
>>>> +{
>>>> +  collection_end_time = time_now();
>>>> +}
>>>>
>>>>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>>>>  {
>>>> @@ -93,17 +101,17 @@
>>>>
>>>>  }
>>>>
>>>> -void gc_update_space_stat(GC_MS* gc)
>>>> +void gc_update_space_stat(GC* gc)
>>>>  {
>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>> -    gc_ms_update_space_stat((GC_MS*)gc);
>>>> +      gc_ms_update_space_stat((GC_MS *)gc);
>>>>  #endif
>>>>  }
>>>>
>>>> -void gc_reset_space_stat(GC_MS* gc)
>>>> +void gc_reset_space_stat(GC* gc)
>>>>  {
>>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>>> -    gc_ms_reset_space_stat((GC_MS*)gc);
>>>> +      gc_ms_reset_space_stat((GC_MS *)gc);
>>>>  #endif
>>>>  }
>>>>
>>>> @@ -118,7 +126,7 @@
>>>>   gc_set_rootset(gc);
>>>>  }
>>>>
>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
>>>> +void gc_reset_after_collection(GC* gc)
>>>>  {
>>>>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>>>>
>>>> @@ -139,11 +147,9 @@
>>>>  #endif
>>>>   }
>>>>
>>>> -  gc_update_space_stat((GC_MS*)gc);
>>>> +  gc_update_space_stat(gc);
>>>>
>>>> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
>>>> -
>>>> -  gc_reset_space_stat((GC_MS*)gc);
>>>> +  gc_reset_space_stat(gc);
>>>>
>>>>   gc_reset_collector_state(gc);
>>>>
>>>> @@ -154,23 +160,25 @@
>>>>
>>>>  }
>>>>
>>>> +void set_check_delay( int64 mutator_time );
>>>> +
>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>>>>  {
>>>>   INFO2("gc.process", "\nGC: GC start ...\n");
>>>>
>>>> -  collection_start_time = time_now();
>>>> -  int64 time_mutator = collection_start_time - collection_end_time;
>>>> -
>>>> -  gc->num_collections++;
>>>>   gc->cause = gc_cause;
>>>>
>>>>   if(gc_is_specify_con_gc()){
>>>> -    gc_finish_con_GC(gc, time_mutator);
>>>> -    collection_end_time = time_now();
>>>> +    gc_wait_con_finish(gc);
>>>>     INFO2("gc.process", "GC: GC end\n");
>>>>     return;
>>>>   }
>>>>
>>>> +   set_gc_start_time();
>>>> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
>>>> +
>>>> +  gc->num_collections++;
>>>> +
>>>>   /* FIXME:: before mutators suspended, the ops below should be very careful
>>>>      to avoid racing with mutators. */
>>>>
>>>> @@ -207,16 +215,16 @@
>>>>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>>>>  #endif
>>>>
>>>> -  collection_end_time = time_now();
>>>> +  set_gc_end_time();
>>>>
>>>> -  int64 time_collection = collection_end_time - collection_start_time;
>>>> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>>>>
>>>>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>>>>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>>>>   gc_gen_space_verbose_info((GC_Gen*)gc);
>>>>  #endif
>>>>
>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>> +  gc_reset_after_collection(gc);
>>>>
>>>>   gc_assign_free_area_to_mutators(gc);
>>>>
>>>> @@ -230,6 +238,3 @@
>>>>
>>>>
>>>>
>>>> -
>>>> -
>>>> -
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
>>>> @@ -39,7 +39,8 @@
>>>>
>>>>  #include "../common/gc_for_barrier.h"
>>>>
>>>> -/*
>>>> +
>>>> + /*
>>>>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>>>>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>>>>  */
>>>> @@ -336,19 +337,7 @@
>>>>   return TRUE;
>>>>  }
>>>>
>>>> -extern volatile Boolean obj_alloced_live;
>>>> -inline Boolean is_obj_alloced_live()
>>>> -{ return obj_alloced_live;  }
>>>>
>>>> -inline void gc_enable_alloc_obj_live()
>>>> -{
>>>> -  obj_alloced_live = TRUE;
>>>> -}
>>>> -
>>>> -inline void gc_disable_alloc_obj_live()
>>>> -{
>>>> -  obj_alloced_live = FALSE;
>>>> -}
>>>>
>>>>  /***************************************************************/
>>>>
>>>> @@ -391,7 +380,7 @@
>>>>  /***************************************************************/
>>>>
>>>>  /* all GCs inherit this GC structure */
>>>> -struct Marker;
>>>> +struct Conclctor;
>>>>  struct Mutator;
>>>>  struct Collector;
>>>>  struct GC_Metadata;
>>>> @@ -421,9 +410,12 @@
>>>>   unsigned int num_collectors;
>>>>   unsigned int num_active_collectors; /* not all collectors are working */
>>>>
>>>> -  Marker** markers;
>>>> -  unsigned int num_markers;
>>>> +  /*concurrent markers and collectors*/
>>>> +  Conclctor** conclctors;
>>>> +  unsigned int num_conclctors;
>>>> +  //unsigned int num_active_conclctors;
>>>>   unsigned int num_active_markers;
>>>> +  unsigned int num_active_sweepers;
>>>>
>>>>   /* metadata is the pool for rootset, tracestack, etc. */
>>>>   GC_Metadata* metadata;
>>>> @@ -443,7 +435,7 @@
>>>>
>>>>   Space_Tuner* tuner;
>>>>
>>>> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>>   Collection_Scheduler* collection_scheduler;
>>>>
>>>>   SpinLock lock_con_mark;
>>>> @@ -488,11 +480,15 @@
>>>>
>>>>  GC* gc_parse_options();
>>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
>>>> +void gc_relaim_heap_con_mode( GC *gc);
>>>>  void gc_prepare_rootset(GC* gc);
>>>>
>>>>
>>>> -int64 get_collection_end_time();
>>>> -void set_collection_end_time();
>>>> +int64 get_gc_start_time();
>>>> +void set_gc_start_time();
>>>> +
>>>> +int64 get_gc_end_time();
>>>> +void set_gc_end_time();
>>>>
>>>>  /* generational GC related */
>>>>
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -17,325 +17,582 @@
>>>>  #include "gc_common.h"
>>>>  #include "gc_metadata.h"
>>>>  #include "../thread/mutator.h"
>>>> -#include "../thread/marker.h"
>>>> +#include "../thread/conclctor.h"
>>>>  #include "../thread/collector.h"
>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>>  #include "../gen/gen.h"
>>>>  #include "../mark_sweep/gc_ms.h"
>>>> +#include "../mark_sweep/wspace_mark_sweep.h"
>>>>  #include "interior_pointer.h"
>>>>  #include "collection_scheduler.h"
>>>>  #include "gc_concurrent.h"
>>>>  #include "../common/gc_for_barrier.h"
>>>> +#include "concurrent_collection_scheduler.h"
>>>> +#include "../verify/verify_live_heap.h"
>>>>
>>>> -volatile Boolean concurrent_in_marking  = FALSE;
>>>> -volatile Boolean concurrent_in_sweeping = FALSE;
>>>> -volatile Boolean mark_is_concurrent     = FALSE;
>>>> -volatile Boolean sweep_is_concurrent    = FALSE;
>>>> +struct Con_Collection_Statistics;
>>>>
>>>>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>>>>
>>>> -static void gc_check_con_mark(GC* gc)
>>>> +//just debugging
>>>> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>>>>  {
>>>> -  if(!is_mark_finished(gc)){
>>>> -    lock(gc->lock_con_mark);
>>>> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -      //ignore.
>>>> -    }
>>>> -    unlock(gc->lock_con_mark);
>>>> -  }
>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
>>>> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
>>>> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>>>>  }
>>>>
>>>> -static void gc_wait_con_mark_finish(GC* gc)
>>>> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>>>>  {
>>>> -  wait_mark_finish(gc);
>>>> -  gc_set_barrier_function(WB_REM_NIL);
>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>>  }
>>>> +
>>>> +volatile Boolean obj_alloced_live;
>>>>
>>>> -unsigned int gc_decide_marker_number(GC* gc);
>>>> +void gc_enable_alloc_obj_live(GC *gc)
>>>> +{
>>>> +  gc_con_update_stat_before_enable_alloc_live(gc);
>>>> +  obj_alloced_live = TRUE;
>>>> +}
>>>>
>>>> -void gc_start_con_mark(GC* gc)
>>>> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>>>>  {
>>>> -  int disable_count;
>>>> -  unsigned int num_marker;
>>>> -
>>>> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
>>>> -
>>>> -  lock(gc->lock_enum);
>>>> -  disable_count = hythread_reset_suspend_disable();
>>>> -  int64 pause_start = time_now();
>>>> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>> -  gc_prepare_rootset(gc);
>>>> -
>>>> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>
>>>> -  num_marker = gc_decide_marker_number(gc);
>>>> -
>>>> -  /*start concurrent mark*/
>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>>> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>>> -    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>> +      continue;
>>>> +    num_live_obj += conclctor->live_obj_num;
>>>> +    size_live_obj += conclctor->live_obj_size;
>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>> +    conclctor->live_obj_num = 0;
>>>> +    conclctor->live_obj_size = 0;
>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>   }
>>>>
>>>> -  unlock(gc->lock_enum);
>>>> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>>> -  vm_resume_threads_after();
>>>> -  assert(hythread_is_suspend_enabled());
>>>> -  hythread_set_suspend_disable(disable_count);
>>>> -
>>>> -  unlock(gc->lock_con_mark);
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  con_collection_stat->live_size_marked += size_live_obj;
>>>> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>> +
>>>>  }
>>>>
>>>> -void mostly_con_mark_terminate_reset();
>>>> -void terminate_mostly_con_mark();
>>>> -
>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
>>>> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
>>>> +//called by the marker when it finishes
>>>> +void gc_con_update_stat_after_marking(GC *gc)
>>>>  {
>>>> -  gc_check_con_mark(gc);
>>>> -
>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY))
>>>> -    terminate_mostly_con_mark();
>>>> -
>>>> -  gc_wait_con_mark_finish(gc);
>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>>
>>>> -  int disable_count;
>>>> -  if(need_STW){
>>>> -    /*suspend the mutators.*/
>>>> -    lock(gc->lock_enum);
>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -      /*In mostly concurrent algorithm, there's a final marking pause.
>>>> -            Prepare root set for final marking.*/
>>>> -      disable_count = hythread_reset_suspend_disable();
>>>> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>> -      gc_prepare_rootset(gc);
>>>> -    }else{
>>>> -      disable_count = vm_suspend_all_threads();
>>>> -    }
>>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>>> +    Conclctor* conclctor = gc->conclctors[i];
>>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>>> +      continue;
>>>> +    num_live_obj += conclctor->live_obj_num;
>>>> +    size_live_obj += conclctor->live_obj_size;
>>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>>> +    conclctor->live_obj_num = 0;
>>>> +    conclctor->live_obj_size = 0;
>>>> +    conclctor->num_dirty_slots_traced = 0;
>>>>   }
>>>>
>>>> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -    /*In mostly concurrent algorithm, there's a final marking pause.
>>>> -          Suspend the mutators once again and finish the marking phase.*/
>>>> -
>>>> -    /*prepare dirty object*/
>>>> -    gc_prepare_dirty_set(gc);
>>>> -
>>>> -    gc_set_weakref_sets(gc);
>>>> -
>>>> -    /*start STW mark*/
>>>> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>>> -
>>>> -    mostly_con_mark_terminate_reset();
>>>> -    gc_clear_dirty_set(gc);
>>>> -  }
>>>> -
>>>> -  gc_reset_dirty_set(gc);
>>>> -
>>>> -  if(need_STW){
>>>> -    unlock(gc->lock_enum);
>>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>>> -      vm_resume_threads_after();
>>>> -      assert(hythread_is_suspend_enabled());
>>>> -      hythread_set_suspend_disable(disable_count);
>>>> -    }else{
>>>> -      vm_resume_all_threads(disable_count);
>>>> -    }
>>>> -  }
>>>> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
>>>> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>>
>>>> +   /*statistics information update (marking_end_time, trace_rate) */
>>>> +  con_collection_stat->marking_end_time = time_now();
>>>> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>> +
>>>> +  unsigned int heap_size =
>>>> +       con_collection_stat->surviving_size_at_gc_end +
>>>> +       gc_get_mutator_new_obj_size(gc);
>>>> +
>>>> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
>>>> +
>>>> +
>>>> +
>>>> +  /*
>>>> +  //statistics just for debugging
>>>> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
>>>> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
>>>> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
>>>> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
>>>> +  */
>>>>  }
>>>>
>>>> -void gc_reset_con_mark(GC* gc)
>>>> +void gc_PSTW_update_stat_after_marking(GC *gc)
>>>>  {
>>>> -  gc->num_active_markers = 0;
>>>> -  gc_mark_unset_concurrent();
>>>> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  con_collection_stat->live_size_marked = size_live_obj;
>>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>> +
>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>>  }
>>>>
>>>> -int64 gc_get_con_mark_time(GC* gc)
>>>> +//Called only when heap is exhuaset
>>>> +void gc_con_update_stat_heap_exhausted(GC* gc)
>>>>  {
>>>> -  int64 time_mark = 0;
>>>> -  Marker** markers = gc->markers;
>>>> -  unsigned int i;
>>>> -  for(i = 0; i < gc->num_active_markers; i++){
>>>> -    Marker* marker = markers[i];
>>>> -    if(marker->time_mark > time_mark){
>>>> -      time_mark = marker->time_mark;
>>>> -    }
>>>> -    marker->time_mark = 0;
>>>> -  }
>>>> -  return time_mark;
>>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
>>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>>>>  }
>>>>
>>>> -void gc_start_con_sweep(GC* gc)
>>>> +
>>>> +//just debugging
>>>> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>>>>  {
>>>> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
>>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>>
>>>> -  /*FIXME: enable finref*/
>>>> -  if(!IGNORE_FINREF ){
>>>> -    gc_set_obj_with_fin(gc);
>>>> -    Collector* collector = gc->collectors[0];
>>>> -    collector_identify_finref(collector);
>>>> -#ifndef BUILD_IN_REFERENT
>>>> -  }else{
>>>> -    gc_set_weakref_sets(gc);
>>>> -    gc_update_weakref_ignore_finref(gc);
>>>> -#endif
>>>> +  unsigned int num_collectors = gc->num_active_collectors;
>>>> +  Collector** collectors = gc->collectors;
>>>> +  unsigned int i;
>>>> +  for(i = 0; i < num_collectors; i++){
>>>> +    Collector* collector = collectors[i];
>>>> +    num_live_obj += collector->live_obj_num;
>>>> +    size_live_obj += collector->live_obj_size;
>>>> +    collector->live_obj_num = 0;
>>>> +    collector->live_obj_size = 0;
>>>>   }
>>>> +
>>>> +  return size_live_obj;
>>>> +}
>>>>
>>>> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
>>>> +//Called when Con GC ends, must called in a STW period
>>>> +void gc_reset_con_space_stat(GC *gc)
>>>> +{
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>>>>
>>>> -  gc_set_weakref_sets(gc);
>>>> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
>>>> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
>>>> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
>>>> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
>>>> +  }
>>>> +
>>>> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
>>>> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
>>>> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
>>>> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>>>>
>>>> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
>>>> -      So, when this assumption changed, we should modified the below function.*/
>>>> -  gc_identify_dead_weak_roots(gc);
>>>>
>>>> -  /*start concurrent mark*/
>>>> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
>>>> +  /*
>>>> +  //just debugging
>>>> +  if( !gc_is_specify_con_sweep() ) {
>>>> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
>>>> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
>>>> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
>>>> +  }*/
>>>>
>>>> -  unlock(gc->lock_con_sweep);
>>>> +  int64 current_time = time_now();
>>>> +
>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>> +       unsigned int gc_interval_time = 0;
>>>> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
>>>> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
>>>> +       else
>>>> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
>>>> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
>>>> +       gc_update_scheduler_parameter(gc);
>>>> +  } else {
>>>> +     gc_force_update_scheduler_parameter(gc);
>>>> +  }
>>>> +
>>>> +  con_collection_stat->gc_end_time = current_time;
>>>> +
>>>> +  con_collection_stat->live_size_marked = 0;
>>>> +  con_collection_stat->live_alloc_size = 0;
>>>> +  con_collection_stat->alloc_size_before_alloc_live = 0;
>>>> +  con_collection_stat->marking_start_time = 0;
>>>> +  con_collection_stat->marking_end_time = 0;
>>>> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
>>>> +  con_collection_stat->pause_start_time = 0;
>>>> +  assert(con_collection_stat->heap_utilization_rate<1);
>>>> +
>>>>  }
>>>>
>>>> -void gc_reset_con_sweep(GC* gc)
>>>> +void gc_con_stat_information_out(GC *gc)
>>>>  {
>>>> -  gc->num_active_collectors = 0;
>>>> -  gc_sweep_unset_concurrent();
>>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
>>>> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>>> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
>>>> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
>>>> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
>>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>>  }
>>>>
>>>> -void gc_wait_con_sweep_finish(GC* gc)
>>>> +void gc_reset_after_con_collection(GC* gc)
>>>>  {
>>>> -  wait_collection_finish(gc);
>>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>>> +  assert(gc_is_specify_con_gc());
>>>> +  int64 reset_start = time_now();
>>>> +  if(!IGNORE_FINREF ){
>>>> +    INFO2("gc.process", "GC: finref process after collection ...\n");
>>>> +    gc_put_finref_to_vm(gc);
>>>> +    gc_reset_finref_metadata(gc);
>>>> +    gc_activate_finref_threads((GC*)gc);
>>>> +#ifndef BUILD_IN_REFERENT
>>>> +  } else {
>>>> +    gc_clear_weakref_pools(gc);
>>>> +    gc_clear_finref_repset_pool(gc);
>>>> +#endif
>>>> +  }
>>>> +  reset_start = time_now();
>>>> +  gc_reset_con_space_stat(gc);
>>>> +  gc_clear_conclctor_role(gc);
>>>> +  vm_reclaim_native_objs();
>>>>  }
>>>>
>>>> -void gc_finish_con_sweep(GC * gc)
>>>> +
>>>> +
>>>> +void gc_set_default_con_algo()
>>>>  {
>>>> -  gc_wait_con_sweep_finish(gc);
>>>> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>>  }
>>>>
>>>> -void gc_try_finish_con_phase(GC * gc)
>>>> +void gc_decide_con_algo(char* concurrent_algo)
>>>>  {
>>>> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
>>>> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
>>>> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
>>>> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
>>>> -          here to guarantee this occasional case.*/
>>>> -    if(try_lock(gc->lock_con_mark)){
>>>> -      unlock(gc->lock_con_mark);
>>>> -      gc_finish_con_mark(gc, TRUE);
>>>> -    }
>>>> -  }
>>>> -
>>>> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
>>>> -    //The reason is same as concurrent mark above.
>>>> -    if(try_lock(gc->lock_con_sweep)){
>>>> -      unlock(gc->lock_con_sweep);
>>>> -      gc_finish_con_sweep(gc);
>>>> -    }
>>>> +  string_to_upper(concurrent_algo);
>>>> +  GC_PROP &= ~ALGO_CON_MASK;
>>>> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>> +    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>> +    GC_PROP |= ALGO_CON_MOSTLY;
>>>> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>> +    GC_PROP |= ALGO_CON_OTF_REF;
>>>>   }
>>>>  }
>>>>
>>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>>>>
>>>> -void gc_reset_after_con_collect(GC* gc)
>>>> +/*
>>>> +    gc start enumeration phase, now, it is in a stop-the-world manner
>>>> +*/
>>>> +void gc_start_con_enumeration(GC * gc)
>>>>  {
>>>> -  assert(gc_is_specify_con_gc());
>>>> -
>>>> -  int64 time_mutator = gc_get_mutator_time(gc);
>>>> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
>>>> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>> +  gc_prepare_rootset(gc);
>>>> +}
>>>>
>>>> -  gc_reset_interior_pointer_table();
>>>> +//unsigned int gc_decide_marker_number(GC* gc);
>>>> +unsigned int gc_get_marker_number(GC* gc);
>>>> +/*  gc start marking phase */
>>>> +void gc_start_con_marking(GC *gc)
>>>> +{
>>>> +  unsigned int num_marker;
>>>> +  num_marker = gc_get_marker_number(gc);
>>>>
>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>> -
>>>> -  if(gc_mark_is_concurrent()){
>>>> -    gc_reset_con_mark(gc);
>>>> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
>>>> +    gc_enable_alloc_obj_live(gc);
>>>> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>>> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>>> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>>> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
>>>> +    gc_enable_alloc_obj_live(gc);
>>>> +    gc_set_barrier_function(WB_REM_OLD_VAR);
>>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>>   }
>>>> +}
>>>> +
>>>>
>>>> -  if(gc_sweep_is_concurrent()){
>>>> -    gc_reset_con_sweep(gc);
>>>> +/*
>>>> +    gc start sweeping phase
>>>> +*/
>>>> +void gc_prepare_sweeping(GC *gc) {
>>>> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
>>>> +  /*FIXME: enable finref*/
>>>> +  if(!IGNORE_FINREF ){
>>>> +    gc_set_obj_with_fin(gc);
>>>> +    Collector* collector = gc->collectors[0];
>>>> +    collector_identify_finref(collector);
>>>> +  #ifndef BUILD_IN_REFERENT
>>>> +  } else {
>>>> +    conclctor_set_weakref_sets(gc);
>>>> +    gc_update_weakref_ignore_finref(gc);
>>>> +  #endif
>>>>   }
>>>> +  gc_identify_dead_weak_roots(gc);
>>>>  }
>>>>
>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
>>>> -{
>>>> +int64 get_last_check_point();
>>>> +// for the case pure stop the world
>>>> +static void gc_partial_con_PSTW( GC *gc) {
>>>>   int64 time_collection_start = time_now();
>>>> -
>>>> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
>>>> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
>>>> +  // stop the world enumeration
>>>>   gc->num_collections++;
>>>> -
>>>> -  lock(gc->lock_enum);
>>>> -
>>>>   int disable_count = hythread_reset_suspend_disable();
>>>>   gc_set_rootset_type(ROOTSET_IS_REF);
>>>>   gc_prepare_rootset(gc);
>>>> -  unlock(gc->lock_enum);
>>>> -
>>>> -  if(gc_sweep_is_concurrent()){
>>>> -    if(gc_con_is_in_sweeping())
>>>> -      gc_finish_con_sweep(gc);
>>>> -  }else{
>>>> -    if(gc_con_is_in_marking()){
>>>> -      gc_finish_con_mark(gc, FALSE);
>>>> -    }
>>>> -    gc->in_collection = TRUE;
>>>> -    gc_reset_mutator_context(gc);
>>>> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>> -    gc_ms_reclaim_heap((GC_MS*)gc);
>>>> -  }
>>>> -
>>>> -  int64 time_collection = 0;
>>>> -  if(gc_mark_is_concurrent()){
>>>> -    time_collection = gc_get_con_mark_time(gc);
>>>> -    gc_reset_con_mark(gc);
>>>> -  }else{
>>>> -    time_collection = time_now()-time_collection_start;
>>>> -  }
>>>> +
>>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>>> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>>> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>>> +  }
>>>> +
>>>> +  //reclaim heap
>>>> +  gc_reset_mutator_context(gc);
>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>> +
>>>> +  //update live size
>>>> +  gc_PSTW_update_stat_after_marking(gc);
>>>> +
>>>> +  // reset the collection and resume mutators
>>>> +  gc_reset_after_con_collection(gc);
>>>>
>>>> -  if(gc_sweep_is_concurrent()){
>>>> -    gc_reset_con_sweep(gc);
>>>> -  }
>>>> -
>>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>>> -
>>>> -  gc_start_mutator_time_measure(gc);
>>>> -
>>>> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>>>>   vm_resume_threads_after();
>>>>   assert(hythread_is_suspend_enabled());
>>>> -  hythread_set_suspend_disable(disable_count);
>>>> -  int64 pause_time = time_now()-time_collection_start;
>>>> -
>>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
>>>> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>> -  }else{
>>>> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>>> -  }
>>>> -  return;
>>>> +  hythread_set_suspend_disable(disable_count);
>>>>  }
>>>>
>>>> -void gc_set_default_con_algo()
>>>> -{
>>>> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>>> -  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>> +void terminate_mostly_con_mark();
>>>> +void wspace_mostly_con_final_mark( GC *gc );
>>>> +
>>>> +// for the case concurrent marking is not finished before heap is exhausted
>>>> +static void gc_partial_con_PMSS(GC *gc) {
>>>> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>> +  // wait concurrent marking finishes
>>>> +  int64 wait_start = time_now();
>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
>>>> +             gc->gc_concurrent_status == GC_CON_TRACING ||
>>>> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
>>>> +  {
>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>> +  }
>>>> +
>>>> +  /*just debugging*/
>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>> +    int64 pause_time = time_now() - wait_start;
>>>> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
>>>> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
>>>> +
>>>> +  // start STW reclaiming heap
>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>> +  gc_reset_mutator_context(gc);
>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>> +
>>>> +  // reset after partial stop the world collection
>>>> +  gc_reset_after_con_collection(gc);
>>>> +  set_con_nil(gc);
>>>> +}
>>>> +
>>>> +// only when current sweep is set to false
>>>> +static void gc_partial_con_CMSS(GC *gc) {
>>>> +
>>>> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>>> +
>>>> +  /*just debugging*/
>>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
>>>> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
>>>> +
>>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>> +
>>>> +  // start reclaiming heap, it will skip the marking phase
>>>> +  gc_reset_mutator_context(gc);
>>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>>> +
>>>> +  // reset after partial stop the world collection
>>>> +  gc_reset_after_con_collection(gc);
>>>> +  set_con_nil(gc);
>>>> +}
>>>> +
>>>> +void gc_merge_free_list_global(GC *gc);
>>>> +//for the case concurrent marking and partial concurrent sweeping
>>>> +static void gc_partial_con_CMPS( GC *gc ) {
>>>> +
>>>> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
>>>> +      vm_thread_yield();  //let the unfinished sweeper run
>>>> +  }
>>>> +  gc_merge_free_list_global(gc);
>>>> +  // reset after partial stop the world collection
>>>> +  gc_reset_after_con_collection(gc);
>>>> +  set_con_nil(gc);
>>>> +}
>>>> +
>>>> +
>>>> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
>>>> +  switch( type ) {
>>>> +    case GC_PARTIAL_PSTW :
>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
>>>> +      break;
>>>> +    case GC_PARTIAL_PMSS :
>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
>>>> +      break;
>>>> +    case GC_PARTIAL_CMPS :
>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
>>>> +      break;
>>>> +    case GC_PARTIAL_CMSS :
>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
>>>> +      break;
>>>> +    case GC_PARTIAL_FCSR :
>>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
>>>> +      break;
>>>> +  }
>>>> +}
>>>> +
>>>> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
>>>> +{
>>>> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
>>>> +      vm_thread_yield();
>>>> +   }
>>>> +
>>>> +   int64 final_start = time_now();
>>>> +   int disable_count = hythread_reset_suspend_disable();
>>>> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
>>>> +   gc_prepare_rootset(gc);
>>>> +
>>>> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
>>>> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
>>>> +
>>>> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>>> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
>>>> +      vm_thread_yield(); //let the unfinished marker run
>>>> +   }
>>>> +
>>>> +   //final marking phase
>>>> +   gc_clear_conclctor_role(gc);
>>>> +   wspace_mostly_con_final_mark(gc);
>>>> +
>>>> +   /*just debugging*/
>>>> +   int64 final_time = time_now() - final_start;
>>>> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
>>>> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
>>>> +
>>>> +  // start STW reclaiming heap
>>>> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>>> +   gc_reset_mutator_context(gc);
>>>> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>>> +   gc_ms_reclaim_heap((GC_MS*)gc);
>>>> +
>>>> +   // reset after partial stop the world collection
>>>> +   gc_reset_after_con_collection(gc);
>>>> +   set_con_nil(gc);
>>>> +
>>>> +   vm_resume_threads_after();
>>>> +   hythread_set_suspend_disable(disable_count);
>>>> +   return GC_PARTIAL_PMSS;
>>>> +
>>>> +}
>>>> +
>>>> +static unsigned int gc_con_heap_full_otf( GC *gc )
>>>> +{
>>>> +   unsigned int partial_type; //for time measuring and debugging
>>>> +   int disable_count = vm_suspend_all_threads();
>>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +   con_collection_stat->pause_start_time = time_now();
>>>> +   switch(gc->gc_concurrent_status) {
>>>> +       case GC_CON_START_MARKERS :
>>>> +       case GC_CON_TRACING :
>>>> +       case GC_CON_TRACE_DONE :
>>>> +         partial_type = GC_PARTIAL_PMSS;
>>>> +         gc_partial_con_PMSS(gc);
>>>> +         break;
>>>> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
>>>> +         partial_type = GC_PARTIAL_CMSS;
>>>> +         gc_partial_con_CMSS(gc);
>>>> +         break;
>>>> +       case GC_CON_SWEEPING :
>>>> +       case GC_CON_SWEEP_DONE :
>>>> +         partial_type = GC_PARTIAL_CMPS;
>>>> +         gc_partial_con_CMPS(gc);
>>>> +         break;
>>>> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>> +         gc_merge_free_list_global(gc);
>>>> +         gc_reset_after_con_collection(gc);
>>>> +         set_con_nil(gc);
>>>> +         break;
>>>> +       case GC_CON_RESET :
>>>> +       case GC_CON_NIL :
>>>> +       case GC_CON_STW_ENUM :
>>>> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
>>>> +         partial_type = GC_PARTIAL_FCSR;
>>>> +         break;
>>>> +       /* other state is illegal here */
>>>> +       default:
>>>> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
>>>> +         RAISE_ERROR;
>>>> +    }
>>>> +    vm_resume_all_threads(disable_count);
>>>> +    return partial_type;
>>>>  }
>>>>
>>>> -void gc_decide_con_algo(char* concurrent_algo)
>>>> -{
>>>> -  string_to_upper(concurrent_algo);
>>>> -  GC_PROP &= ~ALGO_CON_MASK;
>>>> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>>> -    GC_PROP |= ALGO_CON_OTF_OBJ;
>>>> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>>> -    GC_PROP |= ALGO_CON_MOSTLY;
>>>> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>>> -    GC_PROP |= ALGO_CON_OTF_REF;
>>>> +void gc_con_stat_information_out(GC *gc);
>>>> +/*
>>>> +this method is called before STW gc start, there is a big lock outside
>>>> +*/
>>>> +void gc_wait_con_finish( GC* gc ) {
>>>> +  int64 time_collection_start = time_now();
>>>> +  unsigned int partial_type; //for time measuring and debugging
>>>> +
>>>> +   /* cocurrent gc is idle */
>>>> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
>>>> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>>> +        con_collection_stat->gc_start_time = time_now();
>>>> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
>>>> +        partial_type = GC_PARTIAL_PSTW;
>>>> +        gc_partial_con_PSTW( gc );
>>>> +   } else {
>>>> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
>>>> +          hythread_safe_point();
>>>> +          vm_thread_yield();
>>>> +       }
>>>> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
>>>> +         partial_type = gc_con_heap_full_mostly_con(gc);
>>>> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>>> +         partial_type = gc_con_heap_full_otf(gc);
>>>> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
>>>> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
>>>> +              hythread_safe_point();
>>>> +              vm_thread_yield();
>>>> +            }
>>>> +         }
>>>> +       }
>>>> +       else
>>>> +         RAISE_ERROR;
>>>> +   }
>>>> +
>>>> +  int64 pause_time = time_now()-time_collection_start;
>>>> +  gc_con_stat_information_out(gc);
>>>> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
>>>> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
>>>> +  } else {
>>>> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>>>>   }
>>>>  }
>>>> +
>>>> +
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
>>>> @@ -19,21 +19,69 @@
>>>>  #define _GC_CONCURRENT_H_
>>>>  #include "gc_common.h"
>>>>
>>>> -enum GC_CONCURRENT_STATUS{
>>>> -  GC_CON_STATUS_NIL = 0x00,
>>>> -  GC_CON_MARK_PHASE = 0x01,
>>>> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
>>>> -  GC_CON_SWEEP_PHASE = 0x02
>>>> +
>>>> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
>>>> +inline unsigned int trans_time_unit(int64 x)
>>>> +{
>>>> +  int64 result = x>>10;
>>>> +  if(result) return (unsigned int)result;
>>>> +  return 1;
>>>> +}
>>>> +
>>>> +#define RAISE_ERROR  assert(0);
>>>> +/* concurrent collection states in new design */
>>>> +enum GC_CONCURRENT_STATUS {
>>>> +  GC_CON_NIL = 0x00,
>>>> +  GC_CON_STW_ENUM = 0x01,
>>>> +  GC_CON_START_MARKERS = 0x02,
>>>> +  GC_CON_TRACING = 0x03,
>>>> +  GC_CON_TRACE_DONE = 0x04,
>>>> +  GC_CON_BEFORE_SWEEP = 0x05,
>>>> +  GC_CON_SWEEPING = 0x06,
>>>> +  GC_CON_SWEEP_DONE = 0x07,
>>>> +  GC_CON_BEFORE_FINISH = 0x08,
>>>> +  GC_CON_RESET = 0x09,
>>>> +  GC_CON_DISABLE = 0x0A,
>>>> +};
>>>> +
>>>> +// this type is just for debugging and time measuring
>>>> +enum GC_PARTIAL_STW_TYPE {
>>>> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
>>>> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
>>>> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
>>>> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
>>>> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>>>>  };
>>>>
>>>>  enum HANDSHAKE_SINGAL{
>>>>   HSIG_MUTATOR_SAFE = 0x0,
>>>> -
>>>>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>>>>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>>>>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>>>>  };
>>>>
>>>> +typedef struct Con_Collection_Statistics {
>>>> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
>>>> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
>>>> +    POINTER_SIZE_INT live_alloc_size;
>>>> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
>>>> +
>>>> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
>>>> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
>>>> +
>>>> +    float heap_utilization_rate;
>>>> +
>>>> +    int64 gc_start_time;
>>>> +    int64 gc_end_time;
>>>> +
>>>> +    int64 marking_start_time;
>>>> +    int64 marking_end_time;
>>>> +
>>>> +    int64 sweeping_time;
>>>> +    int64 pause_start_time;
>>>> +
>>>> +} Con_Space_Statistics;
>>>> +
>>>>  inline void gc_set_con_gc(unsigned int con_phase)
>>>>  { GC_PROP |= con_phase;  }
>>>>
>>>> @@ -58,107 +106,101 @@
>>>>  inline Boolean gc_is_specify_con_sweep()
>>>>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>>>>
>>>> -extern volatile Boolean concurrent_in_marking;
>>>> -extern volatile Boolean concurrent_in_sweeping;
>>>> -extern volatile Boolean mark_is_concurrent;
>>>> -extern volatile Boolean sweep_is_concurrent;
>>>>
>>>> -inline Boolean gc_mark_is_concurrent()
>>>> -{
>>>> -  return mark_is_concurrent;
>>>> -}
>>>> +extern volatile Boolean obj_alloced_live;
>>>>
>>>> -inline void gc_mark_set_concurrent()
>>>> -{
>>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
>>>> -    gc_enable_alloc_obj_live();
>>>> -  mark_is_concurrent = TRUE;
>>>> -}
>>>> +inline Boolean is_obj_alloced_live()
>>>> +{ return obj_alloced_live;  }
>>>>
>>>> -inline void gc_mark_unset_concurrent()
>>>> -{
>>>> -  gc_disable_alloc_obj_live();
>>>> -  mark_is_concurrent = FALSE;
>>>> +inline void gc_disable_alloc_obj_live(GC *gc)
>>>> +{
>>>> +  obj_alloced_live = FALSE;
>>>>  }
>>>>
>>>> -inline Boolean gc_con_is_in_marking()
>>>> +void gc_enable_alloc_obj_live(GC * gc);
>>>> +
>>>> +/*
>>>> +    tranform the states across the collection process,
>>>> +  which should be a atomic operation because there are several collector run parallel
>>>> +*/
>>>> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>>>>  {
>>>> -  return concurrent_in_marking;
>>>> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
>>>> +  if( old_state != from_state )
>>>> +    return FALSE;
>>>> +  else
>>>> +    return TRUE;
>>>>  }
>>>>
>>>> -inline Boolean gc_con_is_in_marking(GC* gc)
>>>> -{
>>>> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
>>>> +/* set concurrent to idle,
>>>> +    Or enable concurrent gc, called when STW gc finishes
>>>> + */
>>>> +inline void set_con_nil( GC *gc ) {
>>>> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>>>>  }
>>>>
>>>> -inline Boolean gc_sweep_is_concurrent()
>>>> -{
>>>> -  return sweep_is_concurrent;
>>>> +
>>>> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
>>>> +void gc_start_con_enumeration(GC * gc);
>>>> +
>>>> +/* gc start marking phase */
>>>> +void gc_start_con_marking(GC *gc);
>>>> +
>>>> +
>>>> +/* prepare for sweeping */
>>>> +void gc_prepare_sweeping(GC *gc);
>>>> +
>>>> +/* gc start sweeping phase */
>>>> +void gc_start_con_sweeping(GC *gc);
>>>> +
>>>> +/* gc finish concurrent collection */
>>>> +void gc_con_final_work(GC* gc);
>>>> +
>>>> +
>>>> +/* gc wait cocurrent collection finishes */
>>>> +void gc_wait_con_finish( GC* gc );
>>>> +
>>>> +/* is in gc marking phase */
>>>> +inline Boolean in_con_marking_phase( GC *gc ) {
>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>>>>  }
>>>>
>>>> -inline void gc_sweep_set_concurrent()
>>>> -{
>>>> -  sweep_is_concurrent = TRUE;
>>>> +/* is in gc sweeping phase */
>>>> +inline Boolean in_con_sweeping_phase( GC *gc ) {
>>>> +  unsigned int status = gc->gc_concurrent_status;
>>>> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>>>>  }
>>>>
>>>> -inline void gc_sweep_unset_concurrent()
>>>> -{
>>>> -  sweep_is_concurrent = FALSE;
>>>> +inline Boolean in_con_idle( GC *gc ) {
>>>> +  return gc->gc_concurrent_status == GC_CON_NIL;
>>>>  }
>>>>
>>>> -inline Boolean gc_con_is_in_sweeping()
>>>> -{
>>>> -  return concurrent_in_sweeping;
>>>> +inline Boolean gc_con_is_in_STW( GC *gc ) {
>>>> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>>>>  }
>>>>
>>>> -inline Boolean gc_con_is_in_sweeping(GC* gc)
>>>> -{
>>>> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
>>>> +/* is gc ready to sweeping */
>>>> +inline Boolean in_con_ready_sweep( GC *gc ) {
>>>> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>>>>  }
>>>>
>>>> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
>>>> -{
>>>> -  /*Reset status*/
>>>> -  concurrent_in_marking = FALSE;
>>>> -  concurrent_in_sweeping = FALSE;
>>>> -
>>>> -  gc->gc_concurrent_status = status;
>>>> -  switch(status){
>>>> -    case GC_CON_MARK_PHASE:
>>>> -      gc_mark_set_concurrent();
>>>> -      concurrent_in_marking = TRUE;
>>>> -      break;
>>>> -    case GC_CON_SWEEP_PHASE:
>>>> -      gc_sweep_set_concurrent();
>>>> -      concurrent_in_sweeping = TRUE;
>>>> -      break;
>>>> -    default:
>>>> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
>>>> -  }
>>>> +/* is gc sweeping */
>>>> +inline Boolean in_con_sweep( GC *gc ) {
>>>> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>>>>
>>>> -  return;
>>>>  }
>>>>
>>>> -void gc_reset_con_mark(GC* gc);
>>>> -void gc_start_con_mark(GC* gc);
>>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
>>>> -int64 gc_get_con_mark_time(GC* gc);
>>>> -
>>>> -void gc_start_con_sweep(GC* gc);
>>>> -void gc_finish_con_sweep(GC * gc);
>>>> +void gc_con_update_stat_after_marking( GC *gc );
>>>>
>>>> -void gc_reset_after_con_collect(GC* gc);
>>>> -void gc_try_finish_con_phase(GC * gc);
>>>>
>>>>  void gc_decide_con_algo(char* concurrent_algo);
>>>>  void gc_set_default_con_algo();
>>>>
>>>> -void gc_reset_con_sweep(GC* gc);
>>>> -
>>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>>>>
>>>>  extern volatile Boolean gc_sweep_global_normal_chunk;
>>>>
>>>> +
>>>>  inline Boolean gc_is_sweep_global_normal_chunk()
>>>>  { return gc_sweep_global_normal_chunk; }
>>>>
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -18,13 +18,17 @@
>>>>  /**
>>>>  * @author Xiao-Feng Li, 2006/10/05
>>>>  */
>>>> -
>>>> +
>>>> +#include <open/vm_class_info.h>
>>>> +#include <open/vm_class_manipulation.h>
>>>>  #include "../gen/gen.h"
>>>>  #include "../thread/mutator.h"
>>>>  #include "gc_for_barrier.h"
>>>>  #include "../mark_sweep/wspace_mark_sweep.h"
>>>>  #include "../common/gc_concurrent.h"
>>>> +#include "../common/gc_common.h"
>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>> +#include "../verify/verify_live_heap.h"
>>>>
>>>>
>>>>  /* All the write barrier interfaces need cleanup */
>>>> @@ -117,10 +121,8 @@
>>>>     Mutator *mutator = (Mutator *)gc_get_tls();
>>>>
>>>>     //FIXME: Release lock.
>>>> -    lock(mutator->dirty_set_lock);
>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>> -    unlock(mutator->dirty_set_lock);
>>>> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>>   }
>>>>  }
>>>>
>>>> @@ -204,7 +206,8 @@
>>>>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>>>>       }
>>>>     }
>>>> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
>>>> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>>   }
>>>>  }
>>>> @@ -215,32 +218,141 @@
>>>>   REF* p_obj_slot = (REF*) p_slot ;
>>>>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>>>>   if(p_obj && obj_need_remember_oldvar(p_obj)){
>>>> +    mutator->dirty_obj_num++;
>>>>     mutator_dirtyset_add_entry(mutator, p_obj);
>>>>   }
>>>>  }
>>>>
>>>> +/*
>>>> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
>>>> +{
>>>> +  //Mutator *mutator = (Mutator *)gc_get_tls();
>>>> +
>>>> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
>>>> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
>>>> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
>>>> +
>>>> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
>>>> +     analyze_bad_obj(src_obj);
>>>> +     RAISE_ERROR;
>>>> +  }
>>>> +
>>>> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
>>>> +     analyze_bad_obj(sub_obj);
>>>> +     INFO2("gc.verifier", "[source object]");
>>>> +     analyze_bad_obj(src_obj);
>>>> +     //RAISE_ERROR;
>>>> +     return;
>>>> +  }
>>>> +
>>>> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
>>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
>>>> +     analyze_bad_obj(target_obj);
>>>> +     RAISE_ERROR;
>>>> +  }
>>>> +
>>>> +  *p_slot = p_target;
>>>> +}
>>>> +*/
>>>>  //===========================================
>>>>
>>>>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>>>>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>>>>  {  assert(0); }
>>>>
>>>> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
>>>> +
>>>> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
>>>> +{
>>>> +
>>>> +
>>>> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
>>>> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
>>>> +
>>>> +    Class_Handle src_class = src_gcvt->gc_clss;
>>>> +    Class_Handle dst_class = dst_gcvt->gc_clss;
>>>> +
>>>> +
>>>> +       //element size of src should be same as element size of dst
>>>> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
>>>> +       unsigned int elem_size = src_gcvt->array_elem_size;
>>>> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
>>>> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
>>>> +       /*
>>>> +       #ifdef COMPRESS_REFERENCE
>>>> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>> +       #else
>>>> +       #endif
>>>> +       */
>>>> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>>> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>>> +
>>>> +
>>>> +       if(class_is_instanceof(src_class, dst_class)) {
>>>> +         //rem obj before is for OTF GC barriers
>>>> +         if(WB_REM_OLD_VAR == write_barrier_function) {
>>>> +            for (unsigned int count = 0; count < length; count++) {
>>>> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>> +            }
>>>> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>> +         }
>>>> +
>>>> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
>>>> +
>>>> +       } else { //for the condition src is not the type of dst
>>>> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
>>>> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>>> +          }
>>>> +
>>>> +          for (unsigned int count = 0; count < length; count++) {
>>>> +             // 1, null elements copy direct
>>>> +             if (src_copy_body[count] == NULL) {
>>>> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
>>>> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>> +                 }
>>>> +                  dst_copy_body[count] = NULL;
>>>> +                  continue;
>>>> +               }
>>>> +
>>>> +             // 2, For non-null elements check if types are compatible.
>>>> +/*
>>>> +#ifdef COMPRESS_REFERENCE
>>>> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
>>>> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
>>>> +#else
>>>> +#endif
>>>> +*/
>>>> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
>>>> +
>>>> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
>>>> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>> +                      write_barrier_rem_source_obj(dst_array);
>>>> +                  }
>>>> +                  return FALSE;
>>>> +             }
>>>> +
>>>> +             if(WB_REM_OLD_VAR == write_barrier_function) {
>>>> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>>> +             }
>>>> +              dst_copy_body[count] = src_copy_body[count];
>>>> +        }
>>>> +      }
>>>> +
>>>> +    //rem obj after is for mostly concurrent
>>>> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>>> +        write_barrier_rem_source_obj(dst_array);
>>>> +    }
>>>> +
>>>> +    return TRUE;
>>>> +}
>>>> +
>>>> +
>>>> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>>>>  {
>>>> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
>>>> -      we treat it as an new object. It has already been marked when dest object was created.
>>>> -      We use WB_REM_SOURCE_OBJ function here to debug.
>>>> -    */
>>>> -
>>>> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
>>>> -    Mutator *mutator = (Mutator *)gc_get_tls();
>>>> -    lock(mutator->dirty_set_lock);
>>>> -
>>>> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
>>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
>>>> -
>>>> -    unlock(mutator->dirty_set_lock);
>>>> -  }
>>>>
>>>>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>>>>     return;
>>>> @@ -283,6 +395,13 @@
>>>>       write_barrier_rem_slot_oldvar(p_slot);
>>>>       *p_slot = p_target;
>>>>       break;
>>>> +    //just debugging
>>>> +    /*
>>>> +    case WB_CON_DEBUG:
>>>> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
>>>> +       //*p_slot = p_target;
>>>> +       break;
>>>> +    */
>>>>     default:
>>>>       assert(0);
>>>>       return;
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
>>>> @@ -32,7 +32,8 @@
>>>>   WB_REM_SOURCE_REF    = 0x02,
>>>>   WB_REM_OLD_VAR       = 0x03,
>>>>   WB_REM_NEW_VAR       = 0x04,
>>>> -  WB_REM_OBJ_SNAPSHOT  = 0x05
>>>> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
>>>> +  WB_CON_DEBUG = 0x06
>>>>  };
>>>>
>>>>  inline void gc_set_barrier_function(unsigned int wb_function)
>>>> @@ -43,4 +44,3 @@
>>>>  #endif /* _GC_FOR_BARRIER_H_ */
>>>>
>>>>
>>>> -
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -203,4 +203,3 @@
>>>>
>>>>
>>>>
>>>> -
>>>>
>>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>>> ==============================================================================
>>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
>>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
>>>> @@ -30,7 +30,7 @@
>>>>  #include "../mark_sweep/gc_ms.h"
>>>>  #include "../move_compact/gc_mc.h"
>>>>  #include "interior_pointer.h"
>>>> -#include "../thread/marker.h"
>>>> +#include "../thread/conclctor.h"
>>>>  #include "../thread/collector.h"
>>>>  #include "../verify/verify_live_heap.h"
>>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>> @@ -115,7 +115,10 @@
>>>>   collection_scheduler_initialize(gc);
>>>>
>>>>   if(gc_is_specify_con_gc()){
>>>> -    marker_initialize(gc);
>>>> +     gc->gc_concurrent_status = GC_CON_NIL;
>>>> +    conclctor_initialize(gc);
>>>> +  } else {
>>>> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>>>>   }
>>>>
>>>>   collector_initialize(gc);
>>>> @@ -134,6 +137,9 @@
>>>>  {
>>>>   INFO2("gc.process", "GC: call GC wrapup ....");
>>>>   GC* gc =  p_global_gc;
>>>> +  // destruct threads first, and then destruct data structures
>>>> +  conclctor_destruct(gc);
>>>> +  collector_destruct(gc);
>>>>
>>>>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>>>>  gc_ms_destruct((GC_MS*)gc);
>>>> @@ -148,8 +154,6 @@
>>>>  #ifndef BUILD_IN_REFERENT
>>>>   gc_finref_metadata_destruct(gc);
>>>>  #endif
>>>> -  collector_destruct(gc);
>>>> -  marker_destruct(gc);
>>>>
>>>>   if( verify_live_heap ){
>>>>     gc_terminate_heap_verification(gc);
>>>> @@ -446,4 +450,3 @@
>>>>
>>>>
>>>>
>>>> -
>>>>
>>>>
>>>>
>>>
>>>
>>>
>>> --
>>> Unless stated otherwise above:
>>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>>
>>
>>
>>
>> --
>> http://xiao-feng.blogspot.com
>>
>
>
>
> --
> Unless stated otherwise above:
> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>



-- 
http://xiao-feng.blogspot.com

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Posted by Sian January <si...@googlemail.com>.
Thanks for your quick reply Xiao-Feng.

I haven't studied the code that much, so if it's all disabled by
default as you say then I think it's ok to leave it in.

In future I do think it would be better practice to discuss it on the
dev list and get some agreement before committing something this size
during feature freeze week.  This is because with some large changes
there can be unforeseen effects that can impact the code in ways that
the original author hadn't realised.  Also if we had several major
changes and then saw regressions it could be difficult to work out
what had caused them and it could badly delay the release.

Does anyone else have a different opinion on either rolling back the
code or on general practice during feature freeze?

Thanks,

Sian


2008/10/29 Xiao-Feng Li <xi...@gmail.com>:
> Sian, thanks for your notice.
>
> This patch is indeed big. Most of the code are guarded by a macro
> USE_UNIQUE_MARK_SWEEP_GC and has no impact on the existing code base.
> It is disabled by default, and I tested it before I committed it.
>
> Actually it is not a new feature, but a fix of existing concurrent GC
> scheduler. Well, I admit it looks like a new feature since it changes
> lot of code...
>
> If it has any impact on stability, I will roll back it immediately.
> Thanks for your patience.
>
> Thanks,
> xiaofeng
>
> On Wed, Oct 29, 2008 at 5:22 PM, Sian January
> <si...@googlemail.com> wrote:
>> Hi Xiao-Feng,
>>
>> This commit looks like quite a large new feature to me.  Since we're
>> in feature freeze this week for M8 I really think it should be backed
>> out until after the milestone, as we should be focussing on testing
>> and stability at the moment.
>>
>> Thanks,
>>
>> Sian
>>
>>
>> 2008/10/29  <xl...@apache.org>:
>>> Author: xli
>>> Date: Tue Oct 28 20:01:01 2008
>>> New Revision: 708756
>>>
>>> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
>>> Log:
>>> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>>>
>>> Added:
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
>>> Removed:
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
>>> Modified:
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
>>> @@ -34,6 +34,7 @@
>>>     gc_heap_write_global_slot;
>>>     gc_heap_write_ref;
>>>     gc_heap_wrote_object;
>>> +    gc_heap_copy_object_array;
>>>     gc_init;
>>>     gc_is_object_pinned;
>>>     gc_iterate_heap;
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>> @@ -31,24 +31,16 @@
>>>   return;
>>>  }
>>>
>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>> -{
>>> -  if(gc_is_specify_con_gc()){
>>> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
>>> -  }
>>> -  return;
>>> -}
>>>
>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>>>  {
>>>   /*collection scheduler only schedules concurrent collection now.*/
>>>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>>>     assert(gc_is_specify_con_gc());
>>> -    return gc_sched_con_collection(gc, gc_cause);
>>> +    return gc_con_perform_collection( gc );
>>>   }else{
>>>     return FALSE;
>>>   }
>>>  }
>>>
>>>
>>> -
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
>>> @@ -26,12 +26,8 @@
>>>  void collection_scheduler_initialize(GC* gc);
>>>  void collection_scheduler_destruct(GC* gc);
>>>
>>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>>>
>>>  #endif
>>>
>>>
>>> -
>>> -
>>> -
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>>> @@ -22,7 +22,7 @@
>>>  #include "collection_scheduler.h"
>>>  #include "concurrent_collection_scheduler.h"
>>>  #include "gc_concurrent.h"
>>> -#include "../thread/marker.h"
>>> +#include "../thread/conclctor.h"
>>>  #include "../verify/verify_live_heap.h"
>>>
>>>  #define NUM_TRIAL_COLLECTION 2
>>> @@ -53,6 +53,7 @@
>>>  Boolean gc_use_space_scheduler()
>>>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>>>
>>> +
>>>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>>>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>>
>>> @@ -75,6 +76,7 @@
>>>   STD_FREE(gc->collection_scheduler);
>>>  }
>>>
>>> +
>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>>>  {
>>>   string_to_upper(cc_scheduler);
>>> @@ -93,281 +95,248 @@
>>>   gc_enable_time_scheduler();
>>>  }
>>>
>>> -static Boolean time_to_start_mark(GC* gc)
>>> -{
>>> -  if(!gc_use_time_scheduler()) return FALSE;
>>> -
>>> -  int64 time_current = time_now();
>>> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
>>> -}
>>> -
>>> -static Boolean space_to_start_mark(GC* gc)
>>> -{
>>> -  if(!gc_use_space_scheduler()) return FALSE;
>>> +/*====================== new scheduler ===================*/
>>> +extern unsigned int NUM_CON_MARKERS;
>>> +extern unsigned int NUM_CON_SWEEPERS;
>>> +unsigned int gc_get_mutator_number(GC *gc);
>>> +
>>> +#define MOSTLY_CON_MARKER_DIVISION 0.5
>>> +unsigned int mostly_con_final_marker_num=1;
>>> +unsigned int mostly_con_long_marker_num=1;
>>> +
>>> +unsigned int gc_get_marker_number(GC* gc) {
>>> +  unsigned int mutator_num = gc_get_mutator_number(gc);
>>> +  unsigned int marker_specified = NUM_CON_MARKERS;
>>> +  if(marker_specified == 0) {
>>> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
>>> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>>> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
>>> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
>>> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
>>> +    }
>>> +  }
>>>
>>> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
>>> -  return (size_new_obj > space_threshold_to_start_mark);
>>> +  assert(marker_specified);
>>> +  return marker_specified;
>>>  }
>>>
>>> -static Boolean gc_need_start_con_mark(GC* gc)
>>> -{
>>> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
>>> -
>>> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
>>> -    return TRUE;
>>> -  else
>>> -    return FALSE;
>>> +#define CON_SWEEPER_DIVISION 0.8
>>> +unsigned int gc_get_sweeper_numer(GC *gc) {
>>> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
>>> +  if(sweeper_specified == 0)
>>> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
>>> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
>>> +  assert(sweeper_specified);
>>> +  return sweeper_specified;
>>>  }
>>>
>>> -static Boolean gc_need_start_con_sweep(GC* gc)
>>> -{
>>> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>>>
>>> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
>>> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
>>> -    return TRUE;
>>> -  else
>>> -    return FALSE;
>>> -}
>>>
>>> -static Boolean gc_need_reset_after_con_collect(GC* gc)
>>> -{
>>> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
>>> -    return TRUE;
>>> -  else
>>> -    return FALSE;
>>> -}
>>>
>>> -static Boolean gc_need_start_con_enum(GC* gc)
>>> -{
>>> -  /*TODO: support on-the-fly root set enumeration.*/
>>> -  return FALSE;
>>> -}
>>> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
>>> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
>>> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>>>
>>> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
>>> -#define TIME_CORRECTION_OTF_MARK 0.65f
>>> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
>>> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
>>> -
>>> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
>>> -{
>>> -  Space* space = NULL;
>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>> -
>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>> -  space = (Space*) gc_get_wspace(gc);
>>> -#endif
>>> -  if(!space) return;
>>> +/* for checking heap effcient*/
>>> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
>>> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
>>> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>>>
>>> -  Space_Statistics* space_stat = space->space_statistic;
>>> -
>>> -  unsigned int slot_index = cc_scheduler->last_window_index;
>>> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
>>> -
>>> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
>>> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
>>> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
>>> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
>>> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>>>
>>> -  cc_scheduler->last_mutator_time = time_mutator;
>>> -  cc_scheduler->last_collector_time = time_collection;
>>> -
>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>> -    return;
>>> -
>>> -  cc_scheduler->alloc_rate_window[slot_index]
>>> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
>>> +static int64 last_check_time_point = time_now();
>>> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>>>
>>> -  if(gc_mark_is_concurrent()){
>>> -    cc_scheduler->trace_rate_window[slot_index]
>>> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
>>> -  }else{
>>> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
>>> -  }
>>> -
>>> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
>>> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
>>> +//just debugging
>>> +int64 get_last_check_point()
>>> +{
>>> +   return last_check_time_point;
>>>  }
>>>
>>> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
>>> -{
>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>>> -    return;
>>> +static unsigned int alloc_space_threshold = 0;
>>>
>>> -  Space* space = NULL;
>>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>>> -  space = (Space*) gc_get_wspace(gc);
>>> -#endif
>>> -  if(!space) return;
>>> -
>>> -  Space_Statistics* space_stat = space->space_statistic;
>>> -
>>> -  float sum_alloc_rate = 0;
>>> -  float sum_trace_rate = 0;
>>> -  float sum_space_util_ratio = 0;
>>> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
>>> +static unsigned int space_check_stage_2; //BIG_DELTA
>>>
>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
>>> +{
>>>
>>> -  int64 time_this_collection_correction = 0;
>>> -#if 0
>>> -  float space_util_ratio = space_stat->space_utilization_ratio;
>>> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
>>> -    time_this_collection_correction = 0;
>>> -  }else{
>>> -    time_this_collection_correction
>>> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
>>> -  }
>>> -#endif
>>> -
>>> -  unsigned int i;
>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
>>> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
>>> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
>>> -  }
>>> -
>>> -  TRACE2("gc.con.cs","Allocation Rate: ");
>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
>>> -  }
>>> -
>>> -  TRACE2("gc.con.cs","Tracing Rate: ");
>>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
>>> -  }
>>> -
>>> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
>>> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
>>> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
>>> -
>>> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
>>> -
>>> -  if(average_alloc_rate == 0 ){
>>> -    time_delay_to_start_mark = MIN_DELAY_TIME;
>>> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
>>> -  }else if(average_trace_rate == 0){
>>> -    time_delay_to_start_mark = MAX_DELAY_TIME;
>>> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>> -  }else{
>>> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
>>> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
>>> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
>>> -
>>> -    if(time_alloc_expected > time_trace_expected){
>>> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
>>> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
>>> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
>>> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
>>> -      }
>>> -    }else{
>>> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
>>> +  float util_rate = con_collection_stat->heap_utilization_rate;
>>> +  unsigned int space_threshold = 0;
>>> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
>>> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
>>> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
>>> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
>>> +
>>> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
>>> +    } else {  //use default
>>> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>     }
>>> -
>>> -    cc_scheduler->space_threshold_to_start_mark =
>>> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
>>> -
>>> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
>>> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>>> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>>   }
>>> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
>>> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>>>
>>> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
>>> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
>>> +  else
>>> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
>>> +
>>> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
>>> +  return space_threshold;
>>>  }
>>>
>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>>> -{
>>> -  assert(gc_is_specify_con_gc());
>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
>>> -
>>> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
>>> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
>>> -
>>> -  return;
>>> -}
>>> -
>>> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
>>> +/* this parameters are updated at end of GC */
>>> +void gc_update_scheduler_parameter( GC *gc )
>>>  {
>>> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
>>> -  vm_gc_lock_enum();
>>> -
>>> -  gc_try_finish_con_phase(gc);
>>> -
>>> -  if(gc_need_start_con_enum(gc)){
>>> -    /*TODO:Concurrent rootset enumeration.*/
>>> -    assert(0);
>>> -  }
>>> -
>>> -  if(gc_need_start_con_mark(gc)){
>>> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
>>> -    gc_start_con_mark(gc);
>>> -    vm_gc_unlock_enum();
>>> -    unlock(gc->lock_collect_sched);
>>> -    return TRUE;
>>> -  }
>>> -
>>> -  if(gc_need_start_con_sweep(gc)){
>>> -    gc->num_collections++;
>>> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
>>> -    gc_start_con_sweep(gc);
>>> -    vm_gc_unlock_enum();
>>> -    unlock(gc->lock_collect_sched);
>>> -    return TRUE;
>>> -  }
>>> -
>>> -  if(gc_need_reset_after_con_collect(gc)){
>>> -    int64 pause_start = time_now();
>>> -    int disable_count = vm_suspend_all_threads();
>>> -    gc_reset_after_con_collect(gc);
>>> -    gc_start_mutator_time_measure(gc);
>>> -    set_collection_end_time();
>>> -    vm_resume_all_threads(disable_count);
>>> -    vm_gc_unlock_enum();
>>> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>> -    unlock(gc->lock_collect_sched);
>>> -    return TRUE;
>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +   last_check_time_point = time_now();
>>> +
>>> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
>>> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
>>> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
>>> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
>>> +
>>> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
>>> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>> +   if(gc_is_specify_con_sweep()) {
>>> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
>>> +   }
>>> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
>>> +}
>>> +
>>> +void gc_force_update_scheduler_parameter( GC *gc )
>>> +{
>>> +    last_check_time_point = time_now();
>>> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
>>> +    check_delay_time = time_now();
>>> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
>>> +}
>>> +
>>> +
>>> +
>>> +static inline Boolean check_start_mark( GC *gc )
>>> +{
>>> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +   /*just debugging*/
>>> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
>>> +   if( alloc_space_threshold < new_object_occupied_size ) {
>>> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
>>> +       return TRUE;
>>> +   }
>>> +
>>> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
>>> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
>>> +   int64 last_check_delay = check_delay_time;
>>> +
>>> +   if( free_space < space_check_stage_2 ) {
>>> +       check_delay_time = SMALL_DELTA;
>>> +   } else if( free_space < space_check_stage_1 ) {
>>> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
>>> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
>>> +           unsigned int interval_space = new_object_occupied_size;
>>> +           con_collection_stat->alloc_rate = interval_space/interval_time;
>>> +       }
>>> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
>>> +   }
>>> +   last_check_time_point = time_now();
>>> +
>>> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
>>> +   return FALSE;
>>> +}
>>> +
>>> +static SpinLock check_lock;
>>> +static inline Boolean space_should_start_mark( GC *gc)
>>> +{
>>> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
>>> +      Boolean should_start = check_start_mark(gc);
>>> +      unlock(check_lock);
>>> +      return should_start;
>>>   }
>>> -  vm_gc_unlock_enum();
>>> -  unlock(gc->lock_collect_sched);
>>>   return FALSE;
>>>  }
>>>
>>> -extern unsigned int NUM_MARKERS;
>>> -
>>> -unsigned int gc_decide_marker_number(GC* gc)
>>> -{
>>> -  unsigned int num_active_marker;
>>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>>> +inline static Boolean gc_con_start_condition( GC* gc ) {
>>> +   return space_should_start_mark(gc);
>>> +}
>>>
>>> -  /*If the number of markers is specfied, just return the specified value.*/
>>> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>>>
>>> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
>>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
>>> -    /*Start trial cycle, collection set to 1 in trial cycle and */
>>> -    num_active_marker = 1;
>>> -  }else{
>>> -    num_active_marker = cc_scheduler->last_marker_num;
>>> -    int64 c_time = cc_scheduler->last_collector_time;
>>> -    int64 m_time = cc_scheduler->last_mutator_time;
>>> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
>>> -
>>> -    if(num_active_marker == 0) num_active_marker = 1;
>>> -
>>> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
>>> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
>>> -      num_active_marker ++;
>>> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
>>> -    }else if((float)d_time > (m_time * 0.6)){
>>> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
>>> -      num_active_marker --;
>>> -      if(num_active_marker == 0)  num_active_marker = 1;
>>> -    }
>>> -
>>> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
>>> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
>>> +void gc_reset_after_con_collection(GC *gc);
>>> +void gc_merge_free_list_global(GC *gc);
>>> +void gc_con_stat_information_out(GC *gc);
>>> +
>>> +unsigned int sub_time = 0;
>>> +int64 pause_time = 0;
>>> +/*
>>> +   concurrent collection entry function, it may start proper phase according to the current state.
>>> +*/
>>> +Boolean gc_con_perform_collection( GC* gc ) {
>>> +  int disable_count;
>>> +  int64 pause_start;
>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  switch( gc->gc_concurrent_status ) {
>>> +    case GC_CON_NIL :
>>> +      if( !gc_con_start_condition(gc) )
>>> +        return FALSE;
>>> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
>>> +        return FALSE;
>>> +
>>> +      gc->num_collections++;
>>> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
>>> +
>>> +      con_collection_stat->gc_start_time = time_now();
>>> +      disable_count = hythread_reset_suspend_disable();
>>> +
>>> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
>>> +      con_collection_stat->marking_start_time = time_now();
>>> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
>>> +      gc_start_con_marking(gc);
>>> +
>>> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
>>> +      vm_resume_threads_after();
>>> +      hythread_set_suspend_disable(disable_count);
>>> +      break;
>>> +
>>> +    case GC_CON_BEFORE_SWEEP :
>>> +      if(!gc_is_specify_con_sweep())
>>> +         return FALSE;
>>> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
>>> +         return FALSE;
>>> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
>>> +      break;
>>> +
>>> +
>>> +    case GC_CON_BEFORE_FINISH :
>>> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
>>> +                 return FALSE;
>>> +        /* thread should be suspended before the state transformation,
>>> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
>>> +        disable_count = vm_suspend_all_threads();
>>> +        pause_start = time_now();
>>> +
>>> +        gc_merge_free_list_global(gc);
>>> +        gc_reset_after_con_collection(gc);
>>> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
>>> +        pause_time = time_now()-pause_start;
>>> +
>>> +        vm_resume_all_threads(disable_count);
>>> +        gc_con_stat_information_out(gc);
>>> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
>>> +        break;
>>> +    default :
>>> +      return FALSE;
>>>   }
>>> -
>>> -  cc_scheduler->last_marker_num = num_active_marker;
>>> -  return num_active_marker;
>>> +  return TRUE;
>>>  }
>>>
>>> +
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
>>> @@ -20,6 +20,7 @@
>>>
>>>  #define STAT_SAMPLE_WINDOW_SIZE 5
>>>
>>> +struct GC_MS;
>>>  typedef struct Con_Collection_Scheduler {
>>>   /*common field*/
>>>   GC* gc;
>>> @@ -46,10 +47,17 @@
>>>  void con_collection_scheduler_initialize(GC* gc);
>>>  void con_collection_scheduler_destruct(GC* gc);
>>>
>>> +void gc_update_scheduler_parameter( GC *gc );
>>> +void gc_force_update_scheduler_parameter( GC *gc );
>>> +Boolean gc_con_perform_collection( GC* gc );
>>>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
>>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>>
>>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>>>  void gc_set_default_cc_scheduler_kind();
>>> +
>>> +extern unsigned int mostly_con_final_marker_num;
>>> +extern unsigned int mostly_con_long_marker_num;
>>> +
>>>  #endif
>>>
>>> +
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
>>> @@ -22,7 +22,7 @@
>>>  #include "gc_common.h"
>>>  #include "gc_metadata.h"
>>>  #include "../thread/mutator.h"
>>> -#include "../thread/marker.h"
>>> +#include "../thread/conclctor.h"
>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>  #include "../gen/gen.h"
>>>  #include "../mark_sweep/gc_ms.h"
>>> @@ -74,11 +74,19 @@
>>>  static int64 collection_start_time = time_now();
>>>  static int64 collection_end_time = time_now();
>>>
>>> -int64 get_collection_end_time()
>>> +int64 get_gc_start_time()
>>> +{ return collection_start_time; }
>>> +
>>> +void set_gc_start_time()
>>> +{ collection_start_time = time_now(); }
>>> +
>>> +int64 get_gc_end_time()
>>>  { return collection_end_time; }
>>>
>>> -void set_collection_end_time()
>>> -{ collection_end_time = time_now(); }
>>> +void set_gc_end_time()
>>> +{
>>> +  collection_end_time = time_now();
>>> +}
>>>
>>>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>>>  {
>>> @@ -93,17 +101,17 @@
>>>
>>>  }
>>>
>>> -void gc_update_space_stat(GC_MS* gc)
>>> +void gc_update_space_stat(GC* gc)
>>>  {
>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>> -    gc_ms_update_space_stat((GC_MS*)gc);
>>> +      gc_ms_update_space_stat((GC_MS *)gc);
>>>  #endif
>>>  }
>>>
>>> -void gc_reset_space_stat(GC_MS* gc)
>>> +void gc_reset_space_stat(GC* gc)
>>>  {
>>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>>> -    gc_ms_reset_space_stat((GC_MS*)gc);
>>> +      gc_ms_reset_space_stat((GC_MS *)gc);
>>>  #endif
>>>  }
>>>
>>> @@ -118,7 +126,7 @@
>>>   gc_set_rootset(gc);
>>>  }
>>>
>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
>>> +void gc_reset_after_collection(GC* gc)
>>>  {
>>>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>>>
>>> @@ -139,11 +147,9 @@
>>>  #endif
>>>   }
>>>
>>> -  gc_update_space_stat((GC_MS*)gc);
>>> +  gc_update_space_stat(gc);
>>>
>>> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
>>> -
>>> -  gc_reset_space_stat((GC_MS*)gc);
>>> +  gc_reset_space_stat(gc);
>>>
>>>   gc_reset_collector_state(gc);
>>>
>>> @@ -154,23 +160,25 @@
>>>
>>>  }
>>>
>>> +void set_check_delay( int64 mutator_time );
>>> +
>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>>>  {
>>>   INFO2("gc.process", "\nGC: GC start ...\n");
>>>
>>> -  collection_start_time = time_now();
>>> -  int64 time_mutator = collection_start_time - collection_end_time;
>>> -
>>> -  gc->num_collections++;
>>>   gc->cause = gc_cause;
>>>
>>>   if(gc_is_specify_con_gc()){
>>> -    gc_finish_con_GC(gc, time_mutator);
>>> -    collection_end_time = time_now();
>>> +    gc_wait_con_finish(gc);
>>>     INFO2("gc.process", "GC: GC end\n");
>>>     return;
>>>   }
>>>
>>> +   set_gc_start_time();
>>> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
>>> +
>>> +  gc->num_collections++;
>>> +
>>>   /* FIXME:: before mutators suspended, the ops below should be very careful
>>>      to avoid racing with mutators. */
>>>
>>> @@ -207,16 +215,16 @@
>>>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>>>  #endif
>>>
>>> -  collection_end_time = time_now();
>>> +  set_gc_end_time();
>>>
>>> -  int64 time_collection = collection_end_time - collection_start_time;
>>> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>>>
>>>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>>>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>>>   gc_gen_space_verbose_info((GC_Gen*)gc);
>>>  #endif
>>>
>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>> +  gc_reset_after_collection(gc);
>>>
>>>   gc_assign_free_area_to_mutators(gc);
>>>
>>> @@ -230,6 +238,3 @@
>>>
>>>
>>>
>>> -
>>> -
>>> -
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
>>> @@ -39,7 +39,8 @@
>>>
>>>  #include "../common/gc_for_barrier.h"
>>>
>>> -/*
>>> +
>>> + /*
>>>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>>>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>>>  */
>>> @@ -336,19 +337,7 @@
>>>   return TRUE;
>>>  }
>>>
>>> -extern volatile Boolean obj_alloced_live;
>>> -inline Boolean is_obj_alloced_live()
>>> -{ return obj_alloced_live;  }
>>>
>>> -inline void gc_enable_alloc_obj_live()
>>> -{
>>> -  obj_alloced_live = TRUE;
>>> -}
>>> -
>>> -inline void gc_disable_alloc_obj_live()
>>> -{
>>> -  obj_alloced_live = FALSE;
>>> -}
>>>
>>>  /***************************************************************/
>>>
>>> @@ -391,7 +380,7 @@
>>>  /***************************************************************/
>>>
>>>  /* all GCs inherit this GC structure */
>>> -struct Marker;
>>> +struct Conclctor;
>>>  struct Mutator;
>>>  struct Collector;
>>>  struct GC_Metadata;
>>> @@ -421,9 +410,12 @@
>>>   unsigned int num_collectors;
>>>   unsigned int num_active_collectors; /* not all collectors are working */
>>>
>>> -  Marker** markers;
>>> -  unsigned int num_markers;
>>> +  /*concurrent markers and collectors*/
>>> +  Conclctor** conclctors;
>>> +  unsigned int num_conclctors;
>>> +  //unsigned int num_active_conclctors;
>>>   unsigned int num_active_markers;
>>> +  unsigned int num_active_sweepers;
>>>
>>>   /* metadata is the pool for rootset, tracestack, etc. */
>>>   GC_Metadata* metadata;
>>> @@ -443,7 +435,7 @@
>>>
>>>   Space_Tuner* tuner;
>>>
>>> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>>   Collection_Scheduler* collection_scheduler;
>>>
>>>   SpinLock lock_con_mark;
>>> @@ -488,11 +480,15 @@
>>>
>>>  GC* gc_parse_options();
>>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
>>> +void gc_relaim_heap_con_mode( GC *gc);
>>>  void gc_prepare_rootset(GC* gc);
>>>
>>>
>>> -int64 get_collection_end_time();
>>> -void set_collection_end_time();
>>> +int64 get_gc_start_time();
>>> +void set_gc_start_time();
>>> +
>>> +int64 get_gc_end_time();
>>> +void set_gc_end_time();
>>>
>>>  /* generational GC related */
>>>
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
>>> @@ -17,325 +17,582 @@
>>>  #include "gc_common.h"
>>>  #include "gc_metadata.h"
>>>  #include "../thread/mutator.h"
>>> -#include "../thread/marker.h"
>>> +#include "../thread/conclctor.h"
>>>  #include "../thread/collector.h"
>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>>  #include "../gen/gen.h"
>>>  #include "../mark_sweep/gc_ms.h"
>>> +#include "../mark_sweep/wspace_mark_sweep.h"
>>>  #include "interior_pointer.h"
>>>  #include "collection_scheduler.h"
>>>  #include "gc_concurrent.h"
>>>  #include "../common/gc_for_barrier.h"
>>> +#include "concurrent_collection_scheduler.h"
>>> +#include "../verify/verify_live_heap.h"
>>>
>>> -volatile Boolean concurrent_in_marking  = FALSE;
>>> -volatile Boolean concurrent_in_sweeping = FALSE;
>>> -volatile Boolean mark_is_concurrent     = FALSE;
>>> -volatile Boolean sweep_is_concurrent    = FALSE;
>>> +struct Con_Collection_Statistics;
>>>
>>>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>>>
>>> -static void gc_check_con_mark(GC* gc)
>>> +//just debugging
>>> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>>>  {
>>> -  if(!is_mark_finished(gc)){
>>> -    lock(gc->lock_con_mark);
>>> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -      //ignore.
>>> -    }
>>> -    unlock(gc->lock_con_mark);
>>> -  }
>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
>>> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
>>> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
>>> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>>>  }
>>>
>>> -static void gc_wait_con_mark_finish(GC* gc)
>>> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>>>  {
>>> -  wait_mark_finish(gc);
>>> -  gc_set_barrier_function(WB_REM_NIL);
>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>>  }
>>> +
>>> +volatile Boolean obj_alloced_live;
>>>
>>> -unsigned int gc_decide_marker_number(GC* gc);
>>> +void gc_enable_alloc_obj_live(GC *gc)
>>> +{
>>> +  gc_con_update_stat_before_enable_alloc_live(gc);
>>> +  obj_alloced_live = TRUE;
>>> +}
>>>
>>> -void gc_start_con_mark(GC* gc)
>>> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>>>  {
>>> -  int disable_count;
>>> -  unsigned int num_marker;
>>> -
>>> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
>>> -
>>> -  lock(gc->lock_enum);
>>> -  disable_count = hythread_reset_suspend_disable();
>>> -  int64 pause_start = time_now();
>>> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>> -  gc_prepare_rootset(gc);
>>> -
>>> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>
>>> -  num_marker = gc_decide_marker_number(gc);
>>> -
>>> -  /*start concurrent mark*/
>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>>> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>>> -    gc_set_barrier_function(WB_REM_OLD_VAR);
>>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>> +    Conclctor* conclctor = gc->conclctors[i];
>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>> +      continue;
>>> +    num_live_obj += conclctor->live_obj_num;
>>> +    size_live_obj += conclctor->live_obj_size;
>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>> +    conclctor->live_obj_num = 0;
>>> +    conclctor->live_obj_size = 0;
>>> +    conclctor->num_dirty_slots_traced = 0;
>>>   }
>>>
>>> -  unlock(gc->lock_enum);
>>> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>>> -  vm_resume_threads_after();
>>> -  assert(hythread_is_suspend_enabled());
>>> -  hythread_set_suspend_disable(disable_count);
>>> -
>>> -  unlock(gc->lock_con_mark);
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  con_collection_stat->live_size_marked += size_live_obj;
>>> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>> +
>>>  }
>>>
>>> -void mostly_con_mark_terminate_reset();
>>> -void terminate_mostly_con_mark();
>>> -
>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
>>> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
>>> +//called by the marker when it finishes
>>> +void gc_con_update_stat_after_marking(GC *gc)
>>>  {
>>> -  gc_check_con_mark(gc);
>>> -
>>> -  if(gc_is_kind(ALGO_CON_MOSTLY))
>>> -    terminate_mostly_con_mark();
>>> -
>>> -  gc_wait_con_mark_finish(gc);
>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>>
>>> -  int disable_count;
>>> -  if(need_STW){
>>> -    /*suspend the mutators.*/
>>> -    lock(gc->lock_enum);
>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -      /*In mostly concurrent algorithm, there's a final marking pause.
>>> -            Prepare root set for final marking.*/
>>> -      disable_count = hythread_reset_suspend_disable();
>>> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
>>> -      gc_prepare_rootset(gc);
>>> -    }else{
>>> -      disable_count = vm_suspend_all_threads();
>>> -    }
>>> +  unsigned int num_conclctors = gc->num_conclctors;
>>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>>> +    Conclctor* conclctor = gc->conclctors[i];
>>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>>> +      continue;
>>> +    num_live_obj += conclctor->live_obj_num;
>>> +    size_live_obj += conclctor->live_obj_size;
>>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>>> +    conclctor->live_obj_num = 0;
>>> +    conclctor->live_obj_size = 0;
>>> +    conclctor->num_dirty_slots_traced = 0;
>>>   }
>>>
>>> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -    /*In mostly concurrent algorithm, there's a final marking pause.
>>> -          Suspend the mutators once again and finish the marking phase.*/
>>> -
>>> -    /*prepare dirty object*/
>>> -    gc_prepare_dirty_set(gc);
>>> -
>>> -    gc_set_weakref_sets(gc);
>>> -
>>> -    /*start STW mark*/
>>> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>>> -
>>> -    mostly_con_mark_terminate_reset();
>>> -    gc_clear_dirty_set(gc);
>>> -  }
>>> -
>>> -  gc_reset_dirty_set(gc);
>>> -
>>> -  if(need_STW){
>>> -    unlock(gc->lock_enum);
>>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>>> -      vm_resume_threads_after();
>>> -      assert(hythread_is_suspend_enabled());
>>> -      hythread_set_suspend_disable(disable_count);
>>> -    }else{
>>> -      vm_resume_all_threads(disable_count);
>>> -    }
>>> -  }
>>> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
>>> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>>
>>> +   /*statistics information update (marking_end_time, trace_rate) */
>>> +  con_collection_stat->marking_end_time = time_now();
>>> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>> +
>>> +  unsigned int heap_size =
>>> +       con_collection_stat->surviving_size_at_gc_end +
>>> +       gc_get_mutator_new_obj_size(gc);
>>> +
>>> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
>>> +
>>> +
>>> +
>>> +  /*
>>> +  //statistics just for debugging
>>> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
>>> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
>>> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
>>> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>>> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
>>> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
>>> +  */
>>>  }
>>>
>>> -void gc_reset_con_mark(GC* gc)
>>> +void gc_PSTW_update_stat_after_marking(GC *gc)
>>>  {
>>> -  gc->num_active_markers = 0;
>>> -  gc_mark_unset_concurrent();
>>> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  con_collection_stat->live_size_marked = size_live_obj;
>>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>> +
>>> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
>>> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>>  }
>>>
>>> -int64 gc_get_con_mark_time(GC* gc)
>>> +//Called only when heap is exhuaset
>>> +void gc_con_update_stat_heap_exhausted(GC* gc)
>>>  {
>>> -  int64 time_mark = 0;
>>> -  Marker** markers = gc->markers;
>>> -  unsigned int i;
>>> -  for(i = 0; i < gc->num_active_markers; i++){
>>> -    Marker* marker = markers[i];
>>> -    if(marker->time_mark > time_mark){
>>> -      time_mark = marker->time_mark;
>>> -    }
>>> -    marker->time_mark = 0;
>>> -  }
>>> -  return time_mark;
>>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
>>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>>>  }
>>>
>>> -void gc_start_con_sweep(GC* gc)
>>> +
>>> +//just debugging
>>> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>>>  {
>>> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
>>> +  POINTER_SIZE_INT num_live_obj = 0;
>>> +  POINTER_SIZE_INT size_live_obj = 0;
>>>
>>> -  /*FIXME: enable finref*/
>>> -  if(!IGNORE_FINREF ){
>>> -    gc_set_obj_with_fin(gc);
>>> -    Collector* collector = gc->collectors[0];
>>> -    collector_identify_finref(collector);
>>> -#ifndef BUILD_IN_REFERENT
>>> -  }else{
>>> -    gc_set_weakref_sets(gc);
>>> -    gc_update_weakref_ignore_finref(gc);
>>> -#endif
>>> +  unsigned int num_collectors = gc->num_active_collectors;
>>> +  Collector** collectors = gc->collectors;
>>> +  unsigned int i;
>>> +  for(i = 0; i < num_collectors; i++){
>>> +    Collector* collector = collectors[i];
>>> +    num_live_obj += collector->live_obj_num;
>>> +    size_live_obj += collector->live_obj_size;
>>> +    collector->live_obj_num = 0;
>>> +    collector->live_obj_size = 0;
>>>   }
>>> +
>>> +  return size_live_obj;
>>> +}
>>>
>>> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
>>> +//Called when Con GC ends, must called in a STW period
>>> +void gc_reset_con_space_stat(GC *gc)
>>> +{
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>>>
>>> -  gc_set_weakref_sets(gc);
>>> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
>>> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
>>> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
>>> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
>>> +  }
>>> +
>>> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
>>> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
>>> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
>>> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>>>
>>> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
>>> -      So, when this assumption changed, we should modified the below function.*/
>>> -  gc_identify_dead_weak_roots(gc);
>>>
>>> -  /*start concurrent mark*/
>>> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
>>> +  /*
>>> +  //just debugging
>>> +  if( !gc_is_specify_con_sweep() ) {
>>> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
>>> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
>>> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
>>> +  }*/
>>>
>>> -  unlock(gc->lock_con_sweep);
>>> +  int64 current_time = time_now();
>>> +
>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>> +       unsigned int gc_interval_time = 0;
>>> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
>>> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
>>> +       else
>>> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
>>> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
>>> +       gc_update_scheduler_parameter(gc);
>>> +  } else {
>>> +     gc_force_update_scheduler_parameter(gc);
>>> +  }
>>> +
>>> +  con_collection_stat->gc_end_time = current_time;
>>> +
>>> +  con_collection_stat->live_size_marked = 0;
>>> +  con_collection_stat->live_alloc_size = 0;
>>> +  con_collection_stat->alloc_size_before_alloc_live = 0;
>>> +  con_collection_stat->marking_start_time = 0;
>>> +  con_collection_stat->marking_end_time = 0;
>>> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
>>> +  con_collection_stat->pause_start_time = 0;
>>> +  assert(con_collection_stat->heap_utilization_rate<1);
>>> +
>>>  }
>>>
>>> -void gc_reset_con_sweep(GC* gc)
>>> +void gc_con_stat_information_out(GC *gc)
>>>  {
>>> -  gc->num_active_collectors = 0;
>>> -  gc_sweep_unset_concurrent();
>>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
>>> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>>> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
>>> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
>>> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
>>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>>  }
>>>
>>> -void gc_wait_con_sweep_finish(GC* gc)
>>> +void gc_reset_after_con_collection(GC* gc)
>>>  {
>>> -  wait_collection_finish(gc);
>>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>>> +  assert(gc_is_specify_con_gc());
>>> +  int64 reset_start = time_now();
>>> +  if(!IGNORE_FINREF ){
>>> +    INFO2("gc.process", "GC: finref process after collection ...\n");
>>> +    gc_put_finref_to_vm(gc);
>>> +    gc_reset_finref_metadata(gc);
>>> +    gc_activate_finref_threads((GC*)gc);
>>> +#ifndef BUILD_IN_REFERENT
>>> +  } else {
>>> +    gc_clear_weakref_pools(gc);
>>> +    gc_clear_finref_repset_pool(gc);
>>> +#endif
>>> +  }
>>> +  reset_start = time_now();
>>> +  gc_reset_con_space_stat(gc);
>>> +  gc_clear_conclctor_role(gc);
>>> +  vm_reclaim_native_objs();
>>>  }
>>>
>>> -void gc_finish_con_sweep(GC * gc)
>>> +
>>> +
>>> +void gc_set_default_con_algo()
>>>  {
>>> -  gc_wait_con_sweep_finish(gc);
>>> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>>>  }
>>>
>>> -void gc_try_finish_con_phase(GC * gc)
>>> +void gc_decide_con_algo(char* concurrent_algo)
>>>  {
>>> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
>>> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
>>> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
>>> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
>>> -          here to guarantee this occasional case.*/
>>> -    if(try_lock(gc->lock_con_mark)){
>>> -      unlock(gc->lock_con_mark);
>>> -      gc_finish_con_mark(gc, TRUE);
>>> -    }
>>> -  }
>>> -
>>> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
>>> -    //The reason is same as concurrent mark above.
>>> -    if(try_lock(gc->lock_con_sweep)){
>>> -      unlock(gc->lock_con_sweep);
>>> -      gc_finish_con_sweep(gc);
>>> -    }
>>> +  string_to_upper(concurrent_algo);
>>> +  GC_PROP &= ~ALGO_CON_MASK;
>>> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>> +    GC_PROP |= ALGO_CON_OTF_OBJ;
>>> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>> +    GC_PROP |= ALGO_CON_MOSTLY;
>>> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>> +    GC_PROP |= ALGO_CON_OTF_REF;
>>>   }
>>>  }
>>>
>>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>>>
>>> -void gc_reset_after_con_collect(GC* gc)
>>> +/*
>>> +    gc start enumeration phase, now, it is in a stop-the-world manner
>>> +*/
>>> +void gc_start_con_enumeration(GC * gc)
>>>  {
>>> -  assert(gc_is_specify_con_gc());
>>> -
>>> -  int64 time_mutator = gc_get_mutator_time(gc);
>>> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
>>> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
>>> +  gc_prepare_rootset(gc);
>>> +}
>>>
>>> -  gc_reset_interior_pointer_table();
>>> +//unsigned int gc_decide_marker_number(GC* gc);
>>> +unsigned int gc_get_marker_number(GC* gc);
>>> +/*  gc start marking phase */
>>> +void gc_start_con_marking(GC *gc)
>>> +{
>>> +  unsigned int num_marker;
>>> +  num_marker = gc_get_marker_number(gc);
>>>
>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>> -
>>> -  if(gc_mark_is_concurrent()){
>>> -    gc_reset_con_mark(gc);
>>> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
>>> +    gc_enable_alloc_obj_live(gc);
>>> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>>> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>>> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>>> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
>>> +    gc_enable_alloc_obj_live(gc);
>>> +    gc_set_barrier_function(WB_REM_OLD_VAR);
>>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>>   }
>>> +}
>>> +
>>>
>>> -  if(gc_sweep_is_concurrent()){
>>> -    gc_reset_con_sweep(gc);
>>> +/*
>>> +    gc start sweeping phase
>>> +*/
>>> +void gc_prepare_sweeping(GC *gc) {
>>> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
>>> +  /*FIXME: enable finref*/
>>> +  if(!IGNORE_FINREF ){
>>> +    gc_set_obj_with_fin(gc);
>>> +    Collector* collector = gc->collectors[0];
>>> +    collector_identify_finref(collector);
>>> +  #ifndef BUILD_IN_REFERENT
>>> +  } else {
>>> +    conclctor_set_weakref_sets(gc);
>>> +    gc_update_weakref_ignore_finref(gc);
>>> +  #endif
>>>   }
>>> +  gc_identify_dead_weak_roots(gc);
>>>  }
>>>
>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
>>> -{
>>> +int64 get_last_check_point();
>>> +// for the case pure stop the world
>>> +static void gc_partial_con_PSTW( GC *gc) {
>>>   int64 time_collection_start = time_now();
>>> -
>>> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
>>> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
>>> +  // stop the world enumeration
>>>   gc->num_collections++;
>>> -
>>> -  lock(gc->lock_enum);
>>> -
>>>   int disable_count = hythread_reset_suspend_disable();
>>>   gc_set_rootset_type(ROOTSET_IS_REF);
>>>   gc_prepare_rootset(gc);
>>> -  unlock(gc->lock_enum);
>>> -
>>> -  if(gc_sweep_is_concurrent()){
>>> -    if(gc_con_is_in_sweeping())
>>> -      gc_finish_con_sweep(gc);
>>> -  }else{
>>> -    if(gc_con_is_in_marking()){
>>> -      gc_finish_con_mark(gc, FALSE);
>>> -    }
>>> -    gc->in_collection = TRUE;
>>> -    gc_reset_mutator_context(gc);
>>> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>> -    gc_ms_reclaim_heap((GC_MS*)gc);
>>> -  }
>>> -
>>> -  int64 time_collection = 0;
>>> -  if(gc_mark_is_concurrent()){
>>> -    time_collection = gc_get_con_mark_time(gc);
>>> -    gc_reset_con_mark(gc);
>>> -  }else{
>>> -    time_collection = time_now()-time_collection_start;
>>> -  }
>>> +
>>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>>> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>>> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>>> +  }
>>> +
>>> +  //reclaim heap
>>> +  gc_reset_mutator_context(gc);
>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>> +
>>> +  //update live size
>>> +  gc_PSTW_update_stat_after_marking(gc);
>>> +
>>> +  // reset the collection and resume mutators
>>> +  gc_reset_after_con_collection(gc);
>>>
>>> -  if(gc_sweep_is_concurrent()){
>>> -    gc_reset_con_sweep(gc);
>>> -  }
>>> -
>>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>>> -
>>> -  gc_start_mutator_time_measure(gc);
>>> -
>>> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>>>   vm_resume_threads_after();
>>>   assert(hythread_is_suspend_enabled());
>>> -  hythread_set_suspend_disable(disable_count);
>>> -  int64 pause_time = time_now()-time_collection_start;
>>> -
>>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
>>> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>> -  }else{
>>> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>>> -  }
>>> -  return;
>>> +  hythread_set_suspend_disable(disable_count);
>>>  }
>>>
>>> -void gc_set_default_con_algo()
>>> -{
>>> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
>>> -  GC_PROP |= ALGO_CON_OTF_OBJ;
>>> +void terminate_mostly_con_mark();
>>> +void wspace_mostly_con_final_mark( GC *gc );
>>> +
>>> +// for the case concurrent marking is not finished before heap is exhausted
>>> +static void gc_partial_con_PMSS(GC *gc) {
>>> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>> +  // wait concurrent marking finishes
>>> +  int64 wait_start = time_now();
>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
>>> +             gc->gc_concurrent_status == GC_CON_TRACING ||
>>> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
>>> +  {
>>> +      vm_thread_yield(); //let the unfinished marker run
>>> +  }
>>> +
>>> +  /*just debugging*/
>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>> +    int64 pause_time = time_now() - wait_start;
>>> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
>>> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
>>> +
>>> +  // start STW reclaiming heap
>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>> +  gc_reset_mutator_context(gc);
>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>> +
>>> +  // reset after partial stop the world collection
>>> +  gc_reset_after_con_collection(gc);
>>> +  set_con_nil(gc);
>>> +}
>>> +
>>> +// only when current sweep is set to false
>>> +static void gc_partial_con_CMSS(GC *gc) {
>>> +
>>> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>>> +
>>> +  /*just debugging*/
>>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
>>> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
>>> +
>>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>> +
>>> +  // start reclaiming heap, it will skip the marking phase
>>> +  gc_reset_mutator_context(gc);
>>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>>> +
>>> +  // reset after partial stop the world collection
>>> +  gc_reset_after_con_collection(gc);
>>> +  set_con_nil(gc);
>>> +}
>>> +
>>> +void gc_merge_free_list_global(GC *gc);
>>> +//for the case concurrent marking and partial concurrent sweeping
>>> +static void gc_partial_con_CMPS( GC *gc ) {
>>> +
>>> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
>>> +      vm_thread_yield();  //let the unfinished sweeper run
>>> +  }
>>> +  gc_merge_free_list_global(gc);
>>> +  // reset after partial stop the world collection
>>> +  gc_reset_after_con_collection(gc);
>>> +  set_con_nil(gc);
>>> +}
>>> +
>>> +
>>> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
>>> +  switch( type ) {
>>> +    case GC_PARTIAL_PSTW :
>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
>>> +      break;
>>> +    case GC_PARTIAL_PMSS :
>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
>>> +      break;
>>> +    case GC_PARTIAL_CMPS :
>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
>>> +      break;
>>> +    case GC_PARTIAL_CMSS :
>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
>>> +      break;
>>> +    case GC_PARTIAL_FCSR :
>>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
>>> +      break;
>>> +  }
>>> +}
>>> +
>>> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
>>> +{
>>> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
>>> +      vm_thread_yield();
>>> +   }
>>> +
>>> +   int64 final_start = time_now();
>>> +   int disable_count = hythread_reset_suspend_disable();
>>> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
>>> +   gc_prepare_rootset(gc);
>>> +
>>> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
>>> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
>>> +
>>> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>>> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
>>> +      vm_thread_yield(); //let the unfinished marker run
>>> +   }
>>> +
>>> +   //final marking phase
>>> +   gc_clear_conclctor_role(gc);
>>> +   wspace_mostly_con_final_mark(gc);
>>> +
>>> +   /*just debugging*/
>>> +   int64 final_time = time_now() - final_start;
>>> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
>>> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
>>> +
>>> +  // start STW reclaiming heap
>>> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>>> +   gc_reset_mutator_context(gc);
>>> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>>> +   gc_ms_reclaim_heap((GC_MS*)gc);
>>> +
>>> +   // reset after partial stop the world collection
>>> +   gc_reset_after_con_collection(gc);
>>> +   set_con_nil(gc);
>>> +
>>> +   vm_resume_threads_after();
>>> +   hythread_set_suspend_disable(disable_count);
>>> +   return GC_PARTIAL_PMSS;
>>> +
>>> +}
>>> +
>>> +static unsigned int gc_con_heap_full_otf( GC *gc )
>>> +{
>>> +   unsigned int partial_type; //for time measuring and debugging
>>> +   int disable_count = vm_suspend_all_threads();
>>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +   con_collection_stat->pause_start_time = time_now();
>>> +   switch(gc->gc_concurrent_status) {
>>> +       case GC_CON_START_MARKERS :
>>> +       case GC_CON_TRACING :
>>> +       case GC_CON_TRACE_DONE :
>>> +         partial_type = GC_PARTIAL_PMSS;
>>> +         gc_partial_con_PMSS(gc);
>>> +         break;
>>> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
>>> +         partial_type = GC_PARTIAL_CMSS;
>>> +         gc_partial_con_CMSS(gc);
>>> +         break;
>>> +       case GC_CON_SWEEPING :
>>> +       case GC_CON_SWEEP_DONE :
>>> +         partial_type = GC_PARTIAL_CMPS;
>>> +         gc_partial_con_CMPS(gc);
>>> +         break;
>>> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
>>> +         partial_type = GC_PARTIAL_FCSR;
>>> +         gc_merge_free_list_global(gc);
>>> +         gc_reset_after_con_collection(gc);
>>> +         set_con_nil(gc);
>>> +         break;
>>> +       case GC_CON_RESET :
>>> +       case GC_CON_NIL :
>>> +       case GC_CON_STW_ENUM :
>>> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
>>> +         partial_type = GC_PARTIAL_FCSR;
>>> +         break;
>>> +       /* other state is illegal here */
>>> +       default:
>>> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
>>> +         RAISE_ERROR;
>>> +    }
>>> +    vm_resume_all_threads(disable_count);
>>> +    return partial_type;
>>>  }
>>>
>>> -void gc_decide_con_algo(char* concurrent_algo)
>>> -{
>>> -  string_to_upper(concurrent_algo);
>>> -  GC_PROP &= ~ALGO_CON_MASK;
>>> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>>> -    GC_PROP |= ALGO_CON_OTF_OBJ;
>>> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>>> -    GC_PROP |= ALGO_CON_MOSTLY;
>>> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>>> -    GC_PROP |= ALGO_CON_OTF_REF;
>>> +void gc_con_stat_information_out(GC *gc);
>>> +/*
>>> +this method is called before STW gc start, there is a big lock outside
>>> +*/
>>> +void gc_wait_con_finish( GC* gc ) {
>>> +  int64 time_collection_start = time_now();
>>> +  unsigned int partial_type; //for time measuring and debugging
>>> +
>>> +   /* cocurrent gc is idle */
>>> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
>>> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>>> +        con_collection_stat->gc_start_time = time_now();
>>> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
>>> +        partial_type = GC_PARTIAL_PSTW;
>>> +        gc_partial_con_PSTW( gc );
>>> +   } else {
>>> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
>>> +          hythread_safe_point();
>>> +          vm_thread_yield();
>>> +       }
>>> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
>>> +         partial_type = gc_con_heap_full_mostly_con(gc);
>>> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>>> +         partial_type = gc_con_heap_full_otf(gc);
>>> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
>>> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
>>> +              hythread_safe_point();
>>> +              vm_thread_yield();
>>> +            }
>>> +         }
>>> +       }
>>> +       else
>>> +         RAISE_ERROR;
>>> +   }
>>> +
>>> +  int64 pause_time = time_now()-time_collection_start;
>>> +  gc_con_stat_information_out(gc);
>>> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
>>> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
>>> +  } else {
>>> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>>>   }
>>>  }
>>> +
>>> +
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
>>> @@ -19,21 +19,69 @@
>>>  #define _GC_CONCURRENT_H_
>>>  #include "gc_common.h"
>>>
>>> -enum GC_CONCURRENT_STATUS{
>>> -  GC_CON_STATUS_NIL = 0x00,
>>> -  GC_CON_MARK_PHASE = 0x01,
>>> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
>>> -  GC_CON_SWEEP_PHASE = 0x02
>>> +
>>> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
>>> +inline unsigned int trans_time_unit(int64 x)
>>> +{
>>> +  int64 result = x>>10;
>>> +  if(result) return (unsigned int)result;
>>> +  return 1;
>>> +}
>>> +
>>> +#define RAISE_ERROR  assert(0);
>>> +/* concurrent collection states in new design */
>>> +enum GC_CONCURRENT_STATUS {
>>> +  GC_CON_NIL = 0x00,
>>> +  GC_CON_STW_ENUM = 0x01,
>>> +  GC_CON_START_MARKERS = 0x02,
>>> +  GC_CON_TRACING = 0x03,
>>> +  GC_CON_TRACE_DONE = 0x04,
>>> +  GC_CON_BEFORE_SWEEP = 0x05,
>>> +  GC_CON_SWEEPING = 0x06,
>>> +  GC_CON_SWEEP_DONE = 0x07,
>>> +  GC_CON_BEFORE_FINISH = 0x08,
>>> +  GC_CON_RESET = 0x09,
>>> +  GC_CON_DISABLE = 0x0A,
>>> +};
>>> +
>>> +// this type is just for debugging and time measuring
>>> +enum GC_PARTIAL_STW_TYPE {
>>> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
>>> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
>>> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
>>> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
>>> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>>>  };
>>>
>>>  enum HANDSHAKE_SINGAL{
>>>   HSIG_MUTATOR_SAFE = 0x0,
>>> -
>>>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>>>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>>>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>>>  };
>>>
>>> +typedef struct Con_Collection_Statistics {
>>> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
>>> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
>>> +    POINTER_SIZE_INT live_alloc_size;
>>> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
>>> +
>>> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
>>> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
>>> +
>>> +    float heap_utilization_rate;
>>> +
>>> +    int64 gc_start_time;
>>> +    int64 gc_end_time;
>>> +
>>> +    int64 marking_start_time;
>>> +    int64 marking_end_time;
>>> +
>>> +    int64 sweeping_time;
>>> +    int64 pause_start_time;
>>> +
>>> +} Con_Space_Statistics;
>>> +
>>>  inline void gc_set_con_gc(unsigned int con_phase)
>>>  { GC_PROP |= con_phase;  }
>>>
>>> @@ -58,107 +106,101 @@
>>>  inline Boolean gc_is_specify_con_sweep()
>>>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>>>
>>> -extern volatile Boolean concurrent_in_marking;
>>> -extern volatile Boolean concurrent_in_sweeping;
>>> -extern volatile Boolean mark_is_concurrent;
>>> -extern volatile Boolean sweep_is_concurrent;
>>>
>>> -inline Boolean gc_mark_is_concurrent()
>>> -{
>>> -  return mark_is_concurrent;
>>> -}
>>> +extern volatile Boolean obj_alloced_live;
>>>
>>> -inline void gc_mark_set_concurrent()
>>> -{
>>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
>>> -    gc_enable_alloc_obj_live();
>>> -  mark_is_concurrent = TRUE;
>>> -}
>>> +inline Boolean is_obj_alloced_live()
>>> +{ return obj_alloced_live;  }
>>>
>>> -inline void gc_mark_unset_concurrent()
>>> -{
>>> -  gc_disable_alloc_obj_live();
>>> -  mark_is_concurrent = FALSE;
>>> +inline void gc_disable_alloc_obj_live(GC *gc)
>>> +{
>>> +  obj_alloced_live = FALSE;
>>>  }
>>>
>>> -inline Boolean gc_con_is_in_marking()
>>> +void gc_enable_alloc_obj_live(GC * gc);
>>> +
>>> +/*
>>> +    tranform the states across the collection process,
>>> +  which should be a atomic operation because there are several collector run parallel
>>> +*/
>>> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>>>  {
>>> -  return concurrent_in_marking;
>>> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
>>> +  if( old_state != from_state )
>>> +    return FALSE;
>>> +  else
>>> +    return TRUE;
>>>  }
>>>
>>> -inline Boolean gc_con_is_in_marking(GC* gc)
>>> -{
>>> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
>>> +/* set concurrent to idle,
>>> +    Or enable concurrent gc, called when STW gc finishes
>>> + */
>>> +inline void set_con_nil( GC *gc ) {
>>> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>>>  }
>>>
>>> -inline Boolean gc_sweep_is_concurrent()
>>> -{
>>> -  return sweep_is_concurrent;
>>> +
>>> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
>>> +void gc_start_con_enumeration(GC * gc);
>>> +
>>> +/* gc start marking phase */
>>> +void gc_start_con_marking(GC *gc);
>>> +
>>> +
>>> +/* prepare for sweeping */
>>> +void gc_prepare_sweeping(GC *gc);
>>> +
>>> +/* gc start sweeping phase */
>>> +void gc_start_con_sweeping(GC *gc);
>>> +
>>> +/* gc finish concurrent collection */
>>> +void gc_con_final_work(GC* gc);
>>> +
>>> +
>>> +/* gc wait cocurrent collection finishes */
>>> +void gc_wait_con_finish( GC* gc );
>>> +
>>> +/* is in gc marking phase */
>>> +inline Boolean in_con_marking_phase( GC *gc ) {
>>> +  unsigned int status = gc->gc_concurrent_status;
>>> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>>>  }
>>>
>>> -inline void gc_sweep_set_concurrent()
>>> -{
>>> -  sweep_is_concurrent = TRUE;
>>> +/* is in gc sweeping phase */
>>> +inline Boolean in_con_sweeping_phase( GC *gc ) {
>>> +  unsigned int status = gc->gc_concurrent_status;
>>> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>>>  }
>>>
>>> -inline void gc_sweep_unset_concurrent()
>>> -{
>>> -  sweep_is_concurrent = FALSE;
>>> +inline Boolean in_con_idle( GC *gc ) {
>>> +  return gc->gc_concurrent_status == GC_CON_NIL;
>>>  }
>>>
>>> -inline Boolean gc_con_is_in_sweeping()
>>> -{
>>> -  return concurrent_in_sweeping;
>>> +inline Boolean gc_con_is_in_STW( GC *gc ) {
>>> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>>>  }
>>>
>>> -inline Boolean gc_con_is_in_sweeping(GC* gc)
>>> -{
>>> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
>>> +/* is gc ready to sweeping */
>>> +inline Boolean in_con_ready_sweep( GC *gc ) {
>>> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>>>  }
>>>
>>> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
>>> -{
>>> -  /*Reset status*/
>>> -  concurrent_in_marking = FALSE;
>>> -  concurrent_in_sweeping = FALSE;
>>> -
>>> -  gc->gc_concurrent_status = status;
>>> -  switch(status){
>>> -    case GC_CON_MARK_PHASE:
>>> -      gc_mark_set_concurrent();
>>> -      concurrent_in_marking = TRUE;
>>> -      break;
>>> -    case GC_CON_SWEEP_PHASE:
>>> -      gc_sweep_set_concurrent();
>>> -      concurrent_in_sweeping = TRUE;
>>> -      break;
>>> -    default:
>>> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
>>> -  }
>>> +/* is gc sweeping */
>>> +inline Boolean in_con_sweep( GC *gc ) {
>>> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>>>
>>> -  return;
>>>  }
>>>
>>> -void gc_reset_con_mark(GC* gc);
>>> -void gc_start_con_mark(GC* gc);
>>> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
>>> -int64 gc_get_con_mark_time(GC* gc);
>>> -
>>> -void gc_start_con_sweep(GC* gc);
>>> -void gc_finish_con_sweep(GC * gc);
>>> +void gc_con_update_stat_after_marking( GC *gc );
>>>
>>> -void gc_reset_after_con_collect(GC* gc);
>>> -void gc_try_finish_con_phase(GC * gc);
>>>
>>>  void gc_decide_con_algo(char* concurrent_algo);
>>>  void gc_set_default_con_algo();
>>>
>>> -void gc_reset_con_sweep(GC* gc);
>>> -
>>> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>>>
>>>  extern volatile Boolean gc_sweep_global_normal_chunk;
>>>
>>> +
>>>  inline Boolean gc_is_sweep_global_normal_chunk()
>>>  { return gc_sweep_global_normal_chunk; }
>>>
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
>>> @@ -18,13 +18,17 @@
>>>  /**
>>>  * @author Xiao-Feng Li, 2006/10/05
>>>  */
>>> -
>>> +
>>> +#include <open/vm_class_info.h>
>>> +#include <open/vm_class_manipulation.h>
>>>  #include "../gen/gen.h"
>>>  #include "../thread/mutator.h"
>>>  #include "gc_for_barrier.h"
>>>  #include "../mark_sweep/wspace_mark_sweep.h"
>>>  #include "../common/gc_concurrent.h"
>>> +#include "../common/gc_common.h"
>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>> +#include "../verify/verify_live_heap.h"
>>>
>>>
>>>  /* All the write barrier interfaces need cleanup */
>>> @@ -117,10 +121,8 @@
>>>     Mutator *mutator = (Mutator *)gc_get_tls();
>>>
>>>     //FIXME: Release lock.
>>> -    lock(mutator->dirty_set_lock);
>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>> -    unlock(mutator->dirty_set_lock);
>>> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>>   }
>>>  }
>>>
>>> @@ -204,7 +206,8 @@
>>>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>>>       }
>>>     }
>>> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
>>> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>>   }
>>>  }
>>> @@ -215,32 +218,141 @@
>>>   REF* p_obj_slot = (REF*) p_slot ;
>>>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>>>   if(p_obj && obj_need_remember_oldvar(p_obj)){
>>> +    mutator->dirty_obj_num++;
>>>     mutator_dirtyset_add_entry(mutator, p_obj);
>>>   }
>>>  }
>>>
>>> +/*
>>> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
>>> +{
>>> +  //Mutator *mutator = (Mutator *)gc_get_tls();
>>> +
>>> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
>>> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
>>> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
>>> +
>>> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
>>> +     analyze_bad_obj(src_obj);
>>> +     RAISE_ERROR;
>>> +  }
>>> +
>>> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
>>> +     analyze_bad_obj(sub_obj);
>>> +     INFO2("gc.verifier", "[source object]");
>>> +     analyze_bad_obj(src_obj);
>>> +     //RAISE_ERROR;
>>> +     return;
>>> +  }
>>> +
>>> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
>>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
>>> +     analyze_bad_obj(target_obj);
>>> +     RAISE_ERROR;
>>> +  }
>>> +
>>> +  *p_slot = p_target;
>>> +}
>>> +*/
>>>  //===========================================
>>>
>>>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>>>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>>>  {  assert(0); }
>>>
>>> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
>>> +
>>> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
>>> +{
>>> +
>>> +
>>> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
>>> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
>>> +
>>> +    Class_Handle src_class = src_gcvt->gc_clss;
>>> +    Class_Handle dst_class = dst_gcvt->gc_clss;
>>> +
>>> +
>>> +       //element size of src should be same as element size of dst
>>> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
>>> +       unsigned int elem_size = src_gcvt->array_elem_size;
>>> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
>>> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
>>> +       /*
>>> +       #ifdef COMPRESS_REFERENCE
>>> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>> +       #else
>>> +       #endif
>>> +       */
>>> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>>> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>>> +
>>> +
>>> +       if(class_is_instanceof(src_class, dst_class)) {
>>> +         //rem obj before is for OTF GC barriers
>>> +         if(WB_REM_OLD_VAR == write_barrier_function) {
>>> +            for (unsigned int count = 0; count < length; count++) {
>>> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>> +            }
>>> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>> +         }
>>> +
>>> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
>>> +
>>> +       } else { //for the condition src is not the type of dst
>>> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
>>> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>>> +            write_barrier_rem_obj_snapshot(dst_array);
>>> +          }
>>> +
>>> +          for (unsigned int count = 0; count < length; count++) {
>>> +             // 1, null elements copy direct
>>> +             if (src_copy_body[count] == NULL) {
>>> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
>>> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>> +                 }
>>> +                  dst_copy_body[count] = NULL;
>>> +                  continue;
>>> +               }
>>> +
>>> +             // 2, For non-null elements check if types are compatible.
>>> +/*
>>> +#ifdef COMPRESS_REFERENCE
>>> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
>>> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
>>> +#else
>>> +#endif
>>> +*/
>>> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
>>> +
>>> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
>>> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>> +                      write_barrier_rem_source_obj(dst_array);
>>> +                  }
>>> +                  return FALSE;
>>> +             }
>>> +
>>> +             if(WB_REM_OLD_VAR == write_barrier_function) {
>>> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>>> +             }
>>> +              dst_copy_body[count] = src_copy_body[count];
>>> +        }
>>> +      }
>>> +
>>> +    //rem obj after is for mostly concurrent
>>> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>>> +        write_barrier_rem_source_obj(dst_array);
>>> +    }
>>> +
>>> +    return TRUE;
>>> +}
>>> +
>>> +
>>> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>>>  {
>>> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
>>> -      we treat it as an new object. It has already been marked when dest object was created.
>>> -      We use WB_REM_SOURCE_OBJ function here to debug.
>>> -    */
>>> -
>>> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
>>> -    Mutator *mutator = (Mutator *)gc_get_tls();
>>> -    lock(mutator->dirty_set_lock);
>>> -
>>> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
>>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
>>> -
>>> -    unlock(mutator->dirty_set_lock);
>>> -  }
>>>
>>>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>>>     return;
>>> @@ -283,6 +395,13 @@
>>>       write_barrier_rem_slot_oldvar(p_slot);
>>>       *p_slot = p_target;
>>>       break;
>>> +    //just debugging
>>> +    /*
>>> +    case WB_CON_DEBUG:
>>> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
>>> +       //*p_slot = p_target;
>>> +       break;
>>> +    */
>>>     default:
>>>       assert(0);
>>>       return;
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
>>> @@ -32,7 +32,8 @@
>>>   WB_REM_SOURCE_REF    = 0x02,
>>>   WB_REM_OLD_VAR       = 0x03,
>>>   WB_REM_NEW_VAR       = 0x04,
>>> -  WB_REM_OBJ_SNAPSHOT  = 0x05
>>> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
>>> +  WB_CON_DEBUG = 0x06
>>>  };
>>>
>>>  inline void gc_set_barrier_function(unsigned int wb_function)
>>> @@ -43,4 +44,3 @@
>>>  #endif /* _GC_FOR_BARRIER_H_ */
>>>
>>>
>>> -
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
>>> @@ -203,4 +203,3 @@
>>>
>>>
>>>
>>> -
>>>
>>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
>>> ==============================================================================
>>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
>>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
>>> @@ -30,7 +30,7 @@
>>>  #include "../mark_sweep/gc_ms.h"
>>>  #include "../move_compact/gc_mc.h"
>>>  #include "interior_pointer.h"
>>> -#include "../thread/marker.h"
>>> +#include "../thread/conclctor.h"
>>>  #include "../thread/collector.h"
>>>  #include "../verify/verify_live_heap.h"
>>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>> @@ -115,7 +115,10 @@
>>>   collection_scheduler_initialize(gc);
>>>
>>>   if(gc_is_specify_con_gc()){
>>> -    marker_initialize(gc);
>>> +     gc->gc_concurrent_status = GC_CON_NIL;
>>> +    conclctor_initialize(gc);
>>> +  } else {
>>> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>>>   }
>>>
>>>   collector_initialize(gc);
>>> @@ -134,6 +137,9 @@
>>>  {
>>>   INFO2("gc.process", "GC: call GC wrapup ....");
>>>   GC* gc =  p_global_gc;
>>> +  // destruct threads first, and then destruct data structures
>>> +  conclctor_destruct(gc);
>>> +  collector_destruct(gc);
>>>
>>>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>>>  gc_ms_destruct((GC_MS*)gc);
>>> @@ -148,8 +154,6 @@
>>>  #ifndef BUILD_IN_REFERENT
>>>   gc_finref_metadata_destruct(gc);
>>>  #endif
>>> -  collector_destruct(gc);
>>> -  marker_destruct(gc);
>>>
>>>   if( verify_live_heap ){
>>>     gc_terminate_heap_verification(gc);
>>> @@ -446,4 +450,3 @@
>>>
>>>
>>>
>>> -
>>>
>>>
>>>
>>
>>
>>
>> --
>> Unless stated otherwise above:
>> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
>> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>>
>
>
>
> --
> http://xiao-feng.blogspot.com
>



-- 
Unless stated otherwise above:
IBM United Kingdom Limited - Registered in England and Wales with number 741598.
Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU

Re: svn commit: r708756 [1/3] - in /harmony/enhanced/drlvm/trunk/vm: gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/ gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/ gc_gen/src/trace_forward/ gc_gen/src/verify/

Posted by Xiao-Feng Li <xi...@gmail.com>.
Sian, thanks for your notice.

This patch is indeed big. Most of the code are guarded by a macro
USE_UNIQUE_MARK_SWEEP_GC and has no impact on the existing code base.
It is disabled by default, and I tested it before I committed it.

Actually it is not a new feature, but a fix of existing concurrent GC
scheduler. Well, I admit it looks like a new feature since it changes
lot of code...

If it has any impact on stability, I will roll back it immediately.
Thanks for your patience.

Thanks,
xiaofeng

On Wed, Oct 29, 2008 at 5:22 PM, Sian January
<si...@googlemail.com> wrote:
> Hi Xiao-Feng,
>
> This commit looks like quite a large new feature to me.  Since we're
> in feature freeze this week for M8 I really think it should be backed
> out until after the milestone, as we should be focussing on testing
> and stability at the moment.
>
> Thanks,
>
> Sian
>
>
> 2008/10/29  <xl...@apache.org>:
>> Author: xli
>> Date: Tue Oct 28 20:01:01 2008
>> New Revision: 708756
>>
>> URL: http://svn.apache.org/viewvc?rev=708756&view=rev
>> Log:
>> HARMONY-5989 : Concurrent GC (Tick) enhancement in scheduling
>>
>> Added:
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp   (with props)
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h   (with props)
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp   (with props)
>> Removed:
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/marker.h
>> Modified:
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
>>    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
>>    harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
>>    harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
>>    harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/build/gc_gen.exp Tue Oct 28 20:01:01 2008
>> @@ -34,6 +34,7 @@
>>     gc_heap_write_global_slot;
>>     gc_heap_write_ref;
>>     gc_heap_wrote_object;
>> +    gc_heap_copy_object_array;
>>     gc_init;
>>     gc_is_object_pinned;
>>     gc_iterate_heap;
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>> @@ -31,24 +31,16 @@
>>   return;
>>  }
>>
>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>> -{
>> -  if(gc_is_specify_con_gc()){
>> -    gc_update_con_collection_scheduler(gc, time_mutator, time_collection);
>> -  }
>> -  return;
>> -}
>>
>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause)
>>  {
>>   /*collection scheduler only schedules concurrent collection now.*/
>>   if(GC_CAUSE_CONCURRENT_GC == gc_cause){
>>     assert(gc_is_specify_con_gc());
>> -    return gc_sched_con_collection(gc, gc_cause);
>> +    return gc_con_perform_collection( gc );
>>   }else{
>>     return FALSE;
>>   }
>>  }
>>
>>
>> -
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/collection_scheduler.h Tue Oct 28 20:01:01 2008
>> @@ -26,12 +26,8 @@
>>  void collection_scheduler_initialize(GC* gc);
>>  void collection_scheduler_destruct(GC* gc);
>>
>> -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>  Boolean gc_sched_collection(GC* gc, unsigned int gc_cause);
>>
>>  #endif
>>
>>
>> -
>> -
>> -
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.cpp Tue Oct 28 20:01:01 2008
>> @@ -22,7 +22,7 @@
>>  #include "collection_scheduler.h"
>>  #include "concurrent_collection_scheduler.h"
>>  #include "gc_concurrent.h"
>> -#include "../thread/marker.h"
>> +#include "../thread/conclctor.h"
>>  #include "../verify/verify_live_heap.h"
>>
>>  #define NUM_TRIAL_COLLECTION 2
>> @@ -53,6 +53,7 @@
>>  Boolean gc_use_space_scheduler()
>>  { return cc_scheduler_kind & SPACE_BASED_SCHEDULER; }
>>
>> +
>>  static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
>>  static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>>
>> @@ -75,6 +76,7 @@
>>   STD_FREE(gc->collection_scheduler);
>>  }
>>
>> +
>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler)
>>  {
>>   string_to_upper(cc_scheduler);
>> @@ -93,281 +95,248 @@
>>   gc_enable_time_scheduler();
>>  }
>>
>> -static Boolean time_to_start_mark(GC* gc)
>> -{
>> -  if(!gc_use_time_scheduler()) return FALSE;
>> -
>> -  int64 time_current = time_now();
>> -  return (time_current - get_collection_end_time()) > time_delay_to_start_mark;
>> -}
>> -
>> -static Boolean space_to_start_mark(GC* gc)
>> -{
>> -  if(!gc_use_space_scheduler()) return FALSE;
>> +/*====================== new scheduler ===================*/
>> +extern unsigned int NUM_CON_MARKERS;
>> +extern unsigned int NUM_CON_SWEEPERS;
>> +unsigned int gc_get_mutator_number(GC *gc);
>> +
>> +#define MOSTLY_CON_MARKER_DIVISION 0.5
>> +unsigned int mostly_con_final_marker_num=1;
>> +unsigned int mostly_con_long_marker_num=1;
>> +
>> +unsigned int gc_get_marker_number(GC* gc) {
>> +  unsigned int mutator_num = gc_get_mutator_number(gc);
>> +  unsigned int marker_specified = NUM_CON_MARKERS;
>> +  if(marker_specified == 0) {
>> +    if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>> +       INFO2("gc.con.scheduler", "[Marker Num] mutator num="<<mutator_num<<", assign marker num="<<marker_specified);
>> +    } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>> +       marker_specified = min(gc->num_conclctors, mutator_num>>1);
>> +       mostly_con_final_marker_num = max(marker_specified, mostly_con_final_marker_num); // in the STW phase, so all the conclctor can be used
>> +       mostly_con_long_marker_num = (unsigned int)(marker_specified*MOSTLY_CON_MARKER_DIVISION);
>> +       //INFO2("gc.con.scheduler", "[Marker Num] common marker="<<marker_specified<<", final marker="<<mostly_con_final_marker_num);
>> +    }
>> +  }
>>
>> -  POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE);
>> -  return (size_new_obj > space_threshold_to_start_mark);
>> +  assert(marker_specified);
>> +  return marker_specified;
>>  }
>>
>> -static Boolean gc_need_start_con_mark(GC* gc)
>> -{
>> -  if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE;
>> -
>> -  if(time_to_start_mark(gc) || space_to_start_mark(gc))
>> -    return TRUE;
>> -  else
>> -    return FALSE;
>> +#define CON_SWEEPER_DIVISION 0.8
>> +unsigned int gc_get_sweeper_numer(GC *gc) {
>> +  unsigned int sweeper_specified = NUM_CON_SWEEPERS;
>> +  if(sweeper_specified == 0)
>> +    sweeper_specified = (unsigned int)(gc->num_conclctors*CON_SWEEPER_DIVISION);
>> +  //INFO2("gc.con.scheduler", "[Sweeper Num] assign sweeper num="<<sweeper_specified);
>> +  assert(sweeper_specified);
>> +  return sweeper_specified;
>>  }
>>
>> -static Boolean gc_need_start_con_sweep(GC* gc)
>> -{
>> -  if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE;
>>
>> -  /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/
>> -  if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc))
>> -    return TRUE;
>> -  else
>> -    return FALSE;
>> -}
>>
>> -static Boolean gc_need_reset_after_con_collect(GC* gc)
>> -{
>> -  if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc))
>> -    return TRUE;
>> -  else
>> -    return FALSE;
>> -}
>>
>> -static Boolean gc_need_start_con_enum(GC* gc)
>> -{
>> -  /*TODO: support on-the-fly root set enumeration.*/
>> -  return FALSE;
>> -}
>> +#define DEFAULT_CONSERCATIVE_FACTOR (1.0f)
>> +#define CONSERCATIVE_FACTOR_FULLY_CONCURRENT (0.95f)
>> +static float conservative_factor = DEFAULT_CONSERCATIVE_FACTOR;
>>
>> -#define SPACE_UTIL_RATIO_CORRETION 0.2f
>> -#define TIME_CORRECTION_OTF_MARK 0.65f
>> -#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f
>> -#define TIME_CORRECTION_MOSTLY_MARK 0.5f
>> -
>> -static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection)
>> -{
>> -  Space* space = NULL;
>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>> -
>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>> -  space = (Space*) gc_get_wspace(gc);
>> -#endif
>> -  if(!space) return;
>> +/* for checking heap effcient*/
>> +#define SMALL_DELTA 1000 //minimal check frequency is about delta us
>> +#define SPACE_CHECK_STAGE_TWO_TIME (SMALL_DELTA<<6)
>> +#define SPACE_CHECK_STAGE_ONE_TIME (SMALL_DELTA<<12)
>>
>> -  Space_Statistics* space_stat = space->space_statistic;
>> -
>> -  unsigned int slot_index = cc_scheduler->last_window_index;
>> -  unsigned int num_slot   = cc_scheduler->num_window_slots;
>> -
>> -  cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj;
>> -  cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj;
>> -  cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio;
>> +#define DEFAULT_ALLOC_RATE (1<<19) //500k/ms
>> +#define DEFAULT_MARKING_TIME (1<<9) //512 ms
>>
>> -  cc_scheduler->last_mutator_time = time_mutator;
>> -  cc_scheduler->last_collector_time = time_collection;
>> -
>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>> -    return;
>> -
>> -  cc_scheduler->alloc_rate_window[slot_index]
>> -    = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator;
>> +static int64 last_check_time_point = time_now();
>> +static int64 check_delay_time = time_now(); //  initial value is just for modifying
>>
>> -  if(gc_mark_is_concurrent()){
>> -    cc_scheduler->trace_rate_window[slot_index]
>> -      = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection;
>> -  }else{
>> -    cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE;
>> -  }
>> -
>> -  cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot);
>> -  cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE;
>> +//just debugging
>> +int64 get_last_check_point()
>> +{
>> +   return last_check_time_point;
>>  }
>>
>> -static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection)
>> -{
>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
>> -    return;
>> +static unsigned int alloc_space_threshold = 0;
>>
>> -  Space* space = NULL;
>> -#ifdef USE_UNIQUE_MARK_SWEEP_GC
>> -  space = (Space*) gc_get_wspace(gc);
>> -#endif
>> -  if(!space) return;
>> -
>> -  Space_Statistics* space_stat = space->space_statistic;
>> -
>> -  float sum_alloc_rate = 0;
>> -  float sum_trace_rate = 0;
>> -  float sum_space_util_ratio = 0;
>> +static unsigned int space_check_stage_1; //SPACE_CHECK_EXPECTED_START_TIME
>> +static unsigned int space_check_stage_2; //BIG_DELTA
>>
>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>> +static unsigned int calculate_start_con_space_threshold(Con_Collection_Statistics *con_collection_stat, unsigned int heap_size)
>> +{
>>
>> -  int64 time_this_collection_correction = 0;
>> -#if 0
>> -  float space_util_ratio = space_stat->space_utilization_ratio;
>> -  if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){
>> -    time_this_collection_correction = 0;
>> -  }else{
>> -    time_this_collection_correction
>> -      = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator);
>> -  }
>> -#endif
>> -
>> -  unsigned int i;
>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>> -    sum_alloc_rate += cc_scheduler->alloc_rate_window[i];
>> -    sum_trace_rate += cc_scheduler->trace_rate_window[i];
>> -    sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i];
>> -  }
>> -
>> -  TRACE2("gc.con.cs","Allocation Rate: ");
>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->alloc_rate_window[i]);
>> -  }
>> -
>> -  TRACE2("gc.con.cs","Tracing Rate: ");
>> -  for(i = 0; i < cc_scheduler->num_window_slots; i++){
>> -    TRACE2("gc.con.cs",i+1<<"--"<<cc_scheduler->trace_rate_window[i]);
>> -  }
>> -
>> -  float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots;
>> -  float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots;
>> -  float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots;
>> -
>> -  TRACE2("gc.con.cs","averAllocRate: "<<average_alloc_rate<<"averTraceRate: "<<average_trace_rate<<"  average_space_util_ratio: "<<average_space_util_ratio<<" ");
>> -
>> -  if(average_alloc_rate == 0 ){
>> -    time_delay_to_start_mark = MIN_DELAY_TIME;
>> -    space_threshold_to_start_mark = MIN_SPACE_THRESHOLD;
>> -  }else if(average_trace_rate == 0){
>> -    time_delay_to_start_mark = MAX_DELAY_TIME;
>> -    space_threshold_to_start_mark = MAX_SPACE_THRESHOLD;
>> -  }else{
>> -    float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
>> -    float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
>> -    TRACE2("gc.con.cs","[GC][Con] expected alloc time "<<time_alloc_expected<<"  expected collect time  "<<time_trace_expected<<" ");
>> -
>> -    if(time_alloc_expected > time_trace_expected){
>> -      if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){
>> -        float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK;
>> -        cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction);
>> -      }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -        cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK);
>> -      }
>> -    }else{
>> -      cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME;
>> +  float util_rate = con_collection_stat->heap_utilization_rate;
>> +  unsigned int space_threshold = 0;
>> +  if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>> +    if( con_collection_stat->trace_rate == 0 )  //for initial iteration
>> +         con_collection_stat->trace_rate = con_collection_stat->alloc_rate*20;
>> +    unsigned int alloc_rate = con_collection_stat->alloc_rate;
>> +    if(alloc_rate<con_collection_stat->trace_rate) {       //  THRESHOLD = Heap*utilization_rate*(1-alloc_rate/marking_rate), accurate formaler
>> +      float alloc_marking_rate_ratio = (float)(alloc_rate)/con_collection_stat->trace_rate;
>> +
>> +      space_threshold = (unsigned int)(heap_size*util_rate*(1-alloc_marking_rate_ratio)*conservative_factor);
>> +    } else {  //use default
>> +       unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>> +       space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>     }
>> -
>> -    cc_scheduler->space_threshold_to_start_mark =
>> -      (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected));
>> -
>> -    time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction;
>> -    space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark;
>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>> +    unsigned int alloc_while_marking = DEFAULT_MARKING_TIME*con_collection_stat->alloc_rate;
>> +    space_threshold = (unsigned int)(heap_size*util_rate) -alloc_while_marking;
>>   }
>> -  TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms ");
>> -  TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms ");
>>
>> +  if( space_threshold > con_collection_stat->surviving_size_at_gc_end )
>> +    alloc_space_threshold = space_threshold - con_collection_stat->surviving_size_at_gc_end;
>> +  else
>> +    alloc_space_threshold = MIN_SPACE_THRESHOLD;
>> +
>> +  //INFO2("gc.con.info", "[Threshold] alloc_space_threshold=" << alloc_space_threshold);
>> +  return space_threshold;
>>  }
>>
>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection)
>> -{
>> -  assert(gc_is_specify_con_gc());
>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return;
>> -
>> -  con_collection_scheduler_update_stat(gc, time_mutator, time_collection);
>> -  con_collection_scheduler_update_start_point(gc, time_mutator, time_collection);
>> -
>> -  return;
>> -}
>> -
>> -Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause)
>> +/* this parameters are updated at end of GC */
>> +void gc_update_scheduler_parameter( GC *gc )
>>  {
>> -  if(!try_lock(gc->lock_collect_sched)) return FALSE;
>> -  vm_gc_lock_enum();
>> -
>> -  gc_try_finish_con_phase(gc);
>> -
>> -  if(gc_need_start_con_enum(gc)){
>> -    /*TODO:Concurrent rootset enumeration.*/
>> -    assert(0);
>> -  }
>> -
>> -  if(gc_need_start_con_mark(gc)){
>> -    INFO2("gc.con.info", "[GC][Con] concurrent mark start ...");
>> -    gc_start_con_mark(gc);
>> -    vm_gc_unlock_enum();
>> -    unlock(gc->lock_collect_sched);
>> -    return TRUE;
>> -  }
>> -
>> -  if(gc_need_start_con_sweep(gc)){
>> -    gc->num_collections++;
>> -    INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" ");
>> -    gc_start_con_sweep(gc);
>> -    vm_gc_unlock_enum();
>> -    unlock(gc->lock_collect_sched);
>> -    return TRUE;
>> -  }
>> -
>> -  if(gc_need_reset_after_con_collect(gc)){
>> -    int64 pause_start = time_now();
>> -    int disable_count = vm_suspend_all_threads();
>> -    gc_reset_after_con_collect(gc);
>> -    gc_start_mutator_time_measure(gc);
>> -    set_collection_end_time();
>> -    vm_resume_all_threads(disable_count);
>> -    vm_gc_unlock_enum();
>> -    INFO2("gc.con.time","[GC][Con]pause(reset collection):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>> -    unlock(gc->lock_collect_sched);
>> -    return TRUE;
>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +   last_check_time_point = time_now();
>> +
>> +   unsigned int alloc_rate = con_collection_stat->alloc_rate;
>> +   space_check_stage_1 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_ONE_TIME);
>> +   space_check_stage_2 = alloc_rate * trans_time_unit(SPACE_CHECK_STAGE_TWO_TIME);
>> +   //INFO2( "gc.con.scheduler", "space_check_stage_1=["<<space_check_stage_1<<"], space_check_stage_2=["<<space_check_stage_2<<"]" );
>> +
>> +   check_delay_time = (con_collection_stat->gc_start_time - con_collection_stat->gc_end_time)>>2;
>> +   //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>> +   if(gc_is_specify_con_sweep()) {
>> +         conservative_factor = CONSERCATIVE_FACTOR_FULLY_CONCURRENT;
>> +   }
>> +   calculate_start_con_space_threshold(con_collection_stat, gc->committed_heap_size);
>> +}
>> +
>> +void gc_force_update_scheduler_parameter( GC *gc )
>> +{
>> +    last_check_time_point = time_now();
>> +    //check_delay_time = SPACE_CHECK_STAGE_ONE_TIME;
>> +    check_delay_time = time_now();
>> +    //INFO2("gc.con.scheduler", "next check time = [" << trans_time_unit(check_delay_time) << "] ms" );
>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +    con_collection_stat->alloc_rate = DEFAULT_ALLOC_RATE;
>> +}
>> +
>> +
>> +
>> +static inline Boolean check_start_mark( GC *gc )
>> +{
>> +   unsigned int new_object_occupied_size = gc_get_mutator_new_obj_size(gc);
>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +   /*just debugging*/
>> +   float used_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_object_occupied_size)/gc->committed_heap_size;
>> +   if( alloc_space_threshold < new_object_occupied_size ) {
>> +       INFO2( "gc.con.info", "[Start Con] check has been delayed " << check_delay_time << " us, until ratio at start point="<<used_rate );
>> +       return TRUE;
>> +   }
>> +
>> +   unsigned int free_space = alloc_space_threshold - new_object_occupied_size;
>> +     //INFO2("gc.con.info", "[GC Scheduler debug] alloc_space_threshold="<<alloc_space_threshold<<", new_object_occupied_size"<<new_object_occupied_size);
>> +   int64 last_check_delay = check_delay_time;
>> +
>> +   if( free_space < space_check_stage_2 ) {
>> +       check_delay_time = SMALL_DELTA;
>> +   } else if( free_space < space_check_stage_1 ) {
>> +       if(check_delay_time>SPACE_CHECK_STAGE_TWO_TIME ) { //if time interval is too small, the alloc rate will not be updated
>> +           unsigned int interval_time = trans_time_unit(time_now() - con_collection_stat->gc_end_time);
>> +           unsigned int interval_space = new_object_occupied_size;
>> +           con_collection_stat->alloc_rate = interval_space/interval_time;
>> +       }
>> +       check_delay_time = ((alloc_space_threshold - new_object_occupied_size)/con_collection_stat->alloc_rate)<<9;
>> +   }
>> +   last_check_time_point = time_now();
>> +
>> +   //INFO2("gc.con.info", "[GC Scheduler] check has been delayed=" << last_check_delay << " us, used_rate=" << used_rate << ", free_space=" << free_space << " bytes, next delay=" << check_delay_time << " us" );
>> +   return FALSE;
>> +}
>> +
>> +static SpinLock check_lock;
>> +static inline Boolean space_should_start_mark( GC *gc)
>> +{
>> +  if( ( time_now() -last_check_time_point ) > check_delay_time && try_lock(check_lock) ) { //first condition is checked frequently, second condition is for synchronization
>> +      Boolean should_start = check_start_mark(gc);
>> +      unlock(check_lock);
>> +      return should_start;
>>   }
>> -  vm_gc_unlock_enum();
>> -  unlock(gc->lock_collect_sched);
>>   return FALSE;
>>  }
>>
>> -extern unsigned int NUM_MARKERS;
>> -
>> -unsigned int gc_decide_marker_number(GC* gc)
>> -{
>> -  unsigned int num_active_marker;
>> -  Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler;
>> +inline static Boolean gc_con_start_condition( GC* gc ) {
>> +   return space_should_start_mark(gc);
>> +}
>>
>> -  /*If the number of markers is specfied, just return the specified value.*/
>> -  if(NUM_MARKERS != 0) return NUM_MARKERS;
>>
>> -  /*If the number of markers isn't specified, we decide the value dynamically.*/
>> -  if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){
>> -    /*Start trial cycle, collection set to 1 in trial cycle and */
>> -    num_active_marker = 1;
>> -  }else{
>> -    num_active_marker = cc_scheduler->last_marker_num;
>> -    int64 c_time = cc_scheduler->last_collector_time;
>> -    int64 m_time = cc_scheduler->last_mutator_time;
>> -    int64 d_time = cc_scheduler->time_delay_to_start_mark;
>> -
>> -    if(num_active_marker == 0) num_active_marker = 1;
>> -
>> -    if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){
>> -      TRACE2("gc.con.cs","[GC][Con] increase marker number.");
>> -      num_active_marker ++;
>> -      if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers;
>> -    }else if((float)d_time > (m_time * 0.6)){
>> -      TRACE2("gc.con.cs","[GC][Con] decrease marker number.");
>> -      num_active_marker --;
>> -      if(num_active_marker == 0)  num_active_marker = 1;
>> -    }
>> -
>> -    TRACE2("gc.con.cs","[GC][Con] ctime  "<<(unsigned)(c_time>>10)<<"  mtime  "<<(unsigned)(m_time>>10)<<"  dtime  "<<(unsigned)(d_time>>10));
>> -    TRACE2("gc.con.cs","[GC][Con] marker num : "<<num_active_marker<<" ");
>> +void gc_reset_after_con_collection(GC *gc);
>> +void gc_merge_free_list_global(GC *gc);
>> +void gc_con_stat_information_out(GC *gc);
>> +
>> +unsigned int sub_time = 0;
>> +int64 pause_time = 0;
>> +/*
>> +   concurrent collection entry function, it may start proper phase according to the current state.
>> +*/
>> +Boolean gc_con_perform_collection( GC* gc ) {
>> +  int disable_count;
>> +  int64 pause_start;
>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  switch( gc->gc_concurrent_status ) {
>> +    case GC_CON_NIL :
>> +      if( !gc_con_start_condition(gc) )
>> +        return FALSE;
>> +      if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
>> +        return FALSE;
>> +
>> +      gc->num_collections++;
>> +      gc->cause = GC_CAUSE_CONCURRENT_GC;
>> +
>> +      con_collection_stat->gc_start_time = time_now();
>> +      disable_count = hythread_reset_suspend_disable();
>> +
>> +      gc_start_con_enumeration(gc); //now, it is a stw enumeration
>> +      con_collection_stat->marking_start_time = time_now();
>> +      state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
>> +      gc_start_con_marking(gc);
>> +
>> +      INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<"  us "); // ERSM means enumerate rootset and start concurrent marking
>> +      vm_resume_threads_after();
>> +      hythread_set_suspend_disable(disable_count);
>> +      break;
>> +
>> +    case GC_CON_BEFORE_SWEEP :
>> +      if(!gc_is_specify_con_sweep())
>> +         return FALSE;
>> +      if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
>> +         return FALSE;
>> +      gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
>> +      break;
>> +
>> +
>> +    case GC_CON_BEFORE_FINISH :
>> +        if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
>> +                 return FALSE;
>> +        /* thread should be suspended before the state transformation,
>> +            it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
>> +        disable_count = vm_suspend_all_threads();
>> +        pause_start = time_now();
>> +
>> +        gc_merge_free_list_global(gc);
>> +        gc_reset_after_con_collection(gc);
>> +        state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
>> +        pause_time = time_now()-pause_start;
>> +
>> +        vm_resume_all_threads(disable_count);
>> +        gc_con_stat_information_out(gc);
>> +        INFO2("gc.con.time","[GC][Con]pause(reset collection):  CRST="<<pause_time<<"  us\n\n"); // CRST means concurrent reset
>> +        break;
>> +    default :
>> +      return FALSE;
>>   }
>> -
>> -  cc_scheduler->last_marker_num = num_active_marker;
>> -  return num_active_marker;
>> +  return TRUE;
>>  }
>>
>> +
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/concurrent_collection_scheduler.h Tue Oct 28 20:01:01 2008
>> @@ -20,6 +20,7 @@
>>
>>  #define STAT_SAMPLE_WINDOW_SIZE 5
>>
>> +struct GC_MS;
>>  typedef struct Con_Collection_Scheduler {
>>   /*common field*/
>>   GC* gc;
>> @@ -46,10 +47,17 @@
>>  void con_collection_scheduler_initialize(GC* gc);
>>  void con_collection_scheduler_destruct(GC* gc);
>>
>> +void gc_update_scheduler_parameter( GC *gc );
>> +void gc_force_update_scheduler_parameter( GC *gc );
>> +Boolean gc_con_perform_collection( GC* gc );
>>  Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause);
>> -void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection);
>>
>>  void gc_decide_cc_scheduler_kind(char* cc_scheduler);
>>  void gc_set_default_cc_scheduler_kind();
>> +
>> +extern unsigned int mostly_con_final_marker_num;
>> +extern unsigned int mostly_con_long_marker_num;
>> +
>>  #endif
>>
>> +
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Tue Oct 28 20:01:01 2008
>> @@ -22,7 +22,7 @@
>>  #include "gc_common.h"
>>  #include "gc_metadata.h"
>>  #include "../thread/mutator.h"
>> -#include "../thread/marker.h"
>> +#include "../thread/conclctor.h"
>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>  #include "../gen/gen.h"
>>  #include "../mark_sweep/gc_ms.h"
>> @@ -74,11 +74,19 @@
>>  static int64 collection_start_time = time_now();
>>  static int64 collection_end_time = time_now();
>>
>> -int64 get_collection_end_time()
>> +int64 get_gc_start_time()
>> +{ return collection_start_time; }
>> +
>> +void set_gc_start_time()
>> +{ collection_start_time = time_now(); }
>> +
>> +int64 get_gc_end_time()
>>  { return collection_end_time; }
>>
>> -void set_collection_end_time()
>> -{ collection_end_time = time_now(); }
>> +void set_gc_end_time()
>> +{
>> +  collection_end_time = time_now();
>> +}
>>
>>  void gc_decide_collection_kind(GC* gc, unsigned int cause)
>>  {
>> @@ -93,17 +101,17 @@
>>
>>  }
>>
>> -void gc_update_space_stat(GC_MS* gc)
>> +void gc_update_space_stat(GC* gc)
>>  {
>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>> -    gc_ms_update_space_stat((GC_MS*)gc);
>> +      gc_ms_update_space_stat((GC_MS *)gc);
>>  #endif
>>  }
>>
>> -void gc_reset_space_stat(GC_MS* gc)
>> +void gc_reset_space_stat(GC* gc)
>>  {
>>  #ifdef USE_UNIQUE_MARK_SWEEP_GC
>> -    gc_ms_reset_space_stat((GC_MS*)gc);
>> +      gc_ms_reset_space_stat((GC_MS *)gc);
>>  #endif
>>  }
>>
>> @@ -118,7 +126,7 @@
>>   gc_set_rootset(gc);
>>  }
>>
>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection)
>> +void gc_reset_after_collection(GC* gc)
>>  {
>>   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
>>
>> @@ -139,11 +147,9 @@
>>  #endif
>>   }
>>
>> -  gc_update_space_stat((GC_MS*)gc);
>> +  gc_update_space_stat(gc);
>>
>> -  gc_update_collection_scheduler(gc, time_mutator, time_collection);
>> -
>> -  gc_reset_space_stat((GC_MS*)gc);
>> +  gc_reset_space_stat(gc);
>>
>>   gc_reset_collector_state(gc);
>>
>> @@ -154,23 +160,25 @@
>>
>>  }
>>
>> +void set_check_delay( int64 mutator_time );
>> +
>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
>>  {
>>   INFO2("gc.process", "\nGC: GC start ...\n");
>>
>> -  collection_start_time = time_now();
>> -  int64 time_mutator = collection_start_time - collection_end_time;
>> -
>> -  gc->num_collections++;
>>   gc->cause = gc_cause;
>>
>>   if(gc_is_specify_con_gc()){
>> -    gc_finish_con_GC(gc, time_mutator);
>> -    collection_end_time = time_now();
>> +    gc_wait_con_finish(gc);
>>     INFO2("gc.process", "GC: GC end\n");
>>     return;
>>   }
>>
>> +   set_gc_start_time();
>> +  int64 time_mutator = get_gc_start_time() - get_gc_end_time();
>> +
>> +  gc->num_collections++;
>> +
>>   /* FIXME:: before mutators suspended, the ops below should be very careful
>>      to avoid racing with mutators. */
>>
>> @@ -207,16 +215,16 @@
>>   gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
>>  #endif
>>
>> -  collection_end_time = time_now();
>> +  set_gc_end_time();
>>
>> -  int64 time_collection = collection_end_time - collection_start_time;
>> +  int64 time_collection = get_gc_end_time() - get_gc_start_time();
>>
>>  #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
>>   gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator);
>>   gc_gen_space_verbose_info((GC_Gen*)gc);
>>  #endif
>>
>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>> +  gc_reset_after_collection(gc);
>>
>>   gc_assign_free_area_to_mutators(gc);
>>
>> @@ -230,6 +238,3 @@
>>
>>
>>
>> -
>> -
>> -
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Tue Oct 28 20:01:01 2008
>> @@ -39,7 +39,8 @@
>>
>>  #include "../common/gc_for_barrier.h"
>>
>> -/*
>> +
>> + /*
>>  #define USE_UNIQUE_MARK_SWEEP_GC  //define it to only use Mark-Sweep GC (no NOS, no LOS).
>>  #define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
>>  */
>> @@ -336,19 +337,7 @@
>>   return TRUE;
>>  }
>>
>> -extern volatile Boolean obj_alloced_live;
>> -inline Boolean is_obj_alloced_live()
>> -{ return obj_alloced_live;  }
>>
>> -inline void gc_enable_alloc_obj_live()
>> -{
>> -  obj_alloced_live = TRUE;
>> -}
>> -
>> -inline void gc_disable_alloc_obj_live()
>> -{
>> -  obj_alloced_live = FALSE;
>> -}
>>
>>  /***************************************************************/
>>
>> @@ -391,7 +380,7 @@
>>  /***************************************************************/
>>
>>  /* all GCs inherit this GC structure */
>> -struct Marker;
>> +struct Conclctor;
>>  struct Mutator;
>>  struct Collector;
>>  struct GC_Metadata;
>> @@ -421,9 +410,12 @@
>>   unsigned int num_collectors;
>>   unsigned int num_active_collectors; /* not all collectors are working */
>>
>> -  Marker** markers;
>> -  unsigned int num_markers;
>> +  /*concurrent markers and collectors*/
>> +  Conclctor** conclctors;
>> +  unsigned int num_conclctors;
>> +  //unsigned int num_active_conclctors;
>>   unsigned int num_active_markers;
>> +  unsigned int num_active_sweepers;
>>
>>   /* metadata is the pool for rootset, tracestack, etc. */
>>   GC_Metadata* metadata;
>> @@ -443,7 +435,7 @@
>>
>>   Space_Tuner* tuner;
>>
>> -  unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>> +  volatile unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/
>>   Collection_Scheduler* collection_scheduler;
>>
>>   SpinLock lock_con_mark;
>> @@ -488,11 +480,15 @@
>>
>>  GC* gc_parse_options();
>>  void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
>> +void gc_relaim_heap_con_mode( GC *gc);
>>  void gc_prepare_rootset(GC* gc);
>>
>>
>> -int64 get_collection_end_time();
>> -void set_collection_end_time();
>> +int64 get_gc_start_time();
>> +void set_gc_start_time();
>> +
>> +int64 get_gc_end_time();
>> +void set_gc_end_time();
>>
>>  /* generational GC related */
>>
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.cpp Tue Oct 28 20:01:01 2008
>> @@ -17,325 +17,582 @@
>>  #include "gc_common.h"
>>  #include "gc_metadata.h"
>>  #include "../thread/mutator.h"
>> -#include "../thread/marker.h"
>> +#include "../thread/conclctor.h"
>>  #include "../thread/collector.h"
>>  #include "../finalizer_weakref/finalizer_weakref.h"
>>  #include "../gen/gen.h"
>>  #include "../mark_sweep/gc_ms.h"
>> +#include "../mark_sweep/wspace_mark_sweep.h"
>>  #include "interior_pointer.h"
>>  #include "collection_scheduler.h"
>>  #include "gc_concurrent.h"
>>  #include "../common/gc_for_barrier.h"
>> +#include "concurrent_collection_scheduler.h"
>> +#include "../verify/verify_live_heap.h"
>>
>> -volatile Boolean concurrent_in_marking  = FALSE;
>> -volatile Boolean concurrent_in_sweeping = FALSE;
>> -volatile Boolean mark_is_concurrent     = FALSE;
>> -volatile Boolean sweep_is_concurrent    = FALSE;
>> +struct Con_Collection_Statistics;
>>
>>  volatile Boolean gc_sweep_global_normal_chunk = FALSE;
>>
>> -static void gc_check_con_mark(GC* gc)
>> +//just debugging
>> +inline void gc_ms_get_current_heap_usage(GC_MS *gc)
>>  {
>> -  if(!is_mark_finished(gc)){
>> -    lock(gc->lock_con_mark);
>> -    if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>> -    }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>> -      gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>> -    }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -      //ignore.
>> -    }
>> -    unlock(gc->lock_con_mark);
>> -  }
>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
>> +  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
>> +  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
>> +  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
>>  }
>>
>> -static void gc_wait_con_mark_finish(GC* gc)
>> +void gc_con_update_stat_before_enable_alloc_live(GC *gc)
>>  {
>> -  wait_mark_finish(gc);
>> -  gc_set_barrier_function(WB_REM_NIL);
>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>> +  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS *)gc);
>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>>  }
>> +
>> +volatile Boolean obj_alloced_live;
>>
>> -unsigned int gc_decide_marker_number(GC* gc);
>> +void gc_enable_alloc_obj_live(GC *gc)
>> +{
>> +  gc_con_update_stat_before_enable_alloc_live(gc);
>> +  obj_alloced_live = TRUE;
>> +}
>>
>> -void gc_start_con_mark(GC* gc)
>> +void gc_mostly_con_update_stat_after_final_marking(GC *gc)
>>  {
>> -  int disable_count;
>> -  unsigned int num_marker;
>> -
>> -  if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return;
>> -
>> -  lock(gc->lock_enum);
>> -  disable_count = hythread_reset_suspend_disable();
>> -  int64 pause_start = time_now();
>> -  gc_set_rootset_type(ROOTSET_IS_OBJ);
>> -  gc_prepare_rootset(gc);
>> -
>> -  gc_set_concurrent_status(gc, GC_CON_MARK_PHASE);
>> +  POINTER_SIZE_INT num_live_obj = 0;
>> +  POINTER_SIZE_INT size_live_obj = 0;
>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>
>> -  num_marker = gc_decide_marker_number(gc);
>> -
>> -  /*start concurrent mark*/
>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ)){
>> -    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>> -  }else if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>> -    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>> -  }else if(gc_is_kind(ALGO_CON_OTF_REF)){
>> -    gc_set_barrier_function(WB_REM_OLD_VAR);
>> -    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>> +  unsigned int num_conclctors = gc->num_conclctors;
>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>> +    Conclctor* conclctor = gc->conclctors[i];
>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>> +      continue;
>> +    num_live_obj += conclctor->live_obj_num;
>> +    size_live_obj += conclctor->live_obj_size;
>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>> +    conclctor->live_obj_num = 0;
>> +    conclctor->live_obj_size = 0;
>> +    conclctor->num_dirty_slots_traced = 0;
>>   }
>>
>> -  unlock(gc->lock_enum);
>> -  INFO2("gc.con.time","[GC][Con]pause(enumeration root):    "<<((unsigned int)((time_now()-pause_start)>>10))<<"  ms ");
>> -  vm_resume_threads_after();
>> -  assert(hythread_is_suspend_enabled());
>> -  hythread_set_suspend_disable(disable_count);
>> -
>> -  unlock(gc->lock_con_mark);
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  con_collection_stat->live_size_marked += size_live_obj;
>> +  INFO2("gc.con.scheduler", "[Final Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>> +
>>  }
>>
>> -void mostly_con_mark_terminate_reset();
>> -void terminate_mostly_con_mark();
>> -
>> -void gc_finish_con_mark(GC* gc, Boolean need_STW)
>> +unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role);
>> +//called by the marker when it finishes
>> +void gc_con_update_stat_after_marking(GC *gc)
>>  {
>> -  gc_check_con_mark(gc);
>> -
>> -  if(gc_is_kind(ALGO_CON_MOSTLY))
>> -    terminate_mostly_con_mark();
>> -
>> -  gc_wait_con_mark_finish(gc);
>> +  POINTER_SIZE_INT num_live_obj = 0;
>> +  POINTER_SIZE_INT size_live_obj = 0;
>> +  POINTER_SIZE_INT num_dirty_obj_traced = 0;
>>
>> -  int disable_count;
>> -  if(need_STW){
>> -    /*suspend the mutators.*/
>> -    lock(gc->lock_enum);
>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -      /*In mostly concurrent algorithm, there's a final marking pause.
>> -            Prepare root set for final marking.*/
>> -      disable_count = hythread_reset_suspend_disable();
>> -      gc_set_rootset_type(ROOTSET_IS_OBJ);
>> -      gc_prepare_rootset(gc);
>> -    }else{
>> -      disable_count = vm_suspend_all_threads();
>> -    }
>> +  unsigned int num_conclctors = gc->num_conclctors;
>> +  for( unsigned int i=0; i<num_conclctors; i++ ) {
>> +    Conclctor* conclctor = gc->conclctors[i];
>> +    if( conclctor->role != CONCLCTOR_ROLE_MARKER )
>> +      continue;
>> +    num_live_obj += conclctor->live_obj_num;
>> +    size_live_obj += conclctor->live_obj_size;
>> +    num_dirty_obj_traced += conclctor->num_dirty_slots_traced;
>> +    conclctor->live_obj_num = 0;
>> +    conclctor->live_obj_size = 0;
>> +    conclctor->num_dirty_slots_traced = 0;
>>   }
>>
>> -  if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -    /*In mostly concurrent algorithm, there's a final marking pause.
>> -          Suspend the mutators once again and finish the marking phase.*/
>> -
>> -    /*prepare dirty object*/
>> -    gc_prepare_dirty_set(gc);
>> -
>> -    gc_set_weakref_sets(gc);
>> -
>> -    /*start STW mark*/
>> -    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS);
>> -
>> -    mostly_con_mark_terminate_reset();
>> -    gc_clear_dirty_set(gc);
>> -  }
>> -
>> -  gc_reset_dirty_set(gc);
>> -
>> -  if(need_STW){
>> -    unlock(gc->lock_enum);
>> -    if(gc_is_kind(ALGO_CON_MOSTLY)){
>> -      vm_resume_threads_after();
>> -      assert(hythread_is_suspend_enabled());
>> -      hythread_set_suspend_disable(disable_count);
>> -    }else{
>> -      vm_resume_all_threads(disable_count);
>> -    }
>> -  }
>> +  unsigned int write_barrier_marked_size = gc_get_mutator_write_barrier_marked_size(gc);
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  con_collection_stat->live_size_marked = size_live_obj + write_barrier_marked_size;
>> +  //INFO2("gc.con.scheduler", "[Mark Finish] live_marked_size:      "<<con_collection_stat->live_size_marked<<" bytes");
>>
>> +   /*statistics information update (marking_end_time, trace_rate) */
>> +  con_collection_stat->marking_end_time = time_now();
>> +  int64 marking_time = (unsigned int)(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>> +
>> +  unsigned int heap_size =
>> +       con_collection_stat->surviving_size_at_gc_end +
>> +       gc_get_mutator_new_obj_size(gc);
>> +
>> +  con_collection_stat->trace_rate = heap_size/trans_time_unit(marking_time);
>> +
>> +
>> +
>> +  /*
>> +  //statistics just for debugging
>> +  unsigned int marker_num = gc_get_conclcor_num(gc, CONCLCTOR_ROLE_MARKER);
>> +  float heap_used_rate = (float)heap_size/gc->committed_heap_size;
>> +  unsigned int new_obj_size_marking = gc_get_mutator_new_obj_size(gc) - con_collection_stat->alloc_size_before_alloc_live;
>> +  unsigned int alloc_rate_marking = new_obj_size_marking/trans_time_unit(con_collection_stat->marking_end_time - con_collection_stat->marking_start_time);
>> +  INFO2("gc.con.scheduler", "[Mark Finish] tracing time=" <<marking_time<<" us, trace rate=" << con_collection_stat->trace_rate<<"b/ms, current heap used="<<heap_used_rate );
>> +  INFO2("gc.con.scheduler", "[Mark Finish] marker num="<<marker_num << ", alloc factor=" << (float)alloc_rate_marking/con_collection_stat->alloc_rate);
>> +  */
>>  }
>>
>> -void gc_reset_con_mark(GC* gc)
>> +void gc_PSTW_update_stat_after_marking(GC *gc)
>>  {
>> -  gc->num_active_markers = 0;
>> -  gc_mark_unset_concurrent();
>> +  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  con_collection_stat->live_size_marked = size_live_obj;
>> +  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
>> +
>> +  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
>> +  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>> +  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>>  }
>>
>> -int64 gc_get_con_mark_time(GC* gc)
>> +//Called only when heap is exhuaset
>> +void gc_con_update_stat_heap_exhausted(GC* gc)
>>  {
>> -  int64 time_mark = 0;
>> -  Marker** markers = gc->markers;
>> -  unsigned int i;
>> -  for(i = 0; i < gc->num_active_markers; i++){
>> -    Marker* marker = markers[i];
>> -    if(marker->time_mark > time_mark){
>> -      time_mark = marker->time_mark;
>> -    }
>> -    marker->time_mark = 0;
>> -  }
>> -  return time_mark;
>> +  unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] surviving size="<<con_collection_stat->surviving_size_at_gc_end<<" bytes, new_obj_size="<<new_obj_size<<" bytes");
>> +  //INFO2("gc.con.scheduler", "[Heap exhausted] current utilization rate="<<con_collection_stat->heap_utilization_rate);
>>  }
>>
>> -void gc_start_con_sweep(GC* gc)
>> +
>> +//just debugging
>> +unsigned int gc_con_get_live_size_from_sweeper(GC *gc)
>>  {
>> -  if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return;
>> +  POINTER_SIZE_INT num_live_obj = 0;
>> +  POINTER_SIZE_INT size_live_obj = 0;
>>
>> -  /*FIXME: enable finref*/
>> -  if(!IGNORE_FINREF ){
>> -    gc_set_obj_with_fin(gc);
>> -    Collector* collector = gc->collectors[0];
>> -    collector_identify_finref(collector);
>> -#ifndef BUILD_IN_REFERENT
>> -  }else{
>> -    gc_set_weakref_sets(gc);
>> -    gc_update_weakref_ignore_finref(gc);
>> -#endif
>> +  unsigned int num_collectors = gc->num_active_collectors;
>> +  Collector** collectors = gc->collectors;
>> +  unsigned int i;
>> +  for(i = 0; i < num_collectors; i++){
>> +    Collector* collector = collectors[i];
>> +    num_live_obj += collector->live_obj_num;
>> +    size_live_obj += collector->live_obj_size;
>> +    collector->live_obj_num = 0;
>> +    collector->live_obj_size = 0;
>>   }
>> +
>> +  return size_live_obj;
>> +}
>>
>> -  gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE);
>> +//Called when Con GC ends, must called in a STW period
>> +void gc_reset_con_space_stat(GC *gc)
>> +{
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  unsigned int new_obj_size = gc_reset_mutator_new_obj_size((GC *)gc);
>>
>> -  gc_set_weakref_sets(gc);
>> +  if( gc_is_kind(ALGO_CON_MOSTLY) ) {
>> +    con_collection_stat->live_alloc_size = 0; //mostly concurrent do not make new alloc obj live
>> +  } else if ( gc_is_kind( ALGO_CON_OTF_OBJ ) || gc_is_kind( ALGO_CON_OTF_REF ) ) {
>> +    con_collection_stat->live_alloc_size = new_obj_size - con_collection_stat->alloc_size_before_alloc_live;
>> +  }
>> +
>> +  /*live obj size at the end of gc = the size of objs belong to {marked_live + alloc_at_marking+alloc_at_sweeping},
>> +  (for mostly concurrent, con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked .)*/
>> +  con_collection_stat->surviving_size_at_gc_end = con_collection_stat->live_size_marked + con_collection_stat->live_alloc_size;
>> +  //INFO2( "gc.con.scheduler", "[Mark Live] live_size_marked = " << con_collection_stat->live_size_marked << ", live_alloc_size=" << con_collection_stat->live_alloc_size );
>>
>> -  /*Note: We assumed that adding entry to weakroot_pool is happened in STW rootset enumeration.
>> -      So, when this assumption changed, we should modified the below function.*/
>> -  gc_identify_dead_weak_roots(gc);
>>
>> -  /*start concurrent mark*/
>> -  gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
>> +  /*
>> +  //just debugging
>> +  if( !gc_is_specify_con_sweep() ) {
>> +    unsigned int surviving_sweeper = gc_con_get_live_size_from_sweeper(gc);
>> +    unsigned int surviving_marker = con_collection_stat->surviving_size_at_gc_end;
>> +    INFO2("gc.con.scheduler", "[Surviving size] by sweeper: " << surviving_sweeper << " bytes, by marker:" << surviving_marker << " bytes, diff=" << (surviving_sweeper - surviving_marker) );
>> +  }*/
>>
>> -  unlock(gc->lock_con_sweep);
>> +  int64 current_time = time_now();
>> +
>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>> +       unsigned int gc_interval_time = 0;
>> +       if( con_collection_stat->pause_start_time != 0 ) //remove the stw time
>> +            gc_interval_time = trans_time_unit(con_collection_stat->pause_start_time - con_collection_stat->gc_end_time);
>> +       else
>> +            gc_interval_time = trans_time_unit(current_time -con_collection_stat->gc_end_time );
>> +       con_collection_stat->alloc_rate = new_obj_size/gc_interval_time;
>> +       gc_update_scheduler_parameter(gc);
>> +  } else {
>> +     gc_force_update_scheduler_parameter(gc);
>> +  }
>> +
>> +  con_collection_stat->gc_end_time = current_time;
>> +
>> +  con_collection_stat->live_size_marked = 0;
>> +  con_collection_stat->live_alloc_size = 0;
>> +  con_collection_stat->alloc_size_before_alloc_live = 0;
>> +  con_collection_stat->marking_start_time = 0;
>> +  con_collection_stat->marking_end_time = 0;
>> +  con_collection_stat->sweeping_time = gc_get_conclctor_time((GC *)gc, CONCLCTOR_ROLE_SWEEPER); //be 0 if not CMCS
>> +  con_collection_stat->pause_start_time = 0;
>> +  assert(con_collection_stat->heap_utilization_rate<1);
>> +
>>  }
>>
>> -void gc_reset_con_sweep(GC* gc)
>> +void gc_con_stat_information_out(GC *gc)
>>  {
>> -  gc->num_active_collectors = 0;
>> -  gc_sweep_unset_concurrent();
>> +  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>> +  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
>> +  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
>> +  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
>> +  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
>> +  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
>> +  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
>> +  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
>>  }
>>
>> -void gc_wait_con_sweep_finish(GC* gc)
>> +void gc_reset_after_con_collection(GC* gc)
>>  {
>> -  wait_collection_finish(gc);
>> -  gc_set_concurrent_status(gc,GC_CON_STATUS_NIL);
>> +  assert(gc_is_specify_con_gc());
>> +  int64 reset_start = time_now();
>> +  if(!IGNORE_FINREF ){
>> +    INFO2("gc.process", "GC: finref process after collection ...\n");
>> +    gc_put_finref_to_vm(gc);
>> +    gc_reset_finref_metadata(gc);
>> +    gc_activate_finref_threads((GC*)gc);
>> +#ifndef BUILD_IN_REFERENT
>> +  } else {
>> +    gc_clear_weakref_pools(gc);
>> +    gc_clear_finref_repset_pool(gc);
>> +#endif
>> +  }
>> +  reset_start = time_now();
>> +  gc_reset_con_space_stat(gc);
>> +  gc_clear_conclctor_role(gc);
>> +  vm_reclaim_native_objs();
>>  }
>>
>> -void gc_finish_con_sweep(GC * gc)
>> +
>> +
>> +void gc_set_default_con_algo()
>>  {
>> -  gc_wait_con_sweep_finish(gc);
>> +  assert((GC_PROP & ALGO_CON_MASK) == 0);
>> +  GC_PROP |= ALGO_CON_OTF_OBJ;
>>  }
>>
>> -void gc_try_finish_con_phase(GC * gc)
>> +void gc_decide_con_algo(char* concurrent_algo)
>>  {
>> -  /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/
>> -  if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){
>> -    /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished.
>> -          Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock
>> -          here to guarantee this occasional case.*/
>> -    if(try_lock(gc->lock_con_mark)){
>> -      unlock(gc->lock_con_mark);
>> -      gc_finish_con_mark(gc, TRUE);
>> -    }
>> -  }
>> -
>> -  if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){
>> -    //The reason is same as concurrent mark above.
>> -    if(try_lock(gc->lock_con_sweep)){
>> -      unlock(gc->lock_con_sweep);
>> -      gc_finish_con_sweep(gc);
>> -    }
>> +  string_to_upper(concurrent_algo);
>> +  GC_PROP &= ~ALGO_CON_MASK;
>> +  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>> +    GC_PROP |= ALGO_CON_OTF_OBJ;
>> +  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>> +    GC_PROP |= ALGO_CON_MOSTLY;
>> +  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>> +    GC_PROP |= ALGO_CON_OTF_REF;
>>   }
>>  }
>>
>> -void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection);
>>
>> -void gc_reset_after_con_collect(GC* gc)
>> +/*
>> +    gc start enumeration phase, now, it is in a stop-the-world manner
>> +*/
>> +void gc_start_con_enumeration(GC * gc)
>>  {
>> -  assert(gc_is_specify_con_gc());
>> -
>> -  int64 time_mutator = gc_get_mutator_time(gc);
>> -  int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc);
>> +  gc_set_rootset_type(ROOTSET_IS_OBJ);
>> +  gc_prepare_rootset(gc);
>> +}
>>
>> -  gc_reset_interior_pointer_table();
>> +//unsigned int gc_decide_marker_number(GC* gc);
>> +unsigned int gc_get_marker_number(GC* gc);
>> +/*  gc start marking phase */
>> +void gc_start_con_marking(GC *gc)
>> +{
>> +  unsigned int num_marker;
>> +  num_marker = gc_get_marker_number(gc);
>>
>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>> -
>> -  if(gc_mark_is_concurrent()){
>> -    gc_reset_con_mark(gc);
>> +  if(gc_is_kind(ALGO_CON_OTF_OBJ)) {
>> +    gc_enable_alloc_obj_live(gc);
>> +    gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT);
>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>> +  } else if(gc_is_kind(ALGO_CON_MOSTLY)) {
>> +    gc_set_barrier_function(WB_REM_SOURCE_OBJ);
>> +    gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker);
>> +  } else if(gc_is_kind(ALGO_CON_OTF_REF)) {
>> +    gc_enable_alloc_obj_live(gc);
>> +    gc_set_barrier_function(WB_REM_OLD_VAR);
>> +    gc_ms_start_con_mark((GC_MS*)gc, num_marker);
>>   }
>> +}
>> +
>>
>> -  if(gc_sweep_is_concurrent()){
>> -    gc_reset_con_sweep(gc);
>> +/*
>> +    gc start sweeping phase
>> +*/
>> +void gc_prepare_sweeping(GC *gc) {
>> +  INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
>> +  /*FIXME: enable finref*/
>> +  if(!IGNORE_FINREF ){
>> +    gc_set_obj_with_fin(gc);
>> +    Collector* collector = gc->collectors[0];
>> +    collector_identify_finref(collector);
>> +  #ifndef BUILD_IN_REFERENT
>> +  } else {
>> +    conclctor_set_weakref_sets(gc);
>> +    gc_update_weakref_ignore_finref(gc);
>> +  #endif
>>   }
>> +  gc_identify_dead_weak_roots(gc);
>>  }
>>
>> -void gc_finish_con_GC(GC* gc, int64 time_mutator)
>> -{
>> +int64 get_last_check_point();
>> +// for the case pure stop the world
>> +static void gc_partial_con_PSTW( GC *gc) {
>>   int64 time_collection_start = time_now();
>> -
>> +  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
>> +  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
>> +  // stop the world enumeration
>>   gc->num_collections++;
>> -
>> -  lock(gc->lock_enum);
>> -
>>   int disable_count = hythread_reset_suspend_disable();
>>   gc_set_rootset_type(ROOTSET_IS_REF);
>>   gc_prepare_rootset(gc);
>> -  unlock(gc->lock_enum);
>> -
>> -  if(gc_sweep_is_concurrent()){
>> -    if(gc_con_is_in_sweeping())
>> -      gc_finish_con_sweep(gc);
>> -  }else{
>> -    if(gc_con_is_in_marking()){
>> -      gc_finish_con_mark(gc, FALSE);
>> -    }
>> -    gc->in_collection = TRUE;
>> -    gc_reset_mutator_context(gc);
>> -    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>> -    gc_ms_reclaim_heap((GC_MS*)gc);
>> -  }
>> -
>> -  int64 time_collection = 0;
>> -  if(gc_mark_is_concurrent()){
>> -    time_collection = gc_get_con_mark_time(gc);
>> -    gc_reset_con_mark(gc);
>> -  }else{
>> -    time_collection = time_now()-time_collection_start;
>> -  }
>> +
>> +  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
>> +      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
>> +      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
>> +  }
>> +
>> +  //reclaim heap
>> +  gc_reset_mutator_context(gc);
>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>> +
>> +  //update live size
>> +  gc_PSTW_update_stat_after_marking(gc);
>> +
>> +  // reset the collection and resume mutators
>> +  gc_reset_after_con_collection(gc);
>>
>> -  if(gc_sweep_is_concurrent()){
>> -    gc_reset_con_sweep(gc);
>> -  }
>> -
>> -  gc_reset_after_collection(gc, time_mutator, time_collection);
>> -
>> -  gc_start_mutator_time_measure(gc);
>> -
>> +  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
>>   vm_resume_threads_after();
>>   assert(hythread_is_suspend_enabled());
>> -  hythread_set_suspend_disable(disable_count);
>> -  int64 pause_time = time_now()-time_collection_start;
>> -
>> -  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){
>> -    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>> -  }else{
>> -    INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ):    "<<((unsigned int)(pause_time>>10))<<"  ms ");
>> -  }
>> -  return;
>> +  hythread_set_suspend_disable(disable_count);
>>  }
>>
>> -void gc_set_default_con_algo()
>> -{
>> -  assert((GC_PROP & ALGO_CON_MASK) == 0);
>> -  GC_PROP |= ALGO_CON_OTF_OBJ;
>> +void terminate_mostly_con_mark();
>> +void wspace_mostly_con_final_mark( GC *gc );
>> +
>> +// for the case concurrent marking is not finished before heap is exhausted
>> +static void gc_partial_con_PMSS(GC *gc) {
>> +  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>> +  // wait concurrent marking finishes
>> +  int64 wait_start = time_now();
>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>> +  //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>> +  while( gc->gc_concurrent_status == GC_CON_START_MARKERS ||
>> +             gc->gc_concurrent_status == GC_CON_TRACING ||
>> +             gc->gc_concurrent_status == GC_CON_TRACE_DONE)
>> +  {
>> +      vm_thread_yield(); //let the unfinished marker run
>> +  }
>> +
>> +  /*just debugging*/
>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>> +    int64 pause_time = time_now() - wait_start;
>> +    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
>> +    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
>> +
>> +  // start STW reclaiming heap
>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>> +  gc_reset_mutator_context(gc);
>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>> +
>> +  // reset after partial stop the world collection
>> +  gc_reset_after_con_collection(gc);
>> +  set_con_nil(gc);
>> +}
>> +
>> +// only when current sweep is set to false
>> +static void gc_partial_con_CMSS(GC *gc) {
>> +
>> +  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
>> +  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed
>> +
>> +  /*just debugging*/
>> +    gc_ms_get_current_heap_usage((GC_MS *)gc);
>> +    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
>> +    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );
>> +
>> +  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>> +
>> +  // start reclaiming heap, it will skip the marking phase
>> +  gc_reset_mutator_context(gc);
>> +  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>> +  gc_ms_reclaim_heap((GC_MS*)gc);
>> +
>> +  // reset after partial stop the world collection
>> +  gc_reset_after_con_collection(gc);
>> +  set_con_nil(gc);
>> +}
>> +
>> +void gc_merge_free_list_global(GC *gc);
>> +//for the case concurrent marking and partial concurrent sweeping
>> +static void gc_partial_con_CMPS( GC *gc ) {
>> +
>> +  while(gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE) {
>> +      vm_thread_yield();  //let the unfinished sweeper run
>> +  }
>> +  gc_merge_free_list_global(gc);
>> +  // reset after partial stop the world collection
>> +  gc_reset_after_con_collection(gc);
>> +  set_con_nil(gc);
>> +}
>> +
>> +
>> +inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
>> +  switch( type ) {
>> +    case GC_PARTIAL_PSTW :
>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
>> +      break;
>> +    case GC_PARTIAL_PMSS :
>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
>> +      break;
>> +    case GC_PARTIAL_CMPS :
>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
>> +      break;
>> +    case GC_PARTIAL_CMSS :
>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
>> +      break;
>> +    case GC_PARTIAL_FCSR :
>> +      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
>> +      break;
>> +  }
>> +}
>> +
>> +static unsigned int gc_con_heap_full_mostly_con( GC *gc )
>> +{
>> +   while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) { // we should enumerate rootset after old rootset is traced
>> +      vm_thread_yield();
>> +   }
>> +
>> +   int64 final_start = time_now();
>> +   int disable_count = hythread_reset_suspend_disable();
>> +   gc_set_rootset_type(ROOTSET_IS_OBJ);
>> +   gc_prepare_rootset(gc);
>> +
>> +   gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
>> +   terminate_mostly_con_mark(); // terminate current mostly concurrent marking
>> +
>> +   //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
>> +   while(gc->gc_concurrent_status == GC_CON_TRACING) {
>> +      vm_thread_yield(); //let the unfinished marker run
>> +   }
>> +
>> +   //final marking phase
>> +   gc_clear_conclctor_role(gc);
>> +   wspace_mostly_con_final_mark(gc);
>> +
>> +   /*just debugging*/
>> +   int64 final_time = time_now() - final_start;
>> +   INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
>> +   gc_ms_get_current_heap_usage((GC_MS *)gc);
>> +
>> +  // start STW reclaiming heap
>> +   gc_con_update_stat_heap_exhausted(gc); // calculate util rate
>> +   gc_reset_mutator_context(gc);
>> +   if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
>> +   gc_ms_reclaim_heap((GC_MS*)gc);
>> +
>> +   // reset after partial stop the world collection
>> +   gc_reset_after_con_collection(gc);
>> +   set_con_nil(gc);
>> +
>> +   vm_resume_threads_after();
>> +   hythread_set_suspend_disable(disable_count);
>> +   return GC_PARTIAL_PMSS;
>> +
>> +}
>> +
>> +static unsigned int gc_con_heap_full_otf( GC *gc )
>> +{
>> +   unsigned int partial_type; //for time measuring and debugging
>> +   int disable_count = vm_suspend_all_threads();
>> +   Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +   con_collection_stat->pause_start_time = time_now();
>> +   switch(gc->gc_concurrent_status) {
>> +       case GC_CON_START_MARKERS :
>> +       case GC_CON_TRACING :
>> +       case GC_CON_TRACE_DONE :
>> +         partial_type = GC_PARTIAL_PMSS;
>> +         gc_partial_con_PMSS(gc);
>> +         break;
>> +       case GC_CON_BEFORE_SWEEP : // only when current sweep is set to false
>> +         partial_type = GC_PARTIAL_CMSS;
>> +         gc_partial_con_CMSS(gc);
>> +         break;
>> +       case GC_CON_SWEEPING :
>> +       case GC_CON_SWEEP_DONE :
>> +         partial_type = GC_PARTIAL_CMPS;
>> +         gc_partial_con_CMPS(gc);
>> +         break;
>> +       case GC_CON_BEFORE_FINISH : //heap can be exhausted when sweeping finishes, very rare
>> +         partial_type = GC_PARTIAL_FCSR;
>> +         gc_merge_free_list_global(gc);
>> +         gc_reset_after_con_collection(gc);
>> +         set_con_nil(gc);
>> +         break;
>> +       case GC_CON_RESET :
>> +       case GC_CON_NIL :
>> +       case GC_CON_STW_ENUM :
>> +         /*do nothing, if still in gc_con_reset, will wait to finish after resuming. this case happens rarely*/
>> +         partial_type = GC_PARTIAL_FCSR;
>> +         break;
>> +       /* other state is illegal here */
>> +       default:
>> +         INFO2("gc.con.info", "illegal state when the heap is out [" << gc->gc_concurrent_status << "]");
>> +         RAISE_ERROR;
>> +    }
>> +    vm_resume_all_threads(disable_count);
>> +    return partial_type;
>>  }
>>
>> -void gc_decide_con_algo(char* concurrent_algo)
>> -{
>> -  string_to_upper(concurrent_algo);
>> -  GC_PROP &= ~ALGO_CON_MASK;
>> -  if(!strcmp(concurrent_algo, "OTF_OBJ")){
>> -    GC_PROP |= ALGO_CON_OTF_OBJ;
>> -  }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){
>> -    GC_PROP |= ALGO_CON_MOSTLY;
>> -  }else if(!strcmp(concurrent_algo, "OTF_SLOT")){
>> -    GC_PROP |= ALGO_CON_OTF_REF;
>> +void gc_con_stat_information_out(GC *gc);
>> +/*
>> +this method is called before STW gc start, there is a big lock outside
>> +*/
>> +void gc_wait_con_finish( GC* gc ) {
>> +  int64 time_collection_start = time_now();
>> +  unsigned int partial_type; //for time measuring and debugging
>> +
>> +   /* cocurrent gc is idle */
>> +   if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) { // for the race condition of con schduling and STW gc
>> +        Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
>> +        con_collection_stat->gc_start_time = time_now();
>> +        con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
>> +        partial_type = GC_PARTIAL_PSTW;
>> +        gc_partial_con_PSTW( gc );
>> +   } else {
>> +      while(gc->gc_concurrent_status == GC_CON_STW_ENUM ) { //wait concurrent gc finish enumeration
>> +          hythread_safe_point();
>> +          vm_thread_yield();
>> +       }
>> +       if( gc_is_kind(ALGO_CON_MOSTLY) )
>> +         partial_type = gc_con_heap_full_mostly_con(gc);
>> +       else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
>> +         partial_type = gc_con_heap_full_otf(gc);
>> +         if(gc->gc_concurrent_status == GC_CON_RESET) {
>> +            while( gc->gc_concurrent_status == GC_CON_RESET ) { //wait concurrent to finish
>> +              hythread_safe_point();
>> +              vm_thread_yield();
>> +            }
>> +         }
>> +       }
>> +       else
>> +         RAISE_ERROR;
>> +   }
>> +
>> +  int64 pause_time = time_now()-time_collection_start;
>> +  gc_con_stat_information_out(gc);
>> +  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
>> +    INFO2("gc.con.time","[GC][Con]pause(   Forcing GC   ):    "<<(unsigned int)(pause_time)<<"  us ");
>> +  } else {
>> +    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
>>   }
>>  }
>> +
>> +
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_concurrent.h Tue Oct 28 20:01:01 2008
>> @@ -19,21 +19,69 @@
>>  #define _GC_CONCURRENT_H_
>>  #include "gc_common.h"
>>
>> -enum GC_CONCURRENT_STATUS{
>> -  GC_CON_STATUS_NIL = 0x00,
>> -  GC_CON_MARK_PHASE = 0x01,
>> -  GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only.
>> -  GC_CON_SWEEP_PHASE = 0x02
>> +
>> +#define RATE_CALCULATE_DENOMINATOR_FACTOR 10; //trans us to ms
>> +inline unsigned int trans_time_unit(int64 x)
>> +{
>> +  int64 result = x>>10;
>> +  if(result) return (unsigned int)result;
>> +  return 1;
>> +}
>> +
>> +#define RAISE_ERROR  assert(0);
>> +/* concurrent collection states in new design */
>> +enum GC_CONCURRENT_STATUS {
>> +  GC_CON_NIL = 0x00,
>> +  GC_CON_STW_ENUM = 0x01,
>> +  GC_CON_START_MARKERS = 0x02,
>> +  GC_CON_TRACING = 0x03,
>> +  GC_CON_TRACE_DONE = 0x04,
>> +  GC_CON_BEFORE_SWEEP = 0x05,
>> +  GC_CON_SWEEPING = 0x06,
>> +  GC_CON_SWEEP_DONE = 0x07,
>> +  GC_CON_BEFORE_FINISH = 0x08,
>> +  GC_CON_RESET = 0x09,
>> +  GC_CON_DISABLE = 0x0A,
>> +};
>> +
>> +// this type is just for debugging and time measuring
>> +enum GC_PARTIAL_STW_TYPE {
>> +  GC_PARTIAL_PSTW = 0x00,  //pure stop the world
>> +  GC_PARTIAL_PMSS = 0x01,  //concurrent marking has finished and stop the world sweeping
>> +  GC_PARTIAL_CMSS = 0x02,  // partial concurrent marking and stop the world sweeping
>> +  GC_PARTIAL_CMPS = 0x03,  //concurrent marking and sweeping
>> +  GC_PARTIAL_FCSR = 0x04, //fully concurrent marking and sweeping, but stw finish reset
>>  };
>>
>>  enum HANDSHAKE_SINGAL{
>>   HSIG_MUTATOR_SAFE = 0x0,
>> -
>>   HSIG_DISABLE_SWEEP_LOCAL_CHUNKS  = 0x01,
>>   HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02,
>>   HSIG_MUTATOR_ENTER_ALLOC_MARK    = 0x03,
>>  };
>>
>> +typedef struct Con_Collection_Statistics {
>> +    POINTER_SIZE_INT live_size_marked;     //marked objects size
>> +    POINTER_SIZE_INT alloc_size_before_alloc_live;  //alloc objects size before marking
>> +    POINTER_SIZE_INT live_alloc_size;
>> +    POINTER_SIZE_INT surviving_size_at_gc_end; //total live object size when gc is ended
>> +
>> +    POINTER_SIZE_INT trace_rate;  //bytes per ms
>> +    POINTER_SIZE_INT alloc_rate;       //bytes per ms
>> +
>> +    float heap_utilization_rate;
>> +
>> +    int64 gc_start_time;
>> +    int64 gc_end_time;
>> +
>> +    int64 marking_start_time;
>> +    int64 marking_end_time;
>> +
>> +    int64 sweeping_time;
>> +    int64 pause_start_time;
>> +
>> +} Con_Space_Statistics;
>> +
>>  inline void gc_set_con_gc(unsigned int con_phase)
>>  { GC_PROP |= con_phase;  }
>>
>> @@ -58,107 +106,101 @@
>>  inline Boolean gc_is_specify_con_sweep()
>>  { return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; }
>>
>> -extern volatile Boolean concurrent_in_marking;
>> -extern volatile Boolean concurrent_in_sweeping;
>> -extern volatile Boolean mark_is_concurrent;
>> -extern volatile Boolean sweep_is_concurrent;
>>
>> -inline Boolean gc_mark_is_concurrent()
>> -{
>> -  return mark_is_concurrent;
>> -}
>> +extern volatile Boolean obj_alloced_live;
>>
>> -inline void gc_mark_set_concurrent()
>> -{
>> -  if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF))
>> -    gc_enable_alloc_obj_live();
>> -  mark_is_concurrent = TRUE;
>> -}
>> +inline Boolean is_obj_alloced_live()
>> +{ return obj_alloced_live;  }
>>
>> -inline void gc_mark_unset_concurrent()
>> -{
>> -  gc_disable_alloc_obj_live();
>> -  mark_is_concurrent = FALSE;
>> +inline void gc_disable_alloc_obj_live(GC *gc)
>> +{
>> +  obj_alloced_live = FALSE;
>>  }
>>
>> -inline Boolean gc_con_is_in_marking()
>> +void gc_enable_alloc_obj_live(GC * gc);
>> +
>> +/*
>> +    tranform the states across the collection process,
>> +  which should be a atomic operation because there are several collector run parallel
>> +*/
>> +inline Boolean state_transformation( GC* gc, unsigned int from_state, unsigned int to_state )
>>  {
>> -  return concurrent_in_marking;
>> +  unsigned int old_state = apr_atomic_cas32( &gc->gc_concurrent_status, to_state, from_state );
>> +  if( old_state != from_state )
>> +    return FALSE;
>> +  else
>> +    return TRUE;
>>  }
>>
>> -inline Boolean gc_con_is_in_marking(GC* gc)
>> -{
>> -  return gc->gc_concurrent_status == GC_CON_MARK_PHASE;
>> +/* set concurrent to idle,
>> +    Or enable concurrent gc, called when STW gc finishes
>> + */
>> +inline void set_con_nil( GC *gc ) {
>> +  apr_atomic_set32( &gc->gc_concurrent_status, GC_CON_NIL );
>>  }
>>
>> -inline Boolean gc_sweep_is_concurrent()
>> -{
>> -  return sweep_is_concurrent;
>> +
>> +/* gc start enumeration phase, now, it is in a stop-the-world manner */
>> +void gc_start_con_enumeration(GC * gc);
>> +
>> +/* gc start marking phase */
>> +void gc_start_con_marking(GC *gc);
>> +
>> +
>> +/* prepare for sweeping */
>> +void gc_prepare_sweeping(GC *gc);
>> +
>> +/* gc start sweeping phase */
>> +void gc_start_con_sweeping(GC *gc);
>> +
>> +/* gc finish concurrent collection */
>> +void gc_con_final_work(GC* gc);
>> +
>> +
>> +/* gc wait cocurrent collection finishes */
>> +void gc_wait_con_finish( GC* gc );
>> +
>> +/* is in gc marking phase */
>> +inline Boolean in_con_marking_phase( GC *gc ) {
>> +  unsigned int status = gc->gc_concurrent_status;
>> +  return (status == GC_CON_TRACING) || (status == GC_CON_TRACE_DONE);
>>  }
>>
>> -inline void gc_sweep_set_concurrent()
>> -{
>> -  sweep_is_concurrent = TRUE;
>> +/* is in gc sweeping phase */
>> +inline Boolean in_con_sweeping_phase( GC *gc ) {
>> +  unsigned int status = gc->gc_concurrent_status;
>> +  return (status == GC_CON_SWEEPING) || (status == GC_CON_SWEEP_DONE);
>>  }
>>
>> -inline void gc_sweep_unset_concurrent()
>> -{
>> -  sweep_is_concurrent = FALSE;
>> +inline Boolean in_con_idle( GC *gc ) {
>> +  return gc->gc_concurrent_status == GC_CON_NIL;
>>  }
>>
>> -inline Boolean gc_con_is_in_sweeping()
>> -{
>> -  return concurrent_in_sweeping;
>> +inline Boolean gc_con_is_in_STW( GC *gc ) {
>> +  return gc->gc_concurrent_status == GC_CON_DISABLE;
>>  }
>>
>> -inline Boolean gc_con_is_in_sweeping(GC* gc)
>> -{
>> -  return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE;
>> +/* is gc ready to sweeping */
>> +inline Boolean in_con_ready_sweep( GC *gc ) {
>> +  return gc->gc_concurrent_status == GC_CON_BEFORE_SWEEP;
>>  }
>>
>> -inline void gc_set_concurrent_status(GC*gc, unsigned int status)
>> -{
>> -  /*Reset status*/
>> -  concurrent_in_marking = FALSE;
>> -  concurrent_in_sweeping = FALSE;
>> -
>> -  gc->gc_concurrent_status = status;
>> -  switch(status){
>> -    case GC_CON_MARK_PHASE:
>> -      gc_mark_set_concurrent();
>> -      concurrent_in_marking = TRUE;
>> -      break;
>> -    case GC_CON_SWEEP_PHASE:
>> -      gc_sweep_set_concurrent();
>> -      concurrent_in_sweeping = TRUE;
>> -      break;
>> -    default:
>> -      assert(!concurrent_in_marking && !concurrent_in_sweeping);
>> -  }
>> +/* is gc sweeping */
>> +inline Boolean in_con_sweep( GC *gc ) {
>> +  return ( gc->gc_concurrent_status == GC_CON_SWEEPING || gc->gc_concurrent_status == GC_CON_SWEEP_DONE );
>>
>> -  return;
>>  }
>>
>> -void gc_reset_con_mark(GC* gc);
>> -void gc_start_con_mark(GC* gc);
>> -void gc_finish_con_mark(GC* gc, Boolean need_STW);
>> -int64 gc_get_con_mark_time(GC* gc);
>> -
>> -void gc_start_con_sweep(GC* gc);
>> -void gc_finish_con_sweep(GC * gc);
>> +void gc_con_update_stat_after_marking( GC *gc );
>>
>> -void gc_reset_after_con_collect(GC* gc);
>> -void gc_try_finish_con_phase(GC * gc);
>>
>>  void gc_decide_con_algo(char* concurrent_algo);
>>  void gc_set_default_con_algo();
>>
>> -void gc_reset_con_sweep(GC* gc);
>> -
>> -void gc_finish_con_GC(GC* gc, int64 time_mutator);
>>
>>  extern volatile Boolean gc_sweep_global_normal_chunk;
>>
>> +
>>  inline Boolean gc_is_sweep_global_normal_chunk()
>>  { return gc_sweep_global_normal_chunk; }
>>
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.cpp Tue Oct 28 20:01:01 2008
>> @@ -18,13 +18,17 @@
>>  /**
>>  * @author Xiao-Feng Li, 2006/10/05
>>  */
>> -
>> +
>> +#include <open/vm_class_info.h>
>> +#include <open/vm_class_manipulation.h>
>>  #include "../gen/gen.h"
>>  #include "../thread/mutator.h"
>>  #include "gc_for_barrier.h"
>>  #include "../mark_sweep/wspace_mark_sweep.h"
>>  #include "../common/gc_concurrent.h"
>> +#include "../common/gc_common.h"
>>  #include "../finalizer_weakref/finalizer_weakref.h"
>> +#include "../verify/verify_live_heap.h"
>>
>>
>>  /* All the write barrier interfaces need cleanup */
>> @@ -117,10 +121,8 @@
>>     Mutator *mutator = (Mutator *)gc_get_tls();
>>
>>     //FIXME: Release lock.
>> -    lock(mutator->dirty_set_lock);
>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>> -    unlock(mutator->dirty_set_lock);
>> +    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_holding_ref);
>>   }
>>  }
>>
>> @@ -204,7 +206,8 @@
>>           mutator_dirtyset_add_entry(mutator, obj_to_snapshot);
>>       }
>>     }
>> -    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>> +    obj_mark_gray_in_table((Partial_Reveal_Object *) p_obj_holding_ref);  // now, the black-only obj (no gray bit been set) will also be scaned by marker, here mark it to gray to prevent this, just a workaround
>> +    obj_mark_black_in_table((Partial_Reveal_Object *) p_obj_holding_ref, mutator);
>>     obj_dirty_in_table((Partial_Reveal_Object *) p_obj_holding_ref);
>>   }
>>  }
>> @@ -215,32 +218,141 @@
>>   REF* p_obj_slot = (REF*) p_slot ;
>>   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)read_slot(p_obj_slot);
>>   if(p_obj && obj_need_remember_oldvar(p_obj)){
>> +    mutator->dirty_obj_num++;
>>     mutator_dirtyset_add_entry(mutator, p_obj);
>>   }
>>  }
>>
>> +/*
>> +static void write_barrier_for_check(Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
>> +{
>> +  //Mutator *mutator = (Mutator *)gc_get_tls();
>> +
>> +  Partial_Reveal_Object* src_obj = (Partial_Reveal_Object*)p_obj_holding_ref;
>> +  Partial_Reveal_Object* sub_obj = (Partial_Reveal_Object*)read_slot((REF*) p_slot);
>> +  Partial_Reveal_Object* target_obj = (Partial_Reveal_Object*)p_target;
>> +
>> +  if(src_obj && (!obj_is_mark_black_in_table(src_obj))){
>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Src]");
>> +     analyze_bad_obj(src_obj);
>> +     RAISE_ERROR;
>> +  }
>> +
>> +  if(sub_obj && (!obj_is_mark_black_in_table(sub_obj))){
>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Sub]");
>> +     analyze_bad_obj(sub_obj);
>> +     INFO2("gc.verifier", "[source object]");
>> +     analyze_bad_obj(src_obj);
>> +     //RAISE_ERROR;
>> +     return;
>> +  }
>> +
>> +  if(target_obj && (!obj_is_mark_black_in_table(target_obj))){
>> +     INFO2("gc.verifier", "[write_barrier_for_check] [Target]");
>> +     analyze_bad_obj(target_obj);
>> +     RAISE_ERROR;
>> +  }
>> +
>> +  *p_slot = p_target;
>> +}
>> +*/
>>  //===========================================
>>
>>  /* The following routines were supposed to be the only way to alter any value in gc heap. */
>>  void gc_heap_write_ref (Managed_Object_Handle p_obj_holding_ref, unsigned offset, Managed_Object_Handle p_target)
>>  {  assert(0); }
>>
>> -void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
>> +
>> +Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)
>> +{
>> +
>> +
>> +    GC_VTable_Info *src_gcvt = obj_get_gcvt((Partial_Reveal_Object*)src_array);
>> +    GC_VTable_Info *dst_gcvt = obj_get_gcvt((Partial_Reveal_Object*)dst_array);
>> +
>> +    Class_Handle src_class = src_gcvt->gc_clss;
>> +    Class_Handle dst_class = dst_gcvt->gc_clss;
>> +
>> +
>> +       //element size of src should be same as element size of dst
>> +       assert(src_gcvt->array_elem_size == dst_gcvt->array_elem_size);
>> +       unsigned int elem_size = src_gcvt->array_elem_size;
>> +       unsigned int src_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)src_array);
>> +       unsigned int dst_first_elem_offset = array_first_element_offset((Partial_Reveal_Array*)dst_array);
>> +       /*
>> +       #ifdef COMPRESS_REFERENCE
>> +          COMPRESSED_REFERENCE *src_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>> +          COMPRESSED_REFERENCE *dst_copy_body = (COMPRESSED_REFERENCE *)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>> +       #else
>> +       #endif
>> +       */
>> +          REF* src_copy_body = (REF*)((POINTER_SIZE_INT)src_array + src_first_elem_offset + elem_size*src_start);
>> +          REF* dst_copy_body = (REF*)((POINTER_SIZE_INT)dst_array + dst_first_elem_offset + elem_size*dst_start);
>> +
>> +
>> +       if(class_is_instanceof(src_class, dst_class)) {
>> +         //rem obj before is for OTF GC barriers
>> +         if(WB_REM_OLD_VAR == write_barrier_function) {
>> +            for (unsigned int count = 0; count < length; count++) {
>> +               write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>> +            }
>> +         } else if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>> +            write_barrier_rem_obj_snapshot(dst_array);
>> +         }
>> +
>> +         memmove(dst_copy_body, src_copy_body, length * elem_size);
>> +
>> +       } else { //for the condition src is not the type of dst
>> +          Class_Handle dst_elem_clss = class_get_array_element_class(dst_class);
>> +          if(WB_REM_OBJ_SNAPSHOT == write_barrier_function) {
>> +            write_barrier_rem_obj_snapshot(dst_array);
>> +          }
>> +
>> +          for (unsigned int count = 0; count < length; count++) {
>> +             // 1, null elements copy direct
>> +             if (src_copy_body[count] == NULL) {
>> +                  if(WB_REM_OLD_VAR == write_barrier_function) {
>> +                      write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>> +                 }
>> +                  dst_copy_body[count] = NULL;
>> +                  continue;
>> +               }
>> +
>> +             // 2, For non-null elements check if types are compatible.
>> +/*
>> +#ifdef COMPRESS_REFERENCE
>> +             ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
>> +             Class_Handle src_elem_clss = src_elem->vt()->clss;
>> +#else
>> +#endif
>> +*/
>> +             Class_Handle src_elem_clss = obj_get_gcvt(ref_to_obj_ptr(src_copy_body[count]))->gc_clss;
>> +
>> +             if (!class_is_instanceof(src_elem_clss, dst_elem_clss)) {
>> +                  if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>> +                      write_barrier_rem_source_obj(dst_array);
>> +                  }
>> +                  return FALSE;
>> +             }
>> +
>> +             if(WB_REM_OLD_VAR == write_barrier_function) {
>> +                 write_barrier_rem_slot_oldvar((Managed_Object_Handle *)dst_copy_body+count);
>> +             }
>> +              dst_copy_body[count] = src_copy_body[count];
>> +        }
>> +      }
>> +
>> +    //rem obj after is for mostly concurrent
>> +    if(WB_REM_SOURCE_OBJ == write_barrier_function) {
>> +        write_barrier_rem_source_obj(dst_array);
>> +    }
>> +
>> +    return TRUE;
>> +}
>> +
>> +
>> +void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
>>  {
>> -  /*Concurrent Mark: Since object clone and array copy do not modify object slots,
>> -      we treat it as an new object. It has already been marked when dest object was created.
>> -      We use WB_REM_SOURCE_OBJ function here to debug.
>> -    */
>> -
>> -  if(WB_REM_SOURCE_OBJ == write_barrier_function){
>> -    Mutator *mutator = (Mutator *)gc_get_tls();
>> -    lock(mutator->dirty_set_lock);
>> -
>> -    obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written);
>> -    mutator_dirtyset_add_entry(mutator, (Partial_Reveal_Object*)p_obj_written);
>> -
>> -    unlock(mutator->dirty_set_lock);
>> -  }
>>
>>   if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
>>     return;
>> @@ -283,6 +395,13 @@
>>       write_barrier_rem_slot_oldvar(p_slot);
>>       *p_slot = p_target;
>>       break;
>> +    //just debugging
>> +    /*
>> +    case WB_CON_DEBUG:
>> +       write_barrier_for_check(p_obj_holding_ref, p_slot, p_target);
>> +       //*p_slot = p_target;
>> +       break;
>> +    */
>>     default:
>>       assert(0);
>>       return;
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_barrier.h Tue Oct 28 20:01:01 2008
>> @@ -32,7 +32,8 @@
>>   WB_REM_SOURCE_REF    = 0x02,
>>   WB_REM_OLD_VAR       = 0x03,
>>   WB_REM_NEW_VAR       = 0x04,
>> -  WB_REM_OBJ_SNAPSHOT  = 0x05
>> +  WB_REM_OBJ_SNAPSHOT  = 0x05,
>> +  WB_CON_DEBUG = 0x06
>>  };
>>
>>  inline void gc_set_barrier_function(unsigned int wb_function)
>> @@ -43,4 +44,3 @@
>>  #endif /* _GC_FOR_BARRIER_H_ */
>>
>>
>> -
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Tue Oct 28 20:01:01 2008
>> @@ -203,4 +203,3 @@
>>
>>
>>
>> -
>>
>> Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
>> URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?rev=708756&r1=708755&r2=708756&view=diff
>> ==============================================================================
>> --- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
>> +++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Tue Oct 28 20:01:01 2008
>> @@ -30,7 +30,7 @@
>>  #include "../mark_sweep/gc_ms.h"
>>  #include "../move_compact/gc_mc.h"
>>  #include "interior_pointer.h"
>> -#include "../thread/marker.h"
>> +#include "../thread/conclctor.h"
>>  #include "../thread/collector.h"
>>  #include "../verify/verify_live_heap.h"
>>  #include "../finalizer_weakref/finalizer_weakref.h"
>> @@ -115,7 +115,10 @@
>>   collection_scheduler_initialize(gc);
>>
>>   if(gc_is_specify_con_gc()){
>> -    marker_initialize(gc);
>> +     gc->gc_concurrent_status = GC_CON_NIL;
>> +    conclctor_initialize(gc);
>> +  } else {
>> +     gc->gc_concurrent_status = GC_CON_DISABLE;
>>   }
>>
>>   collector_initialize(gc);
>> @@ -134,6 +137,9 @@
>>  {
>>   INFO2("gc.process", "GC: call GC wrapup ....");
>>   GC* gc =  p_global_gc;
>> +  // destruct threads first, and then destruct data structures
>> +  conclctor_destruct(gc);
>> +  collector_destruct(gc);
>>
>>  #if defined(USE_UNIQUE_MARK_SWEEP_GC)
>>  gc_ms_destruct((GC_MS*)gc);
>> @@ -148,8 +154,6 @@
>>  #ifndef BUILD_IN_REFERENT
>>   gc_finref_metadata_destruct(gc);
>>  #endif
>> -  collector_destruct(gc);
>> -  marker_destruct(gc);
>>
>>   if( verify_live_heap ){
>>     gc_terminate_heap_verification(gc);
>> @@ -446,4 +450,3 @@
>>
>>
>>
>> -
>>
>>
>>
>
>
>
> --
> Unless stated otherwise above:
> IBM United Kingdom Limited - Registered in England and Wales with number 741598.
> Registered office: PO Box 41, North Harbour, Portsmouth, Hampshire PO6 3AU
>



-- 
http://xiao-feng.blogspot.com