Posted to commits@harmony.apache.org by wj...@apache.org on 2007/01/11 14:57:19 UTC

svn commit: r495225 [3/5] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen: javasrc/org/apache/harmony/drlvm/gc_gen/ src/common/ src/finalizer_weakref/ src/gen/ src/jni/ src/mark_compact/ src/mark_sweep/ src/thread/ src/trace_forward/ src/utils/ src/verify/

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Thu Jan 11 05:57:16 2007
@@ -21,11 +21,25 @@
 #include "port_sysinfo.h"
 
 #include "gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../verify/verify_live_heap.h"
+#include "../common/space_tuner.h"
 
 /* fspace size limits are not interesting; they are only for manual tuning. */
-unsigned int min_nos_size_bytes = 2 * MB;
-unsigned int max_nos_size_bytes = 64 * MB;
+unsigned int min_nos_size_bytes = 16 * MB;
+unsigned int max_nos_size_bytes = 256 * MB;
 unsigned int NOS_SIZE = 0;
+unsigned int MIN_NOS_SIZE = 0;
+unsigned int MAX_NOS_SIZE = 0;
+
+static unsigned int MINOR_ALGO = 0;
+static unsigned int MAJOR_ALGO = 0;
+
+#ifndef STATIC_NOS_MAPPING
+void* nos_boundary;
+#endif
+
+#define RESERVE_BOTTOM ((void*)0x1000000)
 
 static void gc_gen_get_system_info(GC_Gen *gc_gen) 
 {
@@ -36,73 +50,133 @@
 void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size) 
 {
   assert(gc_gen); 
-  assert(max_heap_size <= max_heap_size_bytes);
 
-  min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES);
-  max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES);
+  /* Give the GC a hint of the survive ratio. */
+  gc_gen->survive_ratio = 0.2f;
+
+  /*fixme: max_heap_size should not go beyond 448 MB*/
+  max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT);
+  min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT);
+  assert(max_heap_size <= max_heap_size_bytes);
+  assert(max_heap_size > min_heap_size_bytes);
 
   gc_gen_get_system_info(gc_gen); 
+  min_nos_size_bytes *=  gc_gen->_num_processors;
+  
+  if( MIN_NOS_SIZE )  min_nos_size_bytes = MIN_NOS_SIZE;
+
+  unsigned int los_size = max_heap_size >> 7;
+  if(los_size < GC_MIN_LOS_SIZE) 
+    los_size = GC_MIN_LOS_SIZE;
+  
+  los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT);
 
-  void *reserved_base = NULL;
+  /* let's compute and reserve the space for committing */
+  
+  /* heuristic nos + mos + LOS = max, and nos*ratio = mos */
+  unsigned int nos_reserve_size,  nos_commit_size; 
+  unsigned int mos_reserve_size, mos_commit_size; 
+  unsigned int los_mos_size;
+  
+
+  if(NOS_SIZE){
+    los_mos_size = max_heap_size - NOS_SIZE;
+    mos_reserve_size = los_mos_size - los_size;  
+
+    nos_commit_size = NOS_SIZE;
+    nos_reserve_size = NOS_SIZE;
+  
+  }else{  
+    los_mos_size = max_heap_size;
+    mos_reserve_size = los_mos_size - los_size;
+    nos_commit_size = (unsigned int)(((float)(max_heap_size - los_size))/(1.0f + gc_gen->survive_ratio));
+    nos_reserve_size = mos_reserve_size;
+  }
+    
+  nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT);  
+  mos_commit_size = max_heap_size - los_size - nos_commit_size;
 
   /* allocate memory for gc_gen */
-  gc_gen->allocated_memory = NULL;
-  pool_create(&gc_gen->aux_pool, 0);
+  void* reserved_base;
+  void* reserved_end;
+  void* nos_base;
+
+#ifdef STATIC_NOS_MAPPING
+
+  assert((unsigned int)nos_boundary%SPACE_ALLOC_UNIT == 0);
+  nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size);
+  if( nos_base != nos_boundary ){
+    printf("Static NOS mapping: Can't reserve memory at %x for size %x for NOS.\n", nos_boundary, nos_reserve_size);  
+    printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.\n");
+    exit(0);
+  }
+  reserved_end = (void*)((unsigned int)nos_base + nos_reserve_size);
+
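+  /* MOS+LOS must end exactly at nos_base: start los_mos_size below NOS and
+     slide the request down one SPACE_ALLOC_UNIT at a time until the OS
+     grants the reservation, giving up below RESERVE_BOTTOM. */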
+  void* los_mos_base = (void*)((unsigned int)nos_base - los_mos_size);
+  assert(!((unsigned int)los_mos_base%SPACE_ALLOC_UNIT));
+  reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
+  while( !reserved_base || reserved_base >= nos_base){
+    los_mos_base = (void*)((unsigned int)los_mos_base - SPACE_ALLOC_UNIT);
+    if(los_mos_base < RESERVE_BOTTOM){
+      printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size);  
+      exit(0);      
+    }
+    reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
+  }
   
-  apr_status_t status = port_vmem_reserve(&gc_gen->allocated_memory, 
-                  &reserved_base, max_heap_size, 
-                  PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE, 
-                  gc_gen->_machine_page_size_bytes, gc_gen->aux_pool);
-  
-  while(APR_SUCCESS != status){
-    max_heap_size -= gc_gen->_machine_page_size_bytes;
-    status = port_vmem_reserve(&gc_gen->allocated_memory, 
-                  &reserved_base, max_heap_size, 
-                  PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE, 
-                  gc_gen->_machine_page_size_bytes, gc_gen->aux_pool);  
+#else /* STATIC_NOS_MAPPING */
+
+  reserved_base = vm_reserve_mem(0, max_heap_size);
+  if( !reserved_base ){
+    printf("Non-static NOS mapping: Can't reserve memory for specified size %x.\n", max_heap_size);  
+    exit(0);      
   }
-  assert(max_heap_size > min_heap_size_bytes);
-  gc_gen->reserved_heap_size = max_heap_size;
+  reserved_end = (void*)((unsigned int)reserved_base + max_heap_size);
+    
+  /* compute first time nos_boundary */
+  nos_base = (void*)((unsigned int)reserved_base + mos_commit_size + los_size);
+  /* init nos_boundary if NOS is not statically mapped */
+  nos_boundary = nos_base; 
+
+#endif  /* STATIC_NOS_MAPPING else */
+
+  gc_gen->reserved_heap_size = los_size + nos_reserve_size + mos_reserve_size;
   gc_gen->heap_start = reserved_base;
-  gc_gen->heap_end = (void*)((unsigned int)reserved_base + max_heap_size);
+  gc_gen->heap_end = reserved_end;
   gc_gen->blocks = (Block*)reserved_base;
   gc_gen->num_collections = 0;
-
-  /* heuristic nos + mos + LOS */
-  unsigned int los_size = max_heap_size >> 2;
+  gc_gen->time_collections = 0;
+  gc_gen->force_major_collect = FALSE;
+  
   gc_los_initialize(gc_gen, reserved_base, los_size);
 
-  unsigned int mos_size = max_heap_size >> 1;
   reserved_base = (void*)((unsigned int)reserved_base + los_size);
-  gc_mos_initialize(gc_gen, reserved_base, mos_size);
-  
-  unsigned int nos_size; 
-  if(NOS_SIZE){
-    assert( NOS_SIZE>=min_nos_size_bytes && NOS_SIZE<=max_nos_size_bytes);
-    nos_size = NOS_SIZE;  
-  }else
-    nos_size =  max_heap_size >> 4;
-  
-  if(nos_size < min_nos_size_bytes ) nos_size = min_nos_size_bytes;  
-  if(nos_size > max_nos_size_bytes ) nos_size = max_nos_size_bytes;  
-  
-  reserved_base = (void*)((unsigned int)reserved_base + mos_size);
-  gc_nos_initialize(gc_gen, reserved_base, nos_size); 
+  gc_mos_initialize(gc_gen, reserved_base, mos_reserve_size, mos_commit_size);
 
+  gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); 
+    
   /* connect mos and nos, so that they can be compacted as one space */
   Blocked_Space* mos = (Blocked_Space*)gc_get_mos(gc_gen);
   Blocked_Space* nos = (Blocked_Space*)gc_get_nos(gc_gen);
   Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
   Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0];
   mos_last_block->next = nos_first_block;
-  assert(space_heap_end((Space*)mos) == space_heap_start((Space*)nos));
+  
+  nos->collect_algorithm = MINOR_ALGO;
+  mos->collect_algorithm = MAJOR_ALGO;
+
+  /* Give each space a hint of the survive ratio. */
+  nos->survive_ratio = gc_gen->survive_ratio;
+  mos->survive_ratio = gc_gen->survive_ratio;
+  gc_space_tuner_initialize((GC*)gc_gen);
     
   gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) +
                                 space_committed_size((Space*)gc_gen->mos) +
                                 space_committed_size((Space*)gc_gen->los);
   
-  set_native_finalizer_thread_flag(TRUE);
-  set_native_ref_enqueue_thread_flag(TRUE);
+
+  set_native_finalizer_thread_flag(!IGNORE_FINREF);
+  set_native_ref_enqueue_thread_flag(!IGNORE_FINREF);
   
   return;
 }
@@ -118,6 +192,14 @@
   gc_los_destruct(gc_gen);  
   gc_gen->los = NULL;
 
+  Space* nos = (Space*)gc_gen->nos;
+  Space* mos = (Space*)gc_gen->mos;
+  Space* los = (Space*)gc_gen->los;
+
+  vm_unmap_mem(nos->heap_start, space_committed_size(nos));
+  vm_unmap_mem(mos->heap_start, space_committed_size(mos));
+  vm_unmap_mem(los->heap_start, space_committed_size(los));
+
   return;  
 }
 
@@ -132,45 +214,138 @@
 void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
 unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
 
+
 static Boolean major_collection_needed(GC_Gen* gc)
 {
-  return mspace_free_memory_size(gc->mos) < fspace_used_memory_size(gc->nos);  
+  return space_used_memory_size((Blocked_Space*)gc->nos)*gc->survive_ratio > (space_free_memory_size((Blocked_Space*)gc->mos));
+}
+
+Boolean FORCE_FULL_COMPACT = FALSE;
+
+void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
+{
+  /* this is for debugging. */
+  gc->last_collect_kind = gc->collect_kind;
+  
+  if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
+    gc->collect_kind = MAJOR_COLLECTION;
+  else
+    gc->collect_kind = MINOR_COLLECTION;
+
+  return;
 }
 
-unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
+void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo)
 {
-  if(major_collection_needed(gc) || cause== GC_CAUSE_LOS_IS_FULL)
-    return  MAJOR_COLLECTION;
+  if(!minor_algo){
+    MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;      
+    gc_disable_gen_mode();
+  
+  }else{
+    string_to_upper(minor_algo);
+     
+    if(!strcmp(minor_algo, "MINOR_NONGEN_FORWARD_POOL")){  
+      MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;
+      gc_disable_gen_mode();
+      
+    }else if(!strcmp(minor_algo, "MINOR_GEN_FORWARD_POOL")){
+      MINOR_ALGO = MINOR_GEN_FORWARD_POOL;
+      gc_enable_gen_mode();
+    
+    }else{
+      printf("\nGC algorithm setting incorrect. Will use default value.\n");  
+    
+    }
+  }
+  
+  if(!major_algo){
+    MAJOR_ALGO= MAJOR_COMPACT_SLIDE;
     
-  return MINOR_COLLECTION;     
+  }else{
+    string_to_upper(major_algo);
+
+    if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){
+     MAJOR_ALGO= MAJOR_COMPACT_SLIDE;
+          
+    }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){
+     MAJOR_ALGO= MAJOR_COMPACT_MOVE;
+
+    }else{
+     printf("\nGC algorithm setting incorrect. Will use default algorithm.\n");  
+      
+    }
+  }
+  
+  return;
+  
 }
 
+Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
+
 void gc_gen_reclaim_heap(GC_Gen* gc)
-{  
+{ 
+  if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
+
+  int64 start_time = time_now();
+
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+  mspace->num_used_blocks = mspace->free_block_idx - mspace->first_block_idx;
+  fspace->num_used_blocks = fspace->free_block_idx - fspace->first_block_idx;
+
+  gc->collect_result = TRUE;
+  
   if(gc->collect_kind == MINOR_COLLECTION){
-    if( gc_requires_barriers()) /* normal gen gc nos collection */
-      fspace_collection(gc->nos);
-    else{ /* copy nos to mos for non-gen gc */
-      /* we don't move mos objects in MINOR_COLLECTION. This is true for both 
-        gen or non-gen collections, but only meaningful for non-gen GC, because
-        non-gen GC need mark the heap in order to find the refs from mos/los to nos.
-        This can save lots of reloc table space for slots having ref pointing to mos.
-        For gen GC, MINOR_COLLECTION doesn't really mark the heap. It has remsets that
-        have all the refs from mos/los to nos, which are actually the same thing as reloc table */
-      gc->mos->move_object = FALSE;
-      fspace_collection(gc->nos);
-      gc->mos->move_object = TRUE;
+    /* FIXME:: move_object is only useful for nongen_slide_copy */
+    gc->mos->move_object = FALSE;
+    
+    fspace_collection(gc->nos);
+    
+    gc->mos->move_object = TRUE;      
+
       
-      /* these are only needed for non-gen MINOR_COLLECTION, because 
-        both mos and los will be collected (and reset) in MAJOR_COLLECTION */
-      reset_mspace_after_copy_nursery(gc->mos);
-      reset_lspace_after_copy_nursery(gc->los);
-    }
   }else{
+
     /* process mos and nos together in one compaction */
     mspace_collection(gc->mos); /* fspace collection is included */
     lspace_collection(gc->los);
+
+  }
+
+  if(gc->collect_result == FALSE && gc->collect_kind == MINOR_COLLECTION){
+    
+    if(gc_is_gen_mode())
+      gc_clear_remset((GC*)gc);  
+    
+    /* MOS ran out during the minor collection */
+    assert(mspace->free_block_idx == mspace->ceiling_block_idx + 1);
+    mspace->num_used_blocks = mspace->num_managed_blocks;
+
+    IS_FALLBACK_COMPACTION = TRUE;
+
+    gc_reset_collect_result((GC*)gc);
+    gc->collect_kind = FALLBACK_COLLECTION;    
+
+    mspace_collection(gc->mos); /* fspace collection is included */
+    lspace_collection(gc->los);
+    
+    IS_FALLBACK_COMPACTION = FALSE;
+    
   }
   
+  if( gc->collect_result == FALSE){
+    printf("Out of Memory!\n");
+    assert(0);
+    exit(0);
+  }
+  
+  int64 pause_time = time_now() - start_time;
+  
+  gc->time_collections += pause_time;
+  
+  if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
+
+  gc_gen_adapt(gc, pause_time);
+
   return;
 }
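
A note on the sizing heuristic in gc_gen_initialize above: when NOS_SIZE is
not forced, NOS gets the non-LOS budget divided by (1 + survive_ratio), and
MOS keeps the remainder. A minimal standalone sketch of that arithmetic (MB,
GC_MIN_LOS_SIZE and the rounding helpers are the ones used in the code above;
the concrete numbers are illustrative only):

    /* sketch: a 256 MB heap with the initial survive_ratio of 0.2 */
    unsigned int max_heap_size = 256 * MB;
    unsigned int los_size = max_heap_size >> 7;   /* 2 MB, raised to GC_MIN_LOS_SIZE if smaller */
    unsigned int nos_commit_size = (unsigned int)
        ((float)(max_heap_size - los_size) / (1.0f + 0.2f));   /* ~212 MB */
    nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT);
    unsigned int mos_commit_size = max_heap_size - los_size - nos_commit_size; /* the remaining ~42 MB */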

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Thu Jan 11 05:57:16 2007
@@ -21,13 +21,17 @@
 #ifndef _GC_GEN_H_
 #define _GC_GEN_H_
 
+extern unsigned int NOS_SIZE;
+
 #include "../common/gc_common.h"
 #include "../thread/gc_thread.h"
 #include "../trace_forward/fspace.h"
 #include "../mark_compact/mspace.h"
 #include "../mark_sweep/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref_metadata.h"
-  
+
+#define SPACE_ALLOC_UNIT ( ( GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT)
+
 enum Write_Barrier_Kind{
   WRITE_BARRIER_NIL,  
   WRITE_BARRIER_SLOT,  
@@ -52,6 +56,8 @@
   unsigned int reserved_heap_size;
   unsigned int committed_heap_size;
   unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;  
   
   /* mutation related info */
   Mutator *mutator_list;
@@ -65,21 +71,28 @@
 
   /* metadata is the pool for rootset, markstack, etc. */  
   GC_Metadata* metadata;
-  Finalizer_Weakref_Metadata *finalizer_weakref_metadata;
+  Finref_Metadata *finref_metadata;
+
   unsigned int collect_kind; /* MAJOR or MINOR */
+  unsigned int last_collect_kind;
+  Boolean collect_result; /* succeed or fail */
+  
+  Boolean generate_barrier;
+
   /* FIXME:: this is wrong! root_set belongs to mutator */
   Vector_Block* root_set;
   
-  /* mem info */
-  apr_pool_t *aux_pool;
-  port_vmem_t *allocated_memory;
+  //For_LOS_extend
+  Space_Tuner* tuner;  
   /* END of GC --> */
   
   Block* blocks;
   Fspace *nos;
   Mspace *mos;
   Lspace *los;
-    
+      
+  Boolean force_major_collect;
+  
   /* system info */ 
   unsigned int _machine_page_size_bytes;
   unsigned int _num_processors;
@@ -92,20 +105,25 @@
 void gc_gen_destruct(GC_Gen *gc);
                         
 inline unsigned int gc_gen_free_memory_size(GC_Gen* gc)
-{  return fspace_free_memory_size(gc->nos) +
-         mspace_free_memory_size(gc->mos) +
-         lspace_free_memory_size(gc->los);  }
-                        
+{  return space_free_memory_size((Blocked_Space*)gc->nos) +
+          space_free_memory_size((Blocked_Space*)gc->mos) +
+          lspace_free_memory_size(gc->los);  }
+                    
+inline unsigned int gc_gen_total_memory_size(GC_Gen* gc)
+{  return space_committed_size((Space*)gc->nos) +
+          space_committed_size((Space*)gc->mos) +
+          lspace_committed_size(gc->los);  }
+
 /////////////////////////////////////////////////////////////////////////////////////////
 
-inline void gc_nos_initialize(GC_Gen* gc, void* start, unsigned int nos_size)
-{ fspace_initialize((GC*)gc, start, nos_size); }
+inline void gc_nos_initialize(GC_Gen* gc, void* start, unsigned int nos_size, unsigned int commit_size)
+{ fspace_initialize((GC*)gc, start, nos_size, commit_size); }
 
 inline void gc_nos_destruct(GC_Gen* gc)
 { fspace_destruct(gc->nos); }
 
-inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size)
-{ mspace_initialize((GC*)gc, start, mos_size); }
+inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size, unsigned int commit_size)
+{ mspace_initialize((GC*)gc, start, mos_size, commit_size); }
 
 inline void gc_mos_destruct(GC_Gen* gc)
 { mspace_destruct(gc->mos); }
@@ -116,12 +134,6 @@
 inline void gc_los_destruct(GC_Gen* gc)
 { lspace_destruct(gc->los); }
 
-inline Boolean address_belongs_to_nursery(void* addr, GC_Gen* gc)
-{ return address_belongs_to_space(addr, (Space*)gc->nos); }
-
-extern void* nos_boundary;
-extern void* los_boundary;
-
 inline Space* space_of_addr(GC* gc, void* addr)
 {
   assert(address_belongs_to_gc_heap(addr, gc));
@@ -141,7 +153,11 @@
 void gc_set_los(GC_Gen* gc, Space* los);
 unsigned int gc_get_processor_num(GC_Gen* gc);
 
-unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
+void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo);
+void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
+
+void gc_gen_adapt(GC_Gen* gc, int64 pause_time);
+
 void gc_gen_reclaim_heap(GC_Gen* gc);
 
 #endif /* ifndef _GC_GEN_H_ */
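
SPACE_ALLOC_UNIT above is the granularity every space boundary must respect,
which is why gen.cpp rounds all space sizes with round_up_to_size and
round_down_to_size. A small illustration with hypothetical values (the real
GC_BLOCK_SIZE_BYTES and SYSTEM_ALLOC_UNIT may differ):

    /* hypothetical values, for illustration only:
       GC_BLOCK_SIZE_BYTES = 32*1024, SYSTEM_ALLOC_UNIT = 64*1024
       => SPACE_ALLOC_UNIT == 64*1024, so
       round_down_to_size(0x25000, SPACE_ALLOC_UNIT) == 0x20000 */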

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,270 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gen.h"
+
+#define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<5)
+
+#include <math.h>
+
+static float Tslow = 0.0f;
+static unsigned int SMax = 0;
+static unsigned int last_total_free_size = 0;
+
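+/* Picks the smaller root of the monic quadratic x^2 + b*x + c = 0, with
+   b = -(2 + 2*k*m) and c = k*m*m + 2*m + 1, i.e.
+   x = (-b - sqrt(b*b - 4*c)) / 2, falling back to 0.8 whenever the
+   discriminant is non-positive or the root leaves [0, 1]. */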
+static float mini_free_ratio(float k, float m)
+{
+  /*fixme: the check should be proved!*/
+  if(m < 0.005f) m = 0.005f;
+  if(k > 100.f) k = 100.f;
+  
+  float b = - (2 + 2 * k * m);
+  float c = k * m * m + 2 * m + 1;
+  float D = b * b - 4 * c;
+  if (D <= 0) {
+    //printf("output 0.8f from k: %5.3f, m: %5.3f\n", k, m);
+    return 0.8f;
+  }
+  float pm = sqrt(D) / 2;
+  float base = -b / 2;
+  float res = base - pm;
+  if (res > 1.f) res = 0.8f;
+
+  /*fixme: the check should be proved!*/
+  if (res < 0.0f) res = 0.8f;
+
+  //printf("output %5.3f from k: %5.3f, m: %5.3f\n", res, k, m);
+  return res;
+}
+
+#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (4*1024*1024)
+static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
+{
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+
+  float survive_ratio = 0;
+
+  unsigned int mos_free_size = space_free_memory_size(mspace);
+  unsigned int nos_free_size = space_free_memory_size(fspace);
+  unsigned int total_free_size = mos_free_size  + nos_free_size;
+  
+  if(gc->collect_kind != MINOR_COLLECTION) 
+  {
+    mspace->time_collections += pause_time;
+
+    Tslow = (float)pause_time;
+    SMax = total_free_size;
+    gc->force_major_collect = FALSE;
+    
+    unsigned int major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
+    survive_ratio = (float)major_survive_size/(float)gc_gen_total_memory_size(gc);
+    mspace->survive_ratio = survive_ratio;
+  
+  }else{
+    /*Give a hint to mini_free_ratio. */
+    if(gc->num_collections == 1){
+      /*fixme: This is only set for tuning the first warehouse!*/
+      Tslow = pause_time / gc->survive_ratio;
+      SMax = (unsigned int)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio ));
+      last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
+    }
+
+    fspace->time_collections += pause_time;  
+    unsigned int free_size_threshold;
+      
+    unsigned int minor_survive_size = last_total_free_size - total_free_size;
+
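+    /* k: the last major pause (Tslow) over the average minor pause;
+       m: minor survivors as a fraction of the usable free budget
+          (SMax minus the reserved MOS remainder). */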
+    float k = Tslow * fspace->num_collections/fspace->time_collections;
+    float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
+    float free_ratio_threshold = mini_free_ratio(k, m);
+    free_size_threshold = (unsigned int)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+
+    if ((mos_free_size + nos_free_size)< free_size_threshold)  {
+      gc->force_major_collect = TRUE;
+    }
+
+    survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
+    fspace->survive_ratio = survive_ratio;
+  }
+  
+  gc->survive_ratio =  (gc->survive_ratio + survive_ratio)/2.0f;
+
+  last_total_free_size = total_free_size;
+
+  return;
+}
+
+
+Boolean gc_compute_new_space_size(GC_Gen* gc, unsigned int* mos_size, unsigned int* nos_size)
+{
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+  Blocked_Space* lspace = (Blocked_Space*)gc->los;  
+  
+  unsigned int new_nos_size;
+  unsigned int new_mos_size;
+
+  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
+  unsigned int used_mos_size = space_used_memory_size(mspace);
+  unsigned int free_mos_size = space_committed_size((Space*)mspace) - used_mos_size;
+
+  unsigned int total_size;
+
+#ifdef STATIC_NOS_MAPPING
+    total_size = max_heap_size_bytes - lspace->committed_heap_size;
+#else
+    total_size = (unsigned int)gc->heap_end - (unsigned int)mspace->heap_start;
+#endif
+
+  /* check if curr nos size is too small to shrink */
+  /*
+  if(curr_nos_size <= min_nos_size_bytes){
+    //after major, should not allow this size 
+    assert(gc->collect_kind == MINOR_COLLECTION);
+    return FALSE;
+  }
+  */
+  
+  unsigned int total_free = total_size - used_mos_size;
+  /* predict NOS + NOS*ratio = total_free_size */
+  int nos_reserve_size;
+  nos_reserve_size = (int)(((float)total_free)/(1.0f + fspace->survive_ratio));
+  new_nos_size = round_down_to_size((unsigned int)nos_reserve_size, SPACE_ALLOC_UNIT);
+#ifdef STATIC_NOS_MAPPING
+  if(new_nos_size > fspace->reserved_heap_size) new_nos_size = fspace->reserved_heap_size;
+#endif  
+  if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ;
+
+  new_mos_size = total_size - new_nos_size;
+#ifdef STATIC_NOS_MAPPING
+  if(new_mos_size > mspace->reserved_heap_size) new_mos_size = mspace->reserved_heap_size;
+#endif
+  assert(new_nos_size + new_mos_size == total_size);
+  *nos_size = new_nos_size;
+  *mos_size = new_mos_size;
+  return TRUE;
+}
+
+#ifndef STATIC_NOS_MAPPING
+
+void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
+{
+  gc_decide_next_collect(gc, pause_time);
+
+  if(NOS_SIZE) return;
+
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+  
+  unsigned int new_nos_size;
+  unsigned int new_mos_size;
+
+  Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);
+
+  if(!result) return;
+
+  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
+
+  if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
+    return;
+  
+  /* below are the adjustments */  
+
+  nos_boundary = (void*)((unsigned int)gc->heap_end - new_nos_size);
+
+  fspace->heap_start = nos_boundary;
+  fspace->blocks = (Block*)nos_boundary;
+  fspace->committed_heap_size = new_nos_size;
+  fspace->num_managed_blocks = new_nos_size >> GC_BLOCK_SHIFT_COUNT;
+  fspace->num_total_blocks = fspace->num_managed_blocks;
+  fspace->first_block_idx = ((Block_Header*)nos_boundary)->block_idx;
+  fspace->free_block_idx = fspace->first_block_idx;
+
+  mspace->heap_end = nos_boundary;
+  mspace->committed_heap_size = new_mos_size;
+  mspace->num_managed_blocks = new_mos_size >> GC_BLOCK_SHIFT_COUNT;
+  mspace->num_total_blocks = mspace->num_managed_blocks;
+  mspace->ceiling_block_idx = ((Block_Header*)nos_boundary)->block_idx - 1;
+
+  Block_Header* mos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks-1];
+  assert(mspace->ceiling_block_idx == mos_last_block->block_idx);
+  Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0];
+  /* this is redundant: mos_last_block->next = nos_first_block; */
+
+  HelperClass_set_NosBoundary(nos_boundary);
+  
+  return;
+}
+
+#else /* ifndef STATIC_NOS_MAPPING */
+
+void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
+{
+  gc_decide_next_collect(gc, pause_time);
+
+  if(NOS_SIZE) return;
+
+  unsigned int new_nos_size;
+  unsigned int new_mos_size;
+
+  Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);
+
+  if(!result) return;
+
+  Blocked_Space* fspace = (Blocked_Space*)gc->nos;
+  Blocked_Space* mspace = (Blocked_Space*)gc->mos;
+  
+  unsigned int curr_nos_size = space_committed_size((Space*)fspace);
+
+  if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA )
+    return;
+      
+  unsigned int used_mos_size = space_used_memory_size((Blocked_Space*)mspace);  
+  unsigned int free_mos_size = space_free_memory_size((Blocked_Space*)mspace);  
+
+  unsigned int new_free_mos_size = new_mos_size -  used_mos_size;
+  
+  unsigned int curr_mos_end = (unsigned int)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  unsigned int mos_border = (unsigned int)mspace->heap_end;
+  if(  curr_mos_end + new_free_mos_size > mos_border){
+    /* we can't let mos cross border */
+    new_free_mos_size = mos_border - curr_mos_end;    
+  }
+
+  if(new_nos_size < curr_nos_size){
+    /* let's shrink nos */
+    assert(new_free_mos_size > free_mos_size);
+    blocked_space_shrink((Blocked_Space*)fspace, curr_nos_size - new_nos_size);
+    blocked_space_extend((Blocked_Space*)mspace, new_free_mos_size - free_mos_size);
+  }else if(new_nos_size > curr_nos_size){
+    /* let's grow nos */
+    assert(new_free_mos_size < free_mos_size);
+    blocked_space_shrink((Blocked_Space*)mspace, free_mos_size - new_free_mos_size);
+    blocked_space_extend((Blocked_Space*)fspace, new_nos_size - curr_nos_size);     
+  }
+
+  Block_Header* mos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks-1];
+  Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0];
+  mos_last_block->next = nos_first_block;
+  
+  return;
+}
+
+#endif /* STATIC_NOS_MAPPING */
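
Both gc_gen_adapt variants above are driven from the tail of
gc_gen_reclaim_heap in gen.cpp; condensed, the caller contract is (a sketch
that uses only names from the code above):

    int64 start_time = time_now();
    /* ... minor, major, or fallback collection ... */
    int64 pause_time = time_now() - start_time;
    gc->time_collections += pause_time;
    gc_gen_adapt(gc, pause_time); /* may move nos_boundary, or shrink/extend NOS and MOS */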

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/helper.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/helper.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/helper.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/helper.cpp Thu Jan 11 05:57:16 2007
@@ -1,31 +0,0 @@
-#include <open/vm_gc.h>
-#include <jni.h>
-#include "../thread/gc_thread.h"
-#include "../gen/gen.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Class:     org_apache_harmony_drlvm_gc_gen_GCHelper
- * Method:    TLSFreeOffset
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c)
-{
-    return (jint)tls_gc_offset;
-}
-
- 
-
-JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
-{
-    return (jint)nos_boundary;
-}
-
-
-
-#ifdef __cplusplus
-}
-#endif

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,45 @@
+#include <open/vm_gc.h>
+#include <jni.h>
+#include "open/vm_util.h"
+#include "environment.h"
+#include "../thread/gc_thread.h"
+#include "../gen/gen.h"
+#include "java_support.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Class:     org_apache_harmony_drlvm_gc_gen_GCHelper
+ * Method:    TLSFreeOffset
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c)
+{
+    return (jint)tls_gc_offset;
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c)
+{
+    return (jint)nos_boundary;
+}
+
+JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c)
+{
+    return (jboolean)gc_is_gen_mode();
+}
+
+JNIEXPORT void JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_helperCallback(JNIEnv *e, jclass c)
+{
+    java_helper_inlined = TRUE;
+
+    unsigned int obj = *(unsigned int*)c;
+    
+    Class_Handle *vm_class_ptr = (Class_Handle *)(obj + VM_Global_State::loader_env->vm_class_offset);
+    GCHelper_clss = *vm_class_ptr;
+}
+
+#ifdef __cplusplus
+}
+#endif

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,80 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include <string.h>
+#include <jni.h>
+#include "jit_intf.h"
+#include "java_support.h"
+
+Class_Handle GCHelper_clss;
+Boolean java_helper_inlined;
+
+void HelperClass_set_GenMode(Boolean status)
+{
+  if(!java_helper_inlined) return;
+
+  unsigned int nfields = class_number_fields(GCHelper_clss);
+  unsigned int i;
+  for(i=0; i<nfields; i++){
+    Field_Handle field = class_get_field(GCHelper_clss, i);
+    if(!strcmp(field_get_name(field), "GEN_MODE")){
+      jboolean* p_gen_mode = (jboolean*)field_get_address(field);
+      *p_gen_mode = (jboolean)status;
+      break;
+    }
+  }
+  
+  assert(i<nfields);
+  
+/*
+  hythread_suspend_enable();
+  
+  //"org.apache.harmony.drlvm.gc_gen.GCHelper" 
+  jclass GCHelper = jni_env->FindClass("GCHelper");
+  jfieldID gen_mode = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z");
+  assert(gen_mode);
+  
+  jni_env->SetStaticBooleanField(GCHelper, gen_mode, status?JNI_TRUE:JNI_FALSE);
+  
+  hythread_suspend_disable();
+*/  
+  return;
+}
+
+
+void HelperClass_set_NosBoundary(void* boundary)
+{
+  if(!java_helper_inlined) return;
+
+  unsigned int nfields = class_number_fields(GCHelper_clss);
+  unsigned int i;
+  for(i=0; i<nfields; i++){
+    Field_Handle field = class_get_field(GCHelper_clss, i);
+    if(!strcmp(field_get_name(field), "NOS_BOUNDARY")){
+      jint* p_nos_boundary = (jint*)field_get_address(field);
+      *p_nos_boundary = (jint)boundary;
+      break;
+    }
+  }
+  
+  assert(i<nfields);
+
+  return;
+}
\ No newline at end of file

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,35 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _JAVA_SUPPORT_H_
+#define _JAVA_SUPPORT_H_
+
+#include "open/types.h"
+#include "../common/gc_platform.h"
+
+extern Class_Handle GCHelper_clss;
+extern Boolean java_helper_inlined;
+
+void HelperClass_set_GenMode(Boolean status);
+void HelperClass_set_NosBoundary(void* boundary);
+
+#endif /*_JAVA_SUPPORT_H_*/
+
+

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,178 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "../common/gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if(p_obj==NULL) return;
+
+  collector_tracestack_push(collector, p_ref);
+  
+  return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+  Partial_Reveal_Object *p_obj = *p_ref;
+  assert(p_obj);
+  
+  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
+    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
+    p_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_obj);
+    *p_ref = p_obj;
+  }
+  
+  if(!obj_mark_in_vt(p_obj))
+    return;
+  
+  if( !object_has_ref_field(p_obj) ) return;
+  
+  /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Object* array = p_obj;
+    assert(!obj_is_primitive_array(array));
+    
+    int32 array_length = vector_get_length((Vector_Handle) array);
+    for (int i = 0; i < array_length; i++) {
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+      scan_slot(collector, p_ref);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+  
+    scan_slot(collector, p_ref);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  
+  return;
+}
+
+
+static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{ 
+  scan_object(collector, p_ref);
+  
+  Vector_Block* trace_stack = collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); 
+    scan_object(collector, p_ref);
+    trace_stack = collector->trace_stack;
+  }
+    
+  return; 
+}
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void fallback_mark_scan_heap(Collector* collector)
+{ 
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+  
+  assert(gc->collect_kind == FALLBACK_COLLECTION);
+
+  /* one collector resets num_finished_collectors to 0; this is necessary for the barrier later. */
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+   
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to mark tasks. 
+      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      /* root refs can't be NULL (remsets may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+      assert(p_obj != NULL);
+      
+      collector_tracestack_push(collector, p_ref);
+
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buf for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+  
+  while(mark_task){
+    unsigned int* iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+
+      /* FIXME:: we should not let mark_task become empty while working; others may want to steal it.
+         Degenerate my stack into mark_task, and grab another mark_task. */
+      trace_object(collector, p_ref);
+    } 
+    /* run out one task, put back to the pool and grab another task */
+   vector_stack_clear(mark_task);
+   pool_put_entry(metadata->free_task_pool, mark_task);
+   mark_task = pool_get_entry(metadata->mark_task_pool);      
+  }
+  
+  /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting for num_finished_collectors, because each
+     newly generated task would surely be processed by its generating collector eventually.
+     So the code below is only a load-balance optimization. */
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( !pool_is_empty(metadata->mark_task_pool)){
+      atomic_dec32(&num_finished_collectors);
+      goto retry;  
+    }
+  }
+     
+  /* put back the last mark stack to the free pool */
+  mark_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(mark_task);
+  pool_put_entry(metadata->free_task_pool, mark_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Thu Jan 11 05:57:16 2007
@@ -25,31 +25,11 @@
   return;
 }
 
-static void mspace_init_blocks(Mspace* mspace)
-{ 
-  Block* blocks = (Block*)mspace->heap_start; 
-  Block_Header* last_block = (Block_Header*)blocks;
-  unsigned int start_idx = mspace->first_block_idx;
-  for(unsigned int i=0; i < mspace->num_managed_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES);
-    block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); 
-    block->base = block->free;
-    block->block_idx = i + start_idx;
-    block->status = BLOCK_FREE;  
-    last_block->next = block;
-    last_block = block;
-  }
-  last_block->next = NULL;
-  mspace->blocks = blocks;
-   
-  return;
-}
-
 struct GC_Gen;
 extern void gc_set_mos(GC_Gen* gc, Space* space);
-extern Space* gc_set_nos(GC_Gen* gc);
-void mspace_initialize(GC* gc, void* start, unsigned int mspace_size)
+extern Space* gc_get_nos(GC_Gen* gc);
+
+void mspace_initialize(GC* gc, void* start, unsigned int mspace_size, unsigned int commit_size)
 {
   Mspace* mspace = (Mspace*)STD_MALLOC( sizeof(Mspace));
   assert(mspace);
@@ -59,14 +39,14 @@
   mspace->num_total_blocks = mspace_size >> GC_BLOCK_SHIFT_COUNT;
 
   void* reserved_base = start;
-  int status = port_vmem_commit(&reserved_base, mspace_size, gc->allocated_memory); 
-  assert(status == APR_SUCCESS && reserved_base == start);
+  /* commit mspace mem */
+  vm_commit_mem(reserved_base, commit_size);
+  memset(reserved_base, 0, commit_size);
   
-  memset(reserved_base, 0, mspace_size);
-  mspace->committed_heap_size = mspace_size;
+  mspace->committed_heap_size = commit_size;
   mspace->heap_start = reserved_base;
-  mspace->heap_end = (void *)((unsigned int)reserved_base + mspace->reserved_heap_size);
-  mspace->num_managed_blocks = mspace_size >> GC_BLOCK_SHIFT_COUNT;
+  mspace->heap_end = (void *)((unsigned int)reserved_base + mspace_size);
+  mspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT;
   
   mspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base);
   mspace->ceiling_block_idx = mspace->first_block_idx + mspace->num_managed_blocks - 1;
@@ -74,9 +54,11 @@
   mspace->num_used_blocks = 0;
   mspace->free_block_idx = mspace->first_block_idx;
   
-  mspace_init_blocks(mspace);
-  
-  mspace->mark_object_func = mspace_mark_object;
+  space_init_blocks((Blocked_Space*)mspace);
+
+  mspace->num_collections = 0;
+  mspace->time_collections = 0;
+  mspace->survive_ratio = 0.2f;
 
   mspace->move_object = TRUE;
   mspace->gc = gc;
@@ -90,42 +72,78 @@
 {
  //FIXME:: when mapping the to-half, the decommit start address should change
   mspace_destruct_blocks(mspace);
-  port_vmem_decommit(mspace->heap_start, mspace->committed_heap_size, mspace->gc->allocated_memory);
   STD_FREE(mspace);  
 }
 
-  /* for non-gen MINOR_COLLECTION, mspace has both obj and marktable to be cleared,
-     because the marking phase will mark them, but then never touch them 
-     
-     FIXME:: the marking choice between header and mark table has to be decided.
-     Obj header marking has advantage of idempotent, while table marking can prefetch 
-     If we choose only one, we will not have the two version clearings: one after
-     MAJOR_COLLECTION, one after non-gen MINOR_COLLECTION */
-     
-void reset_mspace_after_copy_nursery(Mspace* mspace)
-{ 
-  /* for major collection we do nothing, the reset is done there */
-  assert( mspace->gc->collect_kind == MINOR_COLLECTION );
-
-  unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
-  unsigned int old_num_used = mspace->num_used_blocks;
+void mspace_block_iterator_init_free(Mspace* mspace)
+{
+  mspace->block_iterator = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+}
 
-  /* At the moment, for MINOR_COLLECTION, only non-gen collection does copying.
-     The generational version does forwarding */
-  assert( !gc_requires_barriers());
-  
-  Block* blocks = mspace->blocks;
-  for(unsigned int i=0; i < old_num_used; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    block_clear_markbits(block); 
+//For_LOS_extend
+#include "../common/space_tuner.h"
+void mspace_block_iterator_init(Mspace* mspace)
+{
+  GC* gc = mspace->gc;
+  if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){
+    unsigned int tuning_blocks = ((mspace->gc)->tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+    mspace->block_iterator = (Block_Header*)&(mspace->blocks[tuning_blocks]);
+    return;
   }
+  
+  mspace->block_iterator = (Block_Header*)mspace->blocks;
+  return;
+}
 
-  for(unsigned int i=old_num_used; i < new_num_used; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    block->status = BLOCK_USED;
-  }
 
-  mspace->num_used_blocks = new_num_used;  
-  return;
+Block_Header* mspace_block_iterator_get(Mspace* mspace)
+{
+  return (Block_Header*)mspace->block_iterator;
+}
+
+Block_Header* mspace_block_iterator_next(Mspace* mspace)
+{
+  Block_Header* cur_block = (Block_Header*)mspace->block_iterator;
+  
+  while(cur_block != NULL){
+    Block_Header* next_block = cur_block->next;
+
+    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&mspace->block_iterator, next_block, cur_block);
+    if(temp != cur_block){
+      cur_block = (Block_Header*)mspace->block_iterator;
+      continue;
+    }
+    return cur_block;
+  }
+  /* run out space blocks */
+  return NULL;  
 }
 
+#include "../common/fix_repointed_refs.h"
+
+void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace)
+{
+  //the first block is not set yet
+  Block_Header* curr_block = mspace_block_iterator_next(mspace);
+  unsigned int first_block_idx = mspace->first_block_idx;
+  unsigned int old_num_used = mspace->num_used_blocks;
+  unsigned int old_free_idx = first_block_idx + old_num_used;
+  unsigned int new_free_idx = mspace->free_block_idx;
+  
+  /* for NOS copy, we are sure about the last block for fixing */
+  Block_Header* space_end = (Block_Header*)&mspace->blocks[new_free_idx-first_block_idx];  
+  
+  while( curr_block < space_end){
+    assert(curr_block->status == BLOCK_USED);
+    if( curr_block->block_idx < old_free_idx)
+      /* for blocks used before nos copy */
+      block_fix_ref_after_marking(curr_block); 
+  
+    else  /* for blocks used for nos copy */
+      block_fix_ref_after_copying(curr_block); 
+         
+    curr_block = mspace_block_iterator_next(mspace);
+  }
+   
+  return;  
+}
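
The CAS-based mspace_block_iterator_next above lets several collectors claim
blocks from one shared cursor without locks, as mspace_fix_after_copy_nursery
does. Sketched usage (process_block is a hypothetical stand-in for the
per-block work):

    mspace_block_iterator_init(mspace);   /* single-threaded, before the parallel phase */
    /* then, in each collector thread: */
    Block_Header* block = mspace_block_iterator_next(mspace);
    while(block != NULL){
      process_block(block);               /* each block is claimed by exactly one collector */
      block = mspace_block_iterator_next(mspace);
    }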

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Thu Jan 11 05:57:16 2007
@@ -21,7 +21,6 @@
 #ifndef _MSC_SPACE_H_
 #define _MSC_SPACE_H_
 
-#include "../common/gc_block.h"
 #include "../thread/gc_thread.h"
 
 /* Mark-compaction space is organized into blocks */
@@ -32,9 +31,11 @@
   unsigned int reserved_heap_size;
   unsigned int committed_heap_size;
   unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  Boolean (*mark_object_func)(Mspace* space, Partial_Reveal_Object* p_obj);
   /* END of Space --> */
     
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -48,24 +49,22 @@
   unsigned int num_managed_blocks;
   unsigned int num_total_blocks;
   /* END of Blocked_Space --> */
-    
+  
+  volatile Block_Header* block_iterator;  
+  
 }Mspace;
 
-void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size);
+void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size, unsigned int commit_size);
 void mspace_destruct(Mspace* mspace);
 
-inline Boolean mspace_has_free_block(Mspace* mspace){ return mspace->free_block_idx <= mspace->ceiling_block_idx; }
-inline unsigned int mspace_free_memory_size(Mspace* mspace){ return GC_BLOCK_SIZE_BYTES * (mspace->ceiling_block_idx - mspace->free_block_idx + 1);  }
-inline Boolean mspace_used_memory_size(Mspace* mspace){ return GC_BLOCK_SIZE_BYTES * mspace->num_used_blocks; }
-
 void* mspace_alloc(unsigned size, Allocator *allocator);
 void mspace_collection(Mspace* mspace);
 
-void reset_mspace_after_copy_nursery(Mspace* mspace);
-
+void mspace_block_iterator_init(Mspace* mspace);
+void mspace_block_iterator_init_free(Mspace* mspace);
+Block_Header* mspace_block_iterator_next(Mspace* mspace);
+Block_Header* mspace_block_iterator_get(Mspace* mspace);
 
-Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj);
-void mspace_save_reloc(Mspace* mspace, Partial_Reveal_Object** p_ref);
-void mspace_update_reloc(Mspace* mspace);
+void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace);
 
 #endif //#ifdef _MSC_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Thu Jan 11 05:57:16 2007
@@ -22,13 +22,7 @@
 
 static Boolean mspace_alloc_block(Mspace* mspace, Allocator* allocator)
 {
-  Block_Header* alloc_block = (Block_Header* )allocator->alloc_block;
-  /* put back the used block */
-  if(alloc_block != NULL){ /* it is NULL at first time */
-    assert(alloc_block->status == BLOCK_IN_USE);
-    alloc_block->status = BLOCK_USED;
-    alloc_block->free = allocator->free;
-  }
+  alloc_context_reset(allocator);
 
   /* now try to get a new block */
   unsigned int old_free_idx = mspace->free_block_idx;
@@ -41,27 +35,36 @@
       continue;
     }
     /* ok, got one */
-    alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]);
+    Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]);
     assert(alloc_block->status == BLOCK_FREE);
     alloc_block->status = BLOCK_IN_USE;
-    mspace->num_used_blocks++;
-    memset(alloc_block->free, 0, GC_BLOCK_BODY_SIZE_BYTES);
     
     /* set allocation context */
-    allocator->free = alloc_block->free;
+    void* new_free = alloc_block->free;
+    allocator->free = new_free;
+
+#ifndef ALLOC_ZEROING
+
     allocator->ceiling = alloc_block->ceiling;
+    memset(new_free, 0, GC_BLOCK_BODY_SIZE_BYTES);
+
+#else
+
+    /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */
+    unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES;
+    allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size);
+    memset(new_free, 0, zeroing_size);
+
+#endif /* #ifndef ALLOC_ZEROING */
+
+    allocator->end = alloc_block->ceiling;
     allocator->alloc_block = (Block*)alloc_block; 
     
     return TRUE;
   }
 
-  /* if Mspace is used for mutator allocation, here a collection should be triggered. 
-     else if this is only for collector allocation, when code goes here, it means 
-     Mspace is not enough to hold Nursery live objects, so the invoker of this routine 
-     should throw out-of-memory exception.
-     But because in our design, we don't do any Mspace allocation during collection, this
-     path should never be reached. That's why we assert(0) here. */  
-  assert(0);
+  /* Mspace is out, so a collection should be triggered. It can be caused by mutator
+     allocation, or by collector allocation during nos forwarding. */
   return FALSE;
   
 }
@@ -84,7 +87,7 @@
   
   /* grab a new block */
   Boolean ok = mspace_alloc_block(mspace, allocator);
-  assert(ok);
+  if(!ok) return NULL; 
   
   p_return = thread_local_alloc(size, allocator);
   assert(p_return);
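
With the assert(0) replaced by a NULL return in mspace_alloc_block above,
mspace_alloc can now fail; callers are expected to treat that as a trigger
for collection rather than a fatal error. A hedged sketch of the caller side
(gc_trigger_collection is a hypothetical hook, not part of this commit):

    void* p = mspace_alloc(size, allocator);
    if(p == NULL){
      /* MOS exhausted: per the comment above, trigger a collection
         (or report out-of-memory during NOS forwarding) */
      gc_trigger_collection(gc);   /* hypothetical */
    }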

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Thu Jan 11 05:57:16 2007
@@ -15,21 +15,44 @@
  */
 
 /**
- * @author Xiao-Feng Li, 2006/10/05
+ * @author Xiao-Feng Li, 2006/12/12
  */
 
-#include "mspace.h"
-#include "../thread/collector.h"
-#include "../trace_forward/fspace.h"
-#include "../finalizer_weakref/finalizer_weakref.h"
+#include "mspace_collect_compact.h"
+
+Boolean IS_MOVE_COMPACT;
 
 struct GC_Gen;
 Space* gc_get_nos(GC_Gen* gc);
-Space* gc_get_mos(GC_Gen* gc);
-Space* gc_get_los(GC_Gen* gc);
 
-static void reset_mspace_after_compaction(Mspace* mspace)
+static volatile Block_Header* next_block_for_compact;
+static volatile Block_Header* next_block_for_target;
+
+void update_mspace_info_for_los_extension(Mspace *mspace)
 { 
+  Space_Tuner *tuner = mspace->gc->tuner;
+  
+  if(tuner->kind != TRANS_FROM_MOS_TO_LOS)
+    return;
+  
+  unsigned int tune_size = tuner->tuning_size;
+  unsigned int tune_blocks = tune_size >> GC_BLOCK_SHIFT_COUNT;
+
+  mspace->blocks = &mspace->blocks[tune_blocks];
+  mspace->heap_start = mspace->blocks;
+  mspace->committed_heap_size -= tune_size;
+  mspace->reserved_heap_size -= tune_size;
+  mspace->first_block_idx += tune_blocks;
+  mspace->num_managed_blocks -= tune_blocks;
+  mspace->num_total_blocks -= tune_blocks;
+  if(mspace->num_used_blocks > tune_blocks)
+    mspace->num_used_blocks -= tune_blocks;
+  else
+    mspace->num_used_blocks = 0;
+}
+
+void mspace_reset_after_compaction(Mspace* mspace)
+{
   unsigned int old_num_used = mspace->num_used_blocks;
   unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
   unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used;
@@ -38,8 +61,12 @@
   unsigned int i;
   for(i=0; i < num_used; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
-    block_clear_mark_table(block); 
     block->status = BLOCK_USED;
+    block->free = block->new_free;
+    block->new_free = block->base;
+    block->src = NULL;
+    block->next_src = NULL;
+    assert(!block->dest_counter);
 
     if(i >= new_num_used){
       block->status = BLOCK_FREE; 
@@ -51,15 +78,16 @@
   /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */
   for(; i < mspace->num_managed_blocks; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
-    assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET));
+    assert(block->status & (BLOCK_COMPACTED|BLOCK_TARGET|BLOCK_DEST));
     block->status = BLOCK_FREE;
+    block->src = NULL;
+    block->next_src = NULL;
+    block->free = GC_BLOCK_BODY(block);
+    assert(!block->dest_counter);
   }
 }
 
-static volatile Block_Header* next_block_for_compact;
-static volatile Block_Header* next_block_for_target;
-
-static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace)
+void gc_reset_block_for_collectors(GC* gc, Mspace* mspace)
 {
   unsigned int free_blk_idx = mspace->first_block_idx;
   for(unsigned int i=0; i<gc->num_active_collectors; i++){
@@ -70,49 +98,62 @@
     collector->cur_target_block = NULL;
     collector->cur_compact_block = NULL;
   }
-  mspace->free_block_idx = free_blk_idx+1;
+  mspace->free_block_idx = free_blk_idx+1;  
   return;
 }
 
-static void gc_init_block_for_collectors(GC* gc, Mspace* mspace)
+void gc_init_block_for_collectors(GC* gc, Mspace* mspace)
 {
   unsigned int i;
   Block_Header* block;
-  for(i=0; i<gc->num_active_collectors; i++){
-    Collector* collector = gc->collectors[i];
+  Space_Tuner* tuner = gc->tuner;
+  /* The LOS size doesn't need to change. */
+  if(tuner->kind == TRANS_NOTHING){
+    for(i=0; i<gc->num_active_collectors; i++){
+      Collector* collector = gc->collectors[i];
+      block = (Block_Header*)&mspace->blocks[i];
+      collector->cur_target_block = block;
+      collector->cur_compact_block = block;
+      block->status = BLOCK_TARGET;
+    }
+    
     block = (Block_Header*)&mspace->blocks[i];
-    collector->cur_target_block = block;
-    collector->cur_compact_block = block;
-    block->status = BLOCK_TARGET;
+    next_block_for_target = block;
+    next_block_for_compact = block;
+    return;
+  }
+  //For_LOS_extend: the leading trans_blocks of MOS will be handed to LOS
+  else if(tuner->kind == TRANS_FROM_MOS_TO_LOS)
+  {
+    Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
+    Block_Header* nos_last_block = (Block_Header*)&nos->blocks[nos->num_managed_blocks-1];
+    Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
+    unsigned int trans_blocks = (tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+    nos_last_block->next = mos_first_block;
+    ((Block_Header*)&(mspace->blocks[trans_blocks - 1]))->next = NULL;
+    
+    for(i=0; i< gc->num_active_collectors; i++){
+      Collector* collector = gc->collectors[i];
+      block = (Block_Header*)&mspace->blocks[i + trans_blocks];
+      collector->cur_target_block = block;
+      collector->cur_compact_block = block;
+      block->status = BLOCK_TARGET;
+    }
+    
+    block = (Block_Header*)&mspace->blocks[i+trans_blocks];
+    next_block_for_target = block;
+    next_block_for_compact = block;
+    return;
   }
-  
-  block = (Block_Header*)&mspace->blocks[i];
-  next_block_for_target = block;
-  next_block_for_compact = block;
-  return;
 }
 
-static Boolean gc_collection_result(GC* gc)
-{
-  Boolean result = TRUE;
-  for(unsigned i=0; i<gc->num_active_collectors; i++){
-    Collector* collector = gc->collectors[i];
-    result &= collector->result;
-  }  
-  return result;
-}
-
-static Block_Header* mspace_get_first_compact_block(Mspace* mspace)
+Block_Header* mspace_get_first_compact_block(Mspace* mspace)
 { return (Block_Header*)mspace->blocks; }
 
-static Block_Header* mspace_get_first_target_block(Mspace* mspace)
+Block_Header* mspace_get_first_target_block(Mspace* mspace)
 { return (Block_Header*)mspace->blocks; }
 
-
-static Block_Header* mspace_get_next_compact_block1(Mspace* mspace, Block_Header* block)
-{  return block->next; }
-
-static Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace)
+Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace)
 { 
   /* firstly put back the compacted block. If it's not BLOCK_TARGET, it will be set to BLOCK_COMPACTED */
   unsigned int block_status = collector->cur_compact_block->status;
@@ -142,7 +183,7 @@
   return NULL;
 }
 
-static Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
+Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
 {    
   Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
   
@@ -165,9 +206,9 @@
   */
 
   /* nos is higher than mos, we cant use nos block for compaction target */
-  Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
-  while( cur_target_block < mspace_heap_end ){
-    assert( cur_target_block <= collector->cur_compact_block);
+  while( cur_target_block ){
+    //For_LOS_extend: a target block may now pass cur_compact_block, so the old assertion no longer holds
+    //assert( cur_target_block <= collector->cur_compact_block);
     Block_Header* next_target_block = cur_target_block->next;
     volatile unsigned int* p_block_status = &cur_target_block->status;
     unsigned int block_status = cur_target_block->status;
@@ -199,195 +240,57 @@
   return NULL;  
 }
 
-Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj)
-{  
-#ifdef _DEBUG 
-  if( obj_is_marked_in_vt(p_obj)) return FALSE;
-#endif
+void mspace_collection(Mspace* mspace) 
+{
+  // printf("Major Collection ");
 
-  obj_mark_in_vt(p_obj);
+  mspace->num_collections++;
 
-  unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
-  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj);   
-  
-  unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
-  unsigned int word_mask = (1<<obj_offset_in_word);
-  
-  unsigned int old_value = *p_word;
-  unsigned int new_value = old_value|word_mask;
-  
-  while(old_value != new_value){
-    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
-    if(temp == old_value) return TRUE;
-    old_value = *p_word;
-    new_value = old_value|word_mask;
-  }
-  return FALSE;
-}
+  GC* gc = mspace->gc;  
 
-static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
-{  
-  Block_Header* curr_block = collector->cur_compact_block;
-  Block_Header* dest_block = collector->cur_target_block;
-
-  void* dest_addr = GC_BLOCK_BODY(dest_block);
- 
-  while( curr_block ){
-    unsigned int mark_bit_idx;
-    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
-    
-    while( p_obj ){
-      assert( obj_is_marked_in_vt(p_obj));
-            
-      unsigned int obj_size = vm_object_size(p_obj);
-      
-      if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
-        dest_block->free = dest_addr;
-        dest_block = mspace_get_next_target_block(collector, mspace);
-        if(dest_block == NULL){ 
-          collector->result = FALSE; 
-          return; 
-        }
-        
-        dest_addr = GC_BLOCK_BODY(dest_block);
-      }
-      assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
-      
-      Obj_Info_Type obj_info = get_obj_info(p_obj);
-      if( obj_info != 0 ) {
-        collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
-      }
-      
-      set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
+  /* init the pool before starting multiple collectors */
 
-      /* FIXME: should use alloc to handle alignment requirement */
-      dest_addr = (void *) WORD_SIZE_ROUND_UP((unsigned int) dest_addr + obj_size);
-      p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
-  
-    }
-    curr_block = mspace_get_next_compact_block(collector, mspace);
-  }
-  
-  return;
-}   
-
-#include "../verify/verify_live_heap.h"
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
 
-static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
-{
-  Block_Header* curr_block = mspace_get_first_compact_block(mspace);
-  
-  while( curr_block ){
-    unsigned int mark_bit_idx;
-    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
+  /* Dual mark bits consume two bits in obj info, leaving only 5 bits for
+     the header hashbits. That's not enough, so we implement on-demand hash
+     field allocation in the object during moving; move_compact doesn't
+     support it. Dual mark bits are used by the MINOR_NONGEN_FORWARD algorithm */
+
+  //For_LOS_extend
+  if(gc->tuner->kind != TRANS_NOTHING){
+    // printf("for LOS extention");
+    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
     
-    while( p_obj ){
-      assert( obj_is_marked_in_vt(p_obj));
-      obj_unmark_in_vt(p_obj);
-      
-      unsigned int obj_size = vm_object_size(p_obj);
-      Partial_Reveal_Object *p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
-      if( p_obj != p_target_obj){
-        memmove(p_target_obj, p_obj, obj_size);
-
-        if (verify_live_heap)
-          /* we forwarded it, we need remember it for verification */
-          event_collector_move_obj(p_obj, p_target_obj, collector);
-      }
-     
-      set_obj_info(p_target_obj, 0);
- 
-      p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);  
-    }
+  }else if (gc->collect_kind == FALLBACK_COLLECTION){
+    // printf("for Fallback");
+    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);  
+    //IS_MOVE_COMPACT = TRUE;
+    //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+    //IS_MOVE_COMPACT = FALSE;
+
+  }else{
+
+    switch(mspace->collect_algorithm){
+      case MAJOR_COMPACT_SLIDE:
+        collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);    
+        break;
         
-    curr_block = mspace_get_next_compact_block1(mspace, curr_block);
-  }
-
-  return;
-} 
-
-void gc_update_repointed_refs(Collector* collector);
-
-static volatile unsigned int num_marking_collectors = 0;
-static volatile unsigned int num_installing_collectors = 0;
-
-static void mark_compact_mspace(Collector* collector) 
-{
-  GC* gc = collector->gc;
-  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
-  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
-
-  /* Pass 1: mark all live objects in heap, and save all the slots that 
-             have references  that are going to be repointed */
-  unsigned int num_active_collectors = gc->num_active_collectors;
-  
-  /* Pass 1: mark all live objects in heap, and save all the slots that 
-             have references  that are going to be repointed */
-  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
-
-  mark_scan_heap(collector);
-
-  old_num = atomic_inc32(&num_marking_collectors);
-  if( ++old_num == num_active_collectors ){
-    /* last collector's world here */
-    /* prepare for next phase */
-    gc_init_block_for_collectors(gc, mspace); 
-    
-    collector_process_finalizer_weakref(collector);
-    
-    /* let other collectors go */
-    num_marking_collectors++; 
-  }
-  
-  while(num_marking_collectors != num_active_collectors + 1);
-  
-  /* Pass 2: assign target addresses for all to-be-moved objects */
-  atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
-
-  mspace_compute_object_target(collector, mspace);   
-  
-  old_num = atomic_inc32(&num_installing_collectors);
-  if( ++old_num == num_active_collectors ){
-    /* single thread world */
-    if(!gc_collection_result(gc)){
-      printf("Out of Memory!\n");
-      assert(0); /* mos is out. FIXME:: throw exception */
+      case MAJOR_COMPACT_MOVE:
+        IS_MOVE_COMPACT = TRUE;
+        collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+        IS_MOVE_COMPACT = FALSE;
+        break;
+        
+      default:
+        printf("\nThe speficied major collection algorithm doesn't exist!\n");
+        exit(0);
+        break;
     }
-    gc_reset_block_for_collectors(gc, mspace);
-    num_installing_collectors++; 
-  }
-  
-  while(num_installing_collectors != num_active_collectors + 1);
-
-  /* FIXME:: temporary. let only one thread go forward */
-  if( collector->thread_handle != 0 ) return;
-    
-  /* Pass 3: update all references whose objects are to be moved */  
-  gc_update_repointed_refs(collector);
-  
-  gc_post_process_finalizer_weakref(gc);
-    
-  /* Pass 4: do the compaction and reset blocks */  
-  next_block_for_compact = mspace_get_first_compact_block(mspace);
-  mspace_sliding_compact(collector, mspace);
-  /* FIXME:: should be collector_restore_obj_info(collector) */
-  gc_restore_obj_info(gc);
 
-  reset_mspace_after_compaction(mspace);
-  reset_fspace_for_allocation(fspace);
-  
-  return;
-}
-
-void mspace_collection(Mspace* mspace) 
-{
-  mspace->num_collections++;
-
-  GC* gc = mspace->gc;  
-
-  pool_iterator_init(gc->metadata->gc_rootset_pool);
+  }  
 
-  collector_execute_task(gc, (TaskType)mark_compact_mspace, (Space*)mspace);
-  
+  // printf("...end.\n");
   return;  
 } 
+

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/12/12
+ */
+
+#ifndef _MSPACE_COLLECT_COMPACT_H_
+#define _MSPACE_COLLECT_COMPACT_H_
+
+#include "mspace.h"
+#include "../thread/collector.h"     
+#include "../common/space_tuner.h"
+
+void gc_reset_block_for_collectors(GC* gc, Mspace* mspace);
+void gc_init_block_for_collectors(GC* gc, Mspace* mspace);
+
+void update_mspace_info_for_los_extension(Mspace* mspace);
+void mspace_reset_after_compaction(Mspace* mspace);
+
+Block_Header* mspace_get_first_compact_block(Mspace* mspace);
+Block_Header* mspace_get_first_target_block(Mspace* mspace);
+Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace);
+Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace);
+
+void slide_compact_mspace(Collector* collector);
+void move_compact_mspace(Collector* collector);
+
+void fallback_mark_scan_heap(Collector* collector);
+
+void mspace_extend_compact(Collector *collector);
+
+extern Boolean IS_MOVE_COMPACT;
+
+#endif /* _MSPACE_COLLECT_COMPACT_H_ */
+

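Several of the routines declared above convert between byte sizes and block counts with a shift by GC_BLOCK_SHIFT_COUNT. A small worked example, assuming a 32 KB block (GC_BLOCK_SHIFT_COUNT of 15 is an assumed value here, not one taken from this commit):

    #include <cstdio>

    int main()
    {
      const unsigned int GC_BLOCK_SHIFT_COUNT = 15;  /* 32 KB blocks (assumed) */
      unsigned int tuning_size = 4 * 1024 * 1024;    /* LOS grows by 4 MB */
      unsigned int tune_blocks = tuning_size >> GC_BLOCK_SHIFT_COUNT;
      /* 4 MB / 32 KB = 128 blocks transferred from MOS to LOS */
      std::printf("%u blocks\n", tune_blocks);
      return 0;
    }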
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp Thu Jan 11 05:57:16 2007
@@ -0,0 +1,370 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Chunrong Lai, 2006/12/25
+ */
+
+#include "mspace_collect_compact.h"
+#include "../trace_forward/fspace.h"
+#include "../mark_sweep/lspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../gen/gen.h"
+#include "../common/fix_repointed_refs.h"
+#include "../common/interior_pointer.h"
+#include "../verify/verify_live_heap.h"
+
+static volatile Block *mos_first_new_block = NULL;
+static volatile Block *nos_first_free_block = NULL;
+static volatile Block *first_block_to_move = NULL;
+
+static void set_first_and_end_block_to_move(Collector *collector, unsigned int mem_changed_size)
+{
+  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
+  Mspace *mspace = gc_gen->mos;
+  Fspace *fspace = gc_gen->nos;
+  
+  assert (!(mem_changed_size % SPACE_ALLOC_UNIT));
+  
+  unsigned int mos_added_block_num = mem_changed_size >> GC_BLOCK_SHIFT_COUNT;    // number of blocks that need moving
+  first_block_to_move = nos_first_free_block - mos_added_block_num;
+  if(first_block_to_move < (Block *)space_heap_start((Space *)fspace))
+    first_block_to_move = (Block *)space_heap_start((Space *)fspace);
+}
+
+static unsigned int fspace_shrink(Fspace *fspace)
+{
+  void *committed_nos_end = (void *)((unsigned int)space_heap_start((Space *)fspace) + fspace->committed_heap_size);
+  
+  unsigned int nos_used_size = (unsigned int)nos_first_free_block - (unsigned int)fspace->heap_start;
+  unsigned int nos_free_size = (unsigned int)committed_nos_end - (unsigned int)nos_first_free_block;
+  unsigned int decommit_size = (nos_used_size <= nos_free_size) ? nos_used_size : nos_free_size;
+  assert(decommit_size);
+  
+  void *decommit_base = (void *)((unsigned int)committed_nos_end - decommit_size);
+  decommit_base = (void *)round_down_to_size((unsigned int)decommit_base, SPACE_ALLOC_UNIT);
+  if(decommit_base < (void *)nos_first_free_block)
+    decommit_base = (void *)((unsigned int)decommit_base + SPACE_ALLOC_UNIT);
+  decommit_size = (unsigned int)committed_nos_end - (unsigned int)decommit_base;
+  assert(decommit_size && !(decommit_size % SPACE_ALLOC_UNIT));
+  
+  Boolean result = vm_decommit_mem(decommit_base, decommit_size);
+  assert(result == TRUE);
+  
+  fspace->committed_heap_size = (unsigned int)decommit_base - (unsigned int)fspace->heap_start;
+  fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+  
+  Block_Header *new_last_block = (Block_Header *)&fspace->blocks[fspace->num_managed_blocks - 1];
+  fspace->ceiling_block_idx = new_last_block->block_idx;
+  new_last_block->next = NULL;
+  
+  return decommit_size;
+}
+
+static void link_mspace_extended_blocks(Mspace *mspace, Fspace *fspace)
+{
+  Block_Header *old_last_mos_block = (Block_Header *)(mos_first_new_block -1);
+  old_last_mos_block->next = (Block_Header *)mos_first_new_block;
+  void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); 
+  Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
+  new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)fspace);
+}
+
+static Block *mspace_extend_without_link(Mspace *mspace, unsigned int commit_size)
+{
+  assert(commit_size && !(commit_size % SPACE_ALLOC_UNIT));
+  
+  void *committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
+  void *commit_base = committed_mos_end;
+  assert(!((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT));
+  
+  void *result = vm_commit_mem(commit_base, commit_size);
+  assert(result == commit_base);
+  
+  void *new_end = (void *)((unsigned int)commit_base + commit_size);
+  mspace->committed_heap_size = (unsigned int)new_end - (unsigned int)mspace->heap_start;
+  
+  /* init the grown blocks */
+  Block_Header *block = (Block_Header *)commit_base;
+  Block_Header *last_block = (Block_Header *)((Block *)block -1);
+  unsigned int start_idx = last_block->block_idx + 1;
+  unsigned int i;
+  for(i=0; block < (Block_Header *)new_end; i++){
+    block_init(block);
+    block->block_idx = start_idx + i;
+    if(i != 0) last_block->next = block;
+    last_block = block;
+    block = (Block_Header *)((Block *)block + 1);
+  }
+  last_block->next = NULL;
+  mspace->ceiling_block_idx = last_block->block_idx;
+  mspace->num_managed_blocks = mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+  
+  return (Block *)commit_base;
+}
+
+static void mspace_block_iter_init_for_extension(Mspace *mspace, Block_Header *start_block)
+{
+  mspace->block_iterator = start_block;
+}
+
+static Block_Header *mspace_block_iter_next_for_extension(Mspace *mspace, Block_Header *end_block)
+{
+  Block_Header *cur_block = (Block_Header *)mspace->block_iterator;
+  
+  while(cur_block && cur_block < end_block){
+    Block_Header *next_block = cur_block->next;
+
+    Block_Header *temp = (Block_Header *)atomic_casptr((volatile void **)&mspace->block_iterator, next_block, cur_block);
+    if(temp != cur_block){
+      cur_block = (Block_Header*)mspace->block_iterator;
+      continue;
+    }
+    return cur_block;
+  }
+  /* ran out of space blocks */
+  return NULL;  
+}
+
+inline void object_refix_ref_slots(Partial_Reveal_Object* p_obj, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  if( !object_has_ref_field(p_obj) ) return;
+  
+  /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
+  
+    int32 array_length = array->array_len;
+    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
+    
+    for (int i = 0; i < array_length; i++) {
+      Partial_Reveal_Object** p_ref = p_refs + i;
+      Partial_Reveal_Object*  p_element = *p_ref;
+      if((p_element > start_address) && (p_element < end_address))
+        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
+    }
+    return;
+  }
+  
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+  
+    Partial_Reveal_Object*  p_element = *p_ref;
+    if((p_element > start_address) && (p_element < end_address))
+      *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+  return;
+}
+
+static void mspace_refix_repointed_refs(Collector *collector, Mspace* mspace, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  Block_Header *mspace_first_free_block = (Block_Header *)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  
+  while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, mspace_first_free_block)){
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
+    Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->new_free;   // new_free or free, depending on whether the reset has happened yet
+    while(p_obj < block_end){
+      object_refix_ref_slots(p_obj, start_address, end_address, addr_diff);
+      p_obj = obj_end(p_obj);
+    }
+  }
+}
+
+static void lspace_refix_repointed_refs(Collector* collector, Lspace* lspace, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  unsigned int start_pos = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos);
+  while( p_obj){
+    assert(obj_is_marked_in_vt(p_obj));
+    object_refix_ref_slots(p_obj, start_address, end_address, addr_diff);
+    p_obj = lspace_get_next_marked_object(lspace, &start_pos);
+  }
+}
+
+
+static void gc_reupdate_repointed_sets(GC* gc, Pool* pool, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  GC_Metadata *metadata = gc->metadata;
+  assert(gc->collect_kind != MINOR_COLLECTION);
+  
+  pool_iterator_init(pool);
+
+  while(Vector_Block *root_set = pool_iterator_next(pool)){
+    unsigned int *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object *p_obj = *p_ref;
+      if((p_obj > start_address) && (p_obj < end_address))
+        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_obj - addr_diff);
+    }
+  }
+}
+
+static void gc_refix_rootset(Collector *collector, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  GC *gc = collector->gc;  
+  GC_Metadata *metadata = gc->metadata;
+
+  /* only for MAJOR_COLLECTION and FALLBACK_COLLECTION */
+  assert(gc->collect_kind != MINOR_COLLECTION);
+  
+  gc_reupdate_repointed_sets(gc, metadata->gc_rootset_pool, start_address, end_address, addr_diff);
+  
+#ifndef BUILD_IN_REFERENT
+  gc_update_finref_repointed_refs(gc);
+#endif
+
+  update_rootset_interior_pointer();
+}
+
+static void move_compacted_blocks_to_mspace(Collector *collector, unsigned int addr_diff)
+{
+  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
+  Mspace *mspace = gc_gen->mos;
+  Fspace *fspace = gc_gen->nos;
+  
+  while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
+    void *src_base = (void *)block->base;
+    void *block_end = block->new_free;   // new_free or free, depending on whether the reset has happened yet
+    unsigned int size = (unsigned int)block_end - (unsigned int)src_base;
+    Block_Header *dest_block = GC_BLOCK_HEADER((void *)((unsigned int)src_base - addr_diff));
+    memmove(dest_block->base, src_base, size);
+    dest_block->new_free = (void *)((unsigned int)block_end - addr_diff);
+    if(verify_live_heap)
+      while (p_obj < block_end) {
+        event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((unsigned int)p_obj - addr_diff), collector);
+        p_obj = obj_end(p_obj);
+      }
+  }
+}
+
+static volatile unsigned int num_space_changing_collectors = 0;
+
+#ifndef STATIC_NOS_MAPPING
+void mspace_extend_compact(Collector *collector)
+{
+  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
+  Mspace *mspace = gc_gen->mos;
+  Fspace *fspace = gc_gen->nos;
+  Lspace *lspace = gc_gen->los;
+  
+  unsigned int num_active_collectors = gc_gen->num_active_collectors;
+  unsigned int old_num;
+  atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
+  old_num = atomic_inc32(&num_space_changing_collectors);
+  if( ++old_num == num_active_collectors ){
+     Block *old_nos_boundary = fspace->blocks;
+     nos_boundary = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+     assert(nos_boundary > old_nos_boundary);
+     unsigned int mem_change_size = ((Block *)nos_boundary - old_nos_boundary) << GC_BLOCK_SHIFT_COUNT;
+     fspace->heap_start = nos_boundary;
+     fspace->blocks = (Block *)nos_boundary;
+     fspace->committed_heap_size -= mem_change_size;
+     fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+     fspace->num_total_blocks = fspace->num_managed_blocks;
+     fspace->first_block_idx = ((Block_Header *)nos_boundary)->block_idx;
+     fspace->free_block_idx = fspace->first_block_idx;
+     
+     mspace->heap_end = nos_boundary;
+     mspace->committed_heap_size += mem_change_size;
+     mspace->num_managed_blocks = mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+     mspace->num_total_blocks = mspace->num_managed_blocks;
+     mspace->ceiling_block_idx = ((Block_Header *)nos_boundary)->block_idx - 1;
+
+     num_space_changing_collectors ++;
+  }
+  while(num_space_changing_collectors != num_active_collectors + 1);
+}
+
+#else
+static volatile unsigned int num_refixing_collectors = 0;
+static volatile unsigned int num_moving_collectors = 0;
+
+void mspace_extend_compact(Collector *collector)
+{
+  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
+  Mspace *mspace = gc_gen->mos;
+  Fspace *fspace = gc_gen->nos;
+  Lspace *lspace = gc_gen->los;
+  
+  unsigned int num_active_collectors = gc_gen->num_active_collectors;
+  unsigned int old_num;
+  
+  Block *nos_first_block = fspace->blocks;
+  nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  assert(nos_first_free_block > nos_first_block);
+  
+  while(nos_first_free_block > nos_first_block){
+    
+    atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
+    old_num = atomic_inc32(&num_space_changing_collectors);
+    if( old_num == 0 ){
+      unsigned int mem_changed_size = fspace_shrink(fspace);
+      mos_first_new_block = mspace_extend_without_link(mspace, mem_changed_size);
+      
+      set_first_and_end_block_to_move(collector, mem_changed_size);
+      //mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
+      mspace_block_iter_init_for_extension(mspace, (Block_Header *)mspace->blocks);
+    
+      num_space_changing_collectors++;
+    }
+    while(num_space_changing_collectors != num_active_collectors + 1);
+    
+    atomic_cas32( &num_refixing_collectors, 0, num_active_collectors+1);
+    
+    mspace_refix_repointed_refs(collector, mspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+    
+    old_num = atomic_inc32(&num_refixing_collectors);
+    if( ++old_num == num_active_collectors ){
+      /* init the iterator: prepare for refixing */
+      lspace_refix_repointed_refs(collector, lspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+      gc_refix_rootset(collector, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+      link_mspace_extended_blocks(mspace, fspace);
+      mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
+      num_refixing_collectors++;
+    }
+    while(num_refixing_collectors != num_active_collectors + 1);
+    
+    
+    atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
+    
+    move_compacted_blocks_to_mspace(collector, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+    
+    old_num = atomic_inc32(&num_moving_collectors);
+    if( ++old_num == num_active_collectors ){
+      if(first_block_to_move == nos_first_block) {
+        void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); 
+        Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
+        mspace->free_block_idx = new_last_mos_block->block_idx + 1;
+      }else{
+        mspace->free_block_idx = ((Block_Header*)first_block_to_move)->block_idx;
+      }
+      nos_first_free_block = first_block_to_move;
+      num_moving_collectors++;
+    }
+    while(num_moving_collectors != num_active_collectors + 1);
+  }
+}
+#endif
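
mspace_extend_compact above coordinates its collectors with a recurring rendezvous idiom: atomic_cas32 re-arms a counter, every collector atomically increments it, and the last arrival performs the single-threaded step before bumping the counter once more to release the spinning threads. A self-contained one-phase sketch of that idiom with std::atomic and std::thread follows; the names and the demo workload are illustrative, not this commit's code.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    /* One-shot model of the rendezvous: each collector increments the
       counter; the last arrival runs the single-threaded step, then
       increments once more to release the spinners. (The real code
       re-arms the counter for each phase with atomic_cas32 first.) */
    static std::atomic<unsigned int> phase_count(0);

    void phase_barrier(unsigned int num_collectors, void (*last_one_work)())
    {
      unsigned int old_num = phase_count.fetch_add(1);
      if(old_num + 1 == num_collectors){
        last_one_work();           /* single-threaded window */
        phase_count.fetch_add(1);  /* let the other collectors go */
      }
      while(phase_count.load() != num_collectors + 1){ /* spin */ }
    }

    int main()
    {
      const unsigned int N = 4;
      std::vector<std::thread> collectors;
      for(unsigned int i = 0; i < N; i++)
        collectors.emplace_back(phase_barrier, N,
                                +[]{ std::puts("last collector works alone"); });
      for(std::thread &t : collectors) t.join();
      return 0;
    }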