Posted to commits@harmony.apache.org by xl...@apache.org on 2007/05/24 10:17:26 UTC

svn commit: r541224 [1/2] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ mark_compact/ mark_sweep/ utils/

Author: xli
Date: Thu May 24 01:17:25 2007
New Revision: 541224

URL: http://svn.apache.org/viewvc?view=rev&rev=541224
Log:
HARMONY-3825 : [drlvm][gc_gen]Patch for heap size extension

Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/seq_list.h

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Thu May 24 01:17:25 2007
@@ -132,23 +132,34 @@
   if (is_property_set("gc.mx", VM_PROPERTIES) == 1) {
     max_heap_size = get_size_property("gc.mx");
 
-    if (max_heap_size < min_heap_size)
+    if (max_heap_size < min_heap_size){
       max_heap_size = min_heap_size;
-    if (0 == max_heap_size) 
+      printf("Max heap size: too small, reset to %d MB!\n", max_heap_size/MB);
+    }
+    if (0 == max_heap_size){
       max_heap_size = HEAP_SIZE_DEFAULT;
+      printf("Max heap size: zero, reset to %d MB! \n", max_heap_size/MB);
+    }
  
     min_heap_size = max_heap_size / 10;
-    if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes;
+    if (min_heap_size < min_heap_size_bytes){
+      min_heap_size = min_heap_size_bytes;
+//      printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);
+    }
   }
 
   if (is_property_set("gc.ms", VM_PROPERTIES) == 1) {
     min_heap_size = get_size_property("gc.ms");
-    if (min_heap_size < min_heap_size_bytes) 
+    if (min_heap_size < min_heap_size_bytes){
       min_heap_size = min_heap_size_bytes;
+      printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);    
+    } 
   }
 
-  if (min_heap_size > max_heap_size)
+  if (min_heap_size > max_heap_size){
     max_heap_size = min_heap_size;
+    printf("Max heap size: too small, reset to %d MB\n", max_heap_size / MB);
+  }
 
   min_heap_size_bytes = min_heap_size;
   max_heap_size_bytes = max_heap_size;
@@ -251,6 +262,11 @@
   gc_gen_assign_free_area_to_mutators((GC_Gen*)gc);
 }
 
+void gc_adjust_heap_size(GC* gc, int64 pause_time)
+{
+  gc_gen_adjust_heap_size((GC_Gen*)gc, pause_time);
+}
+
 void gc_copy_interior_pointer_table_to_rootset();
 
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
@@ -263,18 +279,10 @@
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
-
-  //For_LOS_extend!
-#ifdef GC_FIXED_SIZE_TUNER
-  gc_space_tune_before_gc_fixed_size(gc, gc_cause);
-#else
-  gc_space_tune_prepare(gc, gc_cause);
-  gc_space_tune_before_gc(gc, gc_cause);
-#endif
+  gc_compute_space_tune_size_before_marking(gc, gc_cause);
 
 #ifdef MARK_BIT_FLIPPING
-  if(gc_match_kind(gc, MINOR_COLLECTION))
-    mark_bit_flip();
+  if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
 #endif
   
   gc_metadata_verify(gc, TRUE);
@@ -291,21 +299,22 @@
   /* this has to be done after all mutators are suspended */
   gc_reset_mutator_context(gc);
 
-  if(!IGNORE_FINREF )
-    gc_set_obj_with_fin(gc);
+  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
 
   gc_gen_reclaim_heap((GC_Gen*)gc);
   
   gc_reset_interior_pointer_table();
-    
+
   gc_metadata_verify(gc, FALSE);
 
   int64 pause_time = time_now() - start_time;  
   gc->time_collections += pause_time;
+
+  gc_adjust_heap_size(gc, pause_time);
+
   gc_gen_adapt((GC_Gen*)gc, pause_time);
 
-  if(gc_is_gen_mode())
-    gc_prepare_mutator_remset(gc);
+  if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
   
   if(!IGNORE_FINREF ){
     gc_put_finref_to_vm(gc);
@@ -317,16 +326,12 @@
 #endif
   }
 
-  //For_LOS_extend!
   gc_space_tuner_reset(gc);
-  
+
   gc_assign_free_area_to_mutators(gc);
-  
+
   vm_resume_threads_after();
   return;
 }
-
-
-
 
 

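The gc_common.cpp hunk above changes how gc.mx/gc.ms are reconciled: a too-small or zero mx is clamped with a printed warning, and ms is rederived as mx/10 with a floor. A minimal standalone sketch of that policy, with illustrative constants and helper names (not the DRLVM API):

#include <cstdio>
#include <cstddef>

static const size_t MB = 1 << 20;
static const size_t HEAP_SIZE_DEFAULT = 256 * MB;  /* illustrative default */

/* Sketch of the clamping order in the patch: mx is raised to ms if smaller,
 * a zero mx falls back to the default, then ms is rederived as mx/10 but
 * never below the platform floor. */
static void decide_heap_sizes(size_t* min_heap, size_t* max_heap, size_t min_floor)
{
  if (*max_heap < *min_heap) {
    *max_heap = *min_heap;
    printf("Max heap size: too small, reset to %u MB!\n", (unsigned)(*max_heap / MB));
  }
  if (*max_heap == 0) {
    *max_heap = HEAP_SIZE_DEFAULT;
    printf("Max heap size: zero, reset to %u MB!\n", (unsigned)(*max_heap / MB));
  }
  *min_heap = *max_heap / 10;
  if (*min_heap < min_floor) *min_heap = min_floor;
}

int main()
{
  size_t ms = 16 * MB, mx = 8 * MB;            /* user asked for mx < ms */
  decide_heap_sizes(&ms, &mx, 4 * MB);
  printf("ms=%u MB, mx=%u MB\n", (unsigned)(ms / MB), (unsigned)(mx / MB));
  return 0;
}
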
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Thu May 24 01:17:25 2007
@@ -388,7 +388,6 @@
   Vector_Block* root_set;
   Vector_Block* uncompressed_root_set;
 
-  //For_LOS_extend
   Space_Tuner* tuner;
 
 }GC;
@@ -443,6 +442,9 @@
 
 extern void* los_boundary;
 
+/*This flag indicates whether lspace is using a sliding compaction.
+ *Fixme: check if the performance is a problem with this global flag.
+ */
 extern Boolean* p_global_lspace_move_obj;
 inline Boolean obj_is_moved(Partial_Reveal_Object* p_obj)
 {  return ((p_obj >= los_boundary) || (*p_global_lspace_move_obj)); }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Thu May 24 01:17:25 2007
@@ -237,6 +237,7 @@
   if(!p_obj) return 0;
   assert(address_belongs_to_gc_heap(p_obj, p_global_gc));
   Obj_Info_Type info = get_obj_info_raw(p_obj);
+  unsigned int new_info = 0;
   int hash;
   
   switch(info & HASHCODE_MASK){
@@ -250,7 +251,13 @@
       hash = hashcode_lookup(p_obj,info);
       break;
     case HASHCODE_UNSET:
-      set_obj_info(p_obj, info | HASHCODE_SET_BIT);
+      new_info = (unsigned int)(info | HASHCODE_SET_BIT);
+      while (true) {
+        unsigned int temp = atomic_cas32(&p_obj->obj_info, new_info, info);
+        if (temp == info) break;
+        info = get_obj_info_raw(p_obj);
+        new_info =  (unsigned int)(info | HASHCODE_SET_BIT);
+      }
       hash = hashcode_gen((void*)p_obj);
       break;
     default:

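The gc_for_vm.cpp change above replaces a plain set_obj_info() in the HASHCODE_UNSET case with a compare-and-swap retry loop, so two threads racing to hash the same object can no longer lose each other's obj_info updates. The same pattern, sketched with C++11 atomics standing in for DRLVM's atomic_cas32:

#include <atomic>
#include <cstdint>

static const uint32_t HASHCODE_SET_BIT = 0x04;

/* Retry until our "or in the set bit" update is applied on top of the
 * latest header value; compare_exchange_weak reloads old_info on failure. */
void set_hash_bit(std::atomic<uint32_t>& obj_info)
{
  uint32_t old_info = obj_info.load();
  while (!obj_info.compare_exchange_weak(old_info, old_info | HASHCODE_SET_BIT))
    ;  /* old_info now holds the fresh value; just retry */
}
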
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h Thu May 24 01:17:25 2007
@@ -161,7 +161,8 @@
 
   void* new_end = (void*)((POINTER_SIZE_INT)commit_base + commit_size);
   space->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)space->heap_start;
-  
+  /*Fixme: For_Heap_Adjust, but needs fixing for static mapping.*/
+  space->heap_end = new_end;
   /* init the grown blocks */
   Block_Header* block = (Block_Header*)commit_base;
   Block_Header* last_block = (Block_Header*)((Block*)block -1);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/hashcode.h Thu May 24 01:17:25 2007
@@ -53,6 +53,7 @@
 typedef struct Hashcode_Buf{
   Seq_List* list;
   POINTER_SIZE_INT* checkpoint;
+  SpinLock lock;
 }Hashcode_Buf;
 
 extern GC_Metadata gc_metadata;
@@ -117,6 +118,7 @@
 inline int hashcode_buf_lookup(Partial_Reveal_Object* p_obj,Hashcode_Buf* hashcode_buf)
 {
   POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj;
+  lock(hashcode_buf->lock);
   Seq_List* list = hashcode_buf->list; 
   seq_list_iterate_init(list);
   while(seq_list_has_next(list)){
@@ -132,11 +134,13 @@
         iter = vector_block_iterator_advance(curr_block, iter);
         POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter;
         iter = vector_block_iterator_advance(curr_block, iter);
+        unlock(hashcode_buf->lock);
         return *(int*)&hashcode;
       }
     }
   }
   assert(0);
+  unlock(hashcode_buf->lock);
   return 0;
 }
 
@@ -345,374 +349,6 @@
 
 inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, void* dest_addr,
                                                unsigned int * obj_size_precompute)
-{
-  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ 
-    if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj)
-        *obj_size_precompute += GC_OBJECT_ALIGNMENT;
-  }
-}
-
-inline int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj);
-inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info)
-{
-  int hash;
-  if(hashcode_is_attached(p_obj)){
-    int offset = vm_object_size(p_obj);
-    unsigned char* pos = (unsigned char *)p_obj;
-    hash = *(int*) (pos + offset);
-  }else if(hashcode_is_buffered(p_obj)){
-    hash = obj_lookup_hashcode_in_buf(p_obj);
-  }
-  return hash;
-}
-#endif //_HASHCODE_H_
-#ifndef _HASHCODE_H_
-#define _HASHCODE_H_
-
-#include "gc_common.h"
-#include "../utils/vector_block.h"
-#include "../utils/seq_list.h"
-
-#define HASHCODE_MASK         0x1C
-
-#define HASHCODE_SET_BIT      0x04
-#define HASHCODE_ATTACHED_BIT 0x08
-#define HASHCODE_BUFFERED_BIT 0x10
-
-#define HASHCODE_EXTENDED_VT_BIT 0x02
-
-enum Hashcode_Kind{
-  HASHCODE_UNSET            = 0x0,
-  HASHCODE_SET_UNALLOCATED  = HASHCODE_SET_BIT,
-  HASHCODE_SET_ATTACHED     = HASHCODE_SET_BIT | HASHCODE_ATTACHED_BIT,
-  HASHCODE_SET_BUFFERED     = HASHCODE_SET_BIT | HASHCODE_BUFFERED_BIT
-};
-
-inline Boolean obj_is_sethash_in_vt(Partial_Reveal_Object* p_obj){
-  return (Boolean)((POINTER_SIZE_INT)obj_get_vt_raw(p_obj) & HASHCODE_EXTENDED_VT_BIT);
-}
-
-inline void obj_sethash_in_vt(Partial_Reveal_Object* p_obj){
-  VT vt = obj_get_vt_raw(p_obj);
-  obj_set_vt(p_obj,(VT)((POINTER_SIZE_INT)vt | HASHCODE_EXTENDED_VT_BIT));
-}
-
-inline Boolean hashcode_is_set(Partial_Reveal_Object* p_obj)
-{ 
-  Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
-  return obj_info & HASHCODE_SET_BIT;
-}
-
-inline Boolean hashcode_is_attached(Partial_Reveal_Object* p_obj)
-{ 
-  Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
-  return obj_info & HASHCODE_ATTACHED_BIT; 
-}
-
-inline Boolean hashcode_is_buffered(Partial_Reveal_Object* p_obj)
-{
-  Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
-  return obj_info & HASHCODE_BUFFERED_BIT; 
-}
-
-inline int hashcode_gen(void* addr)
-{ return (int)(POINTER_SIZE_INT)addr; }
-
-typedef struct Hashcode_Buf{
-  Seq_List* list;
-  POINTER_SIZE_INT* checkpoint;
-}Hashcode_Buf;
-
-extern GC_Metadata gc_metadata;
-Vector_Block* free_set_pool_get_entry(GC_Metadata *metadata);
-void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata);
-
-inline void hashcode_buf_set_checkpoint(Hashcode_Buf* hashcode_buf)
-{ hashcode_buf->checkpoint = vector_block_get_last_entry((Vector_Block*)hashcode_buf->list->end); }
-
-inline Hashcode_Buf* hashcode_buf_create()
-{
-  Hashcode_Buf* hashcode_buf = (Hashcode_Buf*) STD_MALLOC(sizeof(Hashcode_Buf));
-  memset(hashcode_buf, 0, sizeof(Hashcode_Buf));
-  hashcode_buf->list = seq_list_create();
-  return hashcode_buf;
-}
-
-inline void hashcode_buf_remove(Hashcode_Buf* hashcode_buf, Vector_Block* block)
-{
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_remove(list, (List_Node*) block);
-  vector_block_clear(block);
-  free_set_pool_put_entry(block, &gc_metadata);
-}
-
-inline void hashcode_buf_clear(Hashcode_Buf* hashcode_buf)
-{
-  //push vector block back to free list
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_iterate_init(list);
-  
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);;
-    vector_block_clear(curr_block);
-    free_set_pool_put_entry(curr_block, &gc_metadata);
-  }
-  seq_list_clear(list);
-  return;
-}
-
-inline void hashcode_buf_destory(Hashcode_Buf* hashcode_buf)
-{
-  Seq_List* list = hashcode_buf->list; 
-  hashcode_buf_clear(hashcode_buf);
-  seq_list_destruct(list);
-  STD_FREE((void*)hashcode_buf);
-}
-
-inline void hashcode_buf_init(Hashcode_Buf* hashcode_buf)
-{
-  Seq_List* list = hashcode_buf->list; 
-#ifdef _DEBUG
-  seq_list_iterate_init(list);
-  assert(!seq_list_has_next(list));
-#endif
-  Vector_Block* free_block = free_set_pool_get_entry(&gc_metadata);
-  seq_list_add(list, (List_Node*)free_block);
-  hashcode_buf_set_checkpoint(hashcode_buf);
-  return;
-}
-
-inline int hashcode_buf_lookup(Partial_Reveal_Object* p_obj,Hashcode_Buf* hashcode_buf)
-{
-  POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj;
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_iterate_init(list);
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list); 
-    POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block);
-    
-    while(!vector_block_iterator_end(curr_block, iter)){  
-      POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter;
-      if(obj_addr != addr){
-        iter = vector_block_iterator_advance(curr_block, iter);
-        iter = vector_block_iterator_advance(curr_block, iter);
-      }else{
-        iter = vector_block_iterator_advance(curr_block, iter);
-        POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter;
-        iter = vector_block_iterator_advance(curr_block, iter);
-        return *(int*)&hashcode;
-      }
-    }
-  }
-  assert(0);
-  return 0;
-}
-
-inline void hashcode_buf_add(Partial_Reveal_Object* p_obj, int32 hashcode, Hashcode_Buf* hashcode_buf)
-{
-  Seq_List* list = hashcode_buf->list; 
-  Vector_Block* tail_block = (Vector_Block*)seq_list_end_node(list);
-  vector_block_add_entry(tail_block, (POINTER_SIZE_INT) p_obj);
-  POINTER_SIZE_INT hashcode_var = 0;
-  *(int*) &hashcode_var = hashcode;
-  vector_block_add_entry(tail_block, hashcode_var);
-
-  if(!vector_block_is_full(tail_block)) return;
-  
-  tail_block = free_set_pool_get_entry(&gc_metadata);
-  seq_list_add(list, (List_Node*)tail_block);
-  return;
-}
-
-inline void hashcode_buf_refresh_all(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist)
-{
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_iterate_init(list);
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);;
-    POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block);
-    while(!vector_block_iterator_end(curr_block, iter)){
-      POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter;
-      *iter = addr - dist;
-      iter =vector_block_iterator_advance(curr_block, iter);
-      iter =vector_block_iterator_advance(curr_block, iter);
-    }
-  }
-  return;
-}
-
-inline void hashcode_buf_rollback_new_entry(Hashcode_Buf* hashcode_buf)
-{
-  Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint);
-  POINTER_SIZE_INT* iter = hashcode_buf->checkpoint;
-  while(!vector_block_iterator_end(first_block, iter)){
-    Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-    Obj_Info_Type oi = get_obj_info_raw(p_obj);
-    set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); 
-    iter =vector_block_iterator_advance(first_block, iter);
-    iter =vector_block_iterator_advance(first_block, iter);
-  }
-  first_block->tail = hashcode_buf->checkpoint;
-
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_iterate_init_after_node(list, (List_Node*)first_block);
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);;
-    iter = vector_block_iterator_init(curr_block);
-    while(!vector_block_iterator_end(curr_block, iter)){
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-      Obj_Info_Type oi = get_obj_info_raw(p_obj);
-      set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); 
-      iter =vector_block_iterator_advance(curr_block, iter);
-      iter =vector_block_iterator_advance(curr_block, iter);
-    }
-    hashcode_buf_remove(hashcode_buf, curr_block);
-  } 
-  return;
-}
-
-inline void hashcode_buf_transfer_new_entry(Hashcode_Buf* old_buf, Hashcode_Buf* new_buf)
-{
-  hashcode_buf_set_checkpoint(new_buf);
-
-  Vector_Block* first_block = VECTOR_BLOCK_HEADER(old_buf->checkpoint);
-  POINTER_SIZE_INT* iter = old_buf->checkpoint;
-  while(!vector_block_iterator_end(first_block, iter)){
-    Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-
-    iter =vector_block_iterator_advance(first_block, iter);
-    POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter;
-    iter =vector_block_iterator_advance(first_block, iter);
-    hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf);
-  }
-  first_block->tail = old_buf->checkpoint;
-
-  Seq_List* list = old_buf->list; 
-  seq_list_iterate_init_after_node(list, (List_Node*)first_block);
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);;
-    iter = vector_block_iterator_init(curr_block);
-    while(!vector_block_iterator_end(curr_block, iter)){
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-      iter =vector_block_iterator_advance(curr_block, iter);
-      POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter;
-      iter =vector_block_iterator_advance(curr_block, iter);
-
-      hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf);
-    }
-    hashcode_buf_remove(old_buf, curr_block);
-  } 
-  return;
-}
-
-inline void hashcode_buf_refresh_new_entry(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist)
-{
-  Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint);
-  POINTER_SIZE_INT* iter = hashcode_buf->checkpoint;
-  while(!vector_block_iterator_end(first_block, iter)){
-    POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter;
-    *iter = addr - dist;
-
-    iter =vector_block_iterator_advance(first_block, iter);
-    iter =vector_block_iterator_advance(first_block, iter);
-  }
-
-  Seq_List* list = hashcode_buf->list; 
-  seq_list_iterate_init_after_node(list, (List_Node*)first_block);
-  while(seq_list_has_next(list)){
-    Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);;
-    iter = vector_block_iterator_init(curr_block);
-    while(!vector_block_iterator_end(curr_block, iter)){
-      POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter;
-      *iter = addr - dist;
-
-      iter =vector_block_iterator_advance(curr_block, iter);
-      iter =vector_block_iterator_advance(curr_block, iter);
-    }
-  } 
-  hashcode_buf_set_checkpoint(hashcode_buf);
-  return;
-}
-
-void collector_hashcodeset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref);
-
-inline Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, 
-                                                                                unsigned int* p_obj_size, Collector* collector, 
-                                                                                Hashcode_Buf* old_buf, Hashcode_Buf* new_buf)
-{
-  Obj_Info_Type obj_info = get_obj_info(p_obj);
-  POINTER_SIZE_INT hashcode;
-
-  switch(obj_info & HASHCODE_MASK){
-    case HASHCODE_SET_UNALLOCATED:
-      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
-        *p_obj_size += GC_OBJECT_ALIGNMENT; 
-        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
-        *(int*) &hashcode = hashcode_gen(p_obj);
-        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
-      } 
-      break;
-      
-    case HASHCODE_SET_ATTACHED:
-      obj_sethash_in_vt(p_obj);
-      break;
-      
-    case HASHCODE_SET_BUFFERED:
-      *(int*) &hashcode = hashcode_buf_lookup(p_obj, old_buf);
-      if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){
-        *p_obj_size += GC_OBJECT_ALIGNMENT; 
-        obj_info = obj_info & ~HASHCODE_BUFFERED_BIT;
-        obj_info = obj_info | HASHCODE_ATTACHED_BIT;
-        POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos);
-        collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode);
-      }else{
-        hashcode_buf_add((Partial_Reveal_Object*)dest_addr, *(int*) &hashcode, new_buf);          
-      }
-      break;
-      
-    case HASHCODE_UNSET:
-      break;
-      
-    default:
-      assert(0);
-  
-  }
-  return obj_info;
-}
-
-inline void move_compact_process_hashcode(Partial_Reveal_Object* p_obj,  
-                                                                                 Hashcode_Buf* old_buf, Hashcode_Buf* new_buf)
-{
-  if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){
-    int hashcode;
-    if(hashcode_is_buffered(p_obj)){
-      /*already buffered objects;*/
-      hashcode = hashcode_buf_lookup(p_obj, old_buf);
-      hashcode_buf_add(p_obj, hashcode, new_buf);
-    }else{
-      /*objects need buffering.*/
-      hashcode = hashcode_gen(p_obj);
-      hashcode_buf_add(p_obj, hashcode, new_buf);
-      Obj_Info_Type oi = get_obj_info_raw(p_obj);
-      set_obj_info(p_obj, oi | HASHCODE_BUFFERED_BIT);
-    }
-  }
-}
-
-inline Obj_Info_Type trace_forward_process_hashcode(Partial_Reveal_Object* p_obj, 
-                                                               Obj_Info_Type oi, unsigned int p_obj_size)
-{
-    oi  |= HASHCODE_ATTACHED_BIT;
-    *(int *)(((char*)p_obj) + p_obj_size - GC_OBJECT_ALIGNMENT) = hashcode_gen(p_obj);
-    assert(vm_object_size(p_obj) != 0);
-    return oi;
-}
-
-inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, 
-                                                                              void* dest_addr, unsigned int * obj_size_precompute)
 {
   if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ 
     if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj)

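Besides removing an accidentally duplicated copy of the header body, the hashcode.h hunk adds a SpinLock field to Hashcode_Buf and takes it in hashcode_buf_lookup, releasing it on both the found and not-found paths. A minimal sketch of that discipline, with std::atomic_flag standing in for DRLVM's SpinLock/lock/unlock primitives:

#include <atomic>

struct SpinLock {
  std::atomic_flag flag = ATOMIC_FLAG_INIT;
  void lock()   { while (flag.test_and_set(std::memory_order_acquire)) {} }
  void unlock() { flag.clear(std::memory_order_release); }
};

/* The lookup pattern from the patch: take the lock before walking the
 * table and release it on every exit path, including the early return. */
int locked_lookup(SpinLock& lk, const int* table, int n, int key)
{
  lk.lock();
  for (int i = 0; i < n; ++i) {
    if (table[i] == key) { lk.unlock(); return i; }  /* found: unlock first */
  }
  lk.unlock();                                       /* not found: unlock too */
  return -1;
}
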
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp Thu May 24 01:17:25 2007
@@ -27,38 +27,36 @@
 Space* gc_get_mos(GC_Gen* gc);
 Space* gc_get_nos(GC_Gen* gc);
 Space* gc_get_los(GC_Gen* gc);
-POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace);
+float mspace_get_expected_threshold_ratio(Mspace* mspace);
 POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
     
-/*Prepare the paramenters which are to be used to compute new los size.*/
-void gc_space_tune_prepare(GC* gc, unsigned int cause)
+void gc_decide_space_tune(GC* gc, unsigned int cause)
 {
-  if(gc_match_kind(gc, MINOR_COLLECTION))
-  	return;
-  
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);  
   Space_Tuner* tuner = gc->tuner;
-
+  //debug_adjust
   assert(fspace->free_block_idx >= fspace->first_block_idx);
   unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES;
   fspace->alloced_size = nos_alloc_size;
   /*Fixme: LOS_Adaptive: There should be a condition here, that fspace->collection_num != 0*/
   mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio);
-  /*For_statistic alloc speed: Speed could be represented by sum of alloced size.*/
-  tuner->speed_los += lspace->alloced_size;
+  /*For_statistic alloc speed: Speed can be represented by the alloced size.
+   *The weight of this collection's los/mos alloc speed is the biggest.
+   */
+  tuner->speed_los = lspace->alloced_size;
   tuner->speed_los = (tuner->speed_los + tuner->old_speed_los) >> 1;
-  tuner->speed_mos += mspace->alloced_size;
+  tuner->speed_mos = mspace->alloced_size;
   tuner->speed_mos = (tuner->speed_mos + tuner->old_speed_mos) >> 1;
   
   /*For_statistic wasted memory*/
   POINTER_SIZE_INT curr_used_los = lspace->surviving_size + lspace->alloced_size;
-  assert(curr_used_los <= lspace->committed_heap_size);
   POINTER_SIZE_INT curr_wast_los = lspace->committed_heap_size - curr_used_los;
   tuner->wast_los += curr_wast_los;
   POINTER_SIZE_INT curr_used_mos = mspace->surviving_size + mspace->alloced_size;
-  POINTER_SIZE_INT expected_mos = mspace_get_expected_threshold((Mspace*)mspace);
+  float expected_mos_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
+  POINTER_SIZE_INT expected_mos = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio);
   POINTER_SIZE_INT curr_wast_mos = 0;
   if(expected_mos > curr_used_mos)
     curr_wast_mos = expected_mos - curr_used_mos;
@@ -68,270 +66,263 @@
   /*For_statistic ds in heuristic*/
   tuner->current_ds = (unsigned int)((float)fspace->committed_heap_size * fspace->survive_ratio);
   /*Fixme: Threshold should be computed by a heuristic. tslow and total recycled heap size should be statistics.*/
-  tuner->threshold = tuner->current_ds;
-  if(tuner->threshold > 8 * MB) tuner->threshold = 8 * MB;
+  tuner->threshold_waste = tuner->current_ds;
+  if(tuner->threshold_waste > 8 * MB) tuner->threshold_waste = 8 * MB;
   tuner->min_tuning_size = tuner->current_ds;
   if(tuner->min_tuning_size > 4 * MB) tuner->min_tuning_size = 4 * MB;  
-  return;
-}
 
-/*Check the tuning size, if too small, cancle the tuning.*/
-void check_space_tuner(GC* gc)
-{
-	POINTER_SIZE_INT los_fail_sz_uped = 0;
-	
-  Space_Tuner* tuner = gc->tuner;
-  if((!tuner->need_tune) && (!tuner->force_tune)){
-    assert(tuner->kind == TRANS_NOTHING);
-    assert(tuner->tuning_size == 0);
-    return;
-  }
-  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
-  if((!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size)){
-    tuner->tuning_size = 0;
-    goto check_size;
-  }
-  if((tuner->need_tune) && (!tuner->force_tune)) goto check_size;
-  /*tuner->force_tune must be true here!*/
-  los_fail_sz_uped = lspace_get_failure_size((Lspace*)lspace);
-  assert(!(los_fail_sz_uped % KB));
+  if(tuner->speed_los == 0) tuner->speed_los = 16;
+  if(tuner->speed_mos == 0) tuner->speed_mos = 16;
+
+  /*Needn't tune if dw does not reach threshold.*/  
+  if(tuner->current_dw > tuner->threshold_waste)  tuner->need_tune = 1;
+  /*If LOS is full, we should tune by at least "tuner->least_tuning_size".*/
+  if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1;
 
-  if(tuner->kind == TRANS_FROM_LOS_TO_MOS){
-    tuner->kind = TRANS_FROM_MOS_TO_LOS;
-    tuner->tuning_size = 0;
-    lspace->move_object = 0;
-  }
-  if(tuner->tuning_size < los_fail_sz_uped){
-    tuner->tuning_size = los_fail_sz_uped;
-  }
-  
-check_size:
-  tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
-  if(tuner->tuning_size == 0){
-    tuner->kind = TRANS_NOTHING;
-    lspace->move_object = 0;
-  }
-  
   return;
 }
 
-
 extern POINTER_SIZE_INT min_los_size_bytes;
 extern POINTER_SIZE_INT min_none_los_size_bytes;
-/*Give the tuning kind, and tuning size hint*/
-void gc_space_tune_before_gc(GC* gc, unsigned int cause)
+
+void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause)
 {
-  if(gc_match_kind(gc, MINOR_COLLECTION)) return;
+  if(gc_match_kind(gc, MINOR_COLLECTION))  return;
+  
+  gc_decide_space_tune(gc, cause);
+  
   Space_Tuner* tuner = gc->tuner;
   if((tuner->speed_los == 0) && ( tuner->speed_mos == 0)) return;
-  if(tuner->speed_los == 0) tuner->speed_los = 16;
-  if(tuner->speed_mos == 0) tuner->speed_mos = 16;
-
-  /*Needn't tune if dw does not reach threshold.*/  
-  if(tuner->current_dw > tuner->threshold)  tuner->need_tune = 1;
-  /*If LOS is full, we should tune at lease "tuner->least_tuning_size" size*/
-  if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1;
   if((!tuner->need_tune) && (!tuner->force_tune)) return;
-
+  
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
 
-  POINTER_SIZE_INT los_expect_survive_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
-  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_survive_sz) ? 
-                                                            (lspace->committed_heap_size - los_expect_survive_sz) : 0);
+  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
+  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? 
+                                                            (lspace->committed_heap_size - los_expect_surviving_sz) : 0);
   POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio);
-  POINTER_SIZE_INT mos_expect_threshold = mspace_get_expected_threshold((Mspace*)mspace);
+  float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
+  POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
   POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)?
                                                             (mos_expect_threshold - mos_expect_survive_sz) : 0);
-  POINTER_SIZE_INT total_free = los_expect_free_sz + mos_expect_free_sz;
-  assert(total_free <= gc->committed_heap_size);
+  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz;
+
   float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
-  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free * new_los_ratio);
-  assert(new_free_los_sz <= gc->committed_heap_size);  
-  POINTER_SIZE_INT max_tuning_size = 0;
+  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
+
   /*LOS_Extend:*/
   if((new_free_los_sz > los_expect_free_sz) )
   { 
-    if ( (!tuner->force_tune) && (new_free_los_sz - los_expect_free_sz < tuner->min_tuning_size) ){
-      tuner->kind = TRANS_NOTHING;
-      tuner->tuning_size = 0;
-      return;
-    }
     tuner->kind = TRANS_FROM_MOS_TO_LOS;
-    tuner->tuning_size = round_down_to_size(new_free_los_sz - los_expect_free_sz, GC_BLOCK_SIZE_BYTES);
-    POINTER_SIZE_INT non_los_sz = mspace->committed_heap_size + fspace->committed_heap_size;
-    if(non_los_sz > min_none_los_size_bytes)
-      max_tuning_size = non_los_sz - min_none_los_size_bytes;
-    if(tuner->tuning_size > max_tuning_size) tuner->tuning_size = max_tuning_size;    
+    tuner->tuning_size = new_free_los_sz - los_expect_free_sz;
   }
   /*LOS_Shrink:*/
-  if((new_free_los_sz < los_expect_free_sz))
+  else if(new_free_los_sz < los_expect_free_sz)
   {
-    if ( (!tuner->force_tune) && (los_expect_free_sz - new_free_los_sz < tuner->min_tuning_size) ){
-      tuner->kind = TRANS_NOTHING;
-      tuner->tuning_size = 0;
-      return;
-    }
     tuner->kind = TRANS_FROM_LOS_TO_MOS;
+    tuner->tuning_size = los_expect_free_sz - new_free_los_sz;
     lspace->move_object = 1;
-    assert(lspace->committed_heap_size >= min_los_size_bytes);
-    max_tuning_size = lspace->committed_heap_size - min_los_size_bytes;
-    POINTER_SIZE_INT tuning_size = los_expect_free_sz - new_free_los_sz;
-    if(tuning_size > max_tuning_size) tuning_size = max_tuning_size;
-    tuner->tuning_size = round_down_to_size(tuning_size, GC_BLOCK_SIZE_BYTES);
   }
-  if( (tuner->tuning_size == 0) && (!tuner->force_tune) ){
+  /*Nothing*/
+  else
+  {    
+    tuner->tuning_size = 0;
+  }
+
+  /*If this is not a forced tune and the tuning size is too small, the tuner will not take effect.*/
+  if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
     tuner->kind = TRANS_NOTHING;
+    tuner->tuning_size = 0;
     lspace->move_object = 0;
-    return;
   }
-  check_space_tuner(gc);
-  return;
-}
 
-void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause)
-{
-  if(gc_match_kind(gc, MINOR_COLLECTION)) return;
-  Space_Tuner* tuner = gc->tuner;
-  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
-  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
-  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
-
-  if(cause == GC_CAUSE_LOS_IS_FULL){
-    tuner->kind = TRANS_FROM_MOS_TO_LOS;
-    POINTER_SIZE_INT los_fail_sz = lspace_get_failure_size((Lspace*)lspace);
-    if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){
-      /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS, this number could be zero*/
-      tuner->tuning_size = los_fail_sz;
-      tuner->least_tuning_size = los_fail_sz;
-      tuner->conservative_tuning_size = los_fail_sz;
-    }else{
-      tuner->tuning_size = GC_LOS_MIN_VARY_SIZE;
-      tuner->least_tuning_size = los_fail_sz;         
-      tuner->conservative_tuning_size = ((tuner->tuning_size + tuner->min_tuning_size) >> 1);
-    }
-    POINTER_SIZE_INT none_los_size;
-#ifdef STATIC_NOS_MAPPING
-    none_los_size = mspace->committed_heap_size;
-#else
-    none_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
-#endif
-    if(tuner->tuning_size > none_los_size){
-      tuner->tuning_size = tuner->conservative_tuning_size;
-    }
-    if(tuner->tuning_size > none_los_size){
-      tuner->tuning_size = tuner->least_tuning_size;
-    }
-    if((tuner->tuning_size + gc->num_active_collectors * GC_BLOCK_SIZE_BYTES) >= none_los_size){
-      tuner->tuning_size = 0;
-    }
-  }
-  else
-  /*LOS_Shrink: Fixme: Very simple strategy now. */
-  {
-    return;
-    tuner->kind = TRANS_FROM_LOS_TO_MOS;
-    lspace->move_object = TRUE;
-    tuner->tuning_size = GC_LOS_MIN_VARY_SIZE >> 1;
-  }
-  
-  /*Fixme: Should MOS heap_start must be 64k aligned?*/
-  tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
-  if(tuner->tuning_size == 0){
+  /*If los or non-los is already at its smallest size, there is no need to tune anymore.*/
+  if(((lspace->committed_heap_size <= min_los_size_bytes) && (tuner->kind == TRANS_FROM_LOS_TO_MOS)) ||
+      ((fspace->committed_heap_size + mspace->committed_heap_size <= min_none_los_size_bytes) && (tuner->kind == TRANS_FROM_MOS_TO_LOS))){
+    assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
     tuner->kind = TRANS_NOTHING;
+    tuner->tuning_size = 0;
     lspace->move_object = 0;
   }
+  
+  if(tuner->force_tune){
+    if(tuner->kind == TRANS_FROM_LOS_TO_MOS){
+      tuner->kind = TRANS_FROM_MOS_TO_LOS;
+      tuner->reverse = 1;
+    }
+  }
 
-  return;  
+  return;
 }
 
 #include "../thread/collector.h"
 #include "../mark_sweep/lspace.h"
-Boolean gc_space_retune(GC *gc)
+
+static POINTER_SIZE_INT non_los_live_obj_size;
+static  POINTER_SIZE_INT los_live_obj_size;
+static void gc_compute_live_object_size_after_marking(GC* gc)
+{
+  non_los_live_obj_size = 0;
+  los_live_obj_size = 0;
+
+  unsigned int collector_num = gc->num_active_collectors;
+  for(unsigned int i = collector_num; i--;){
+    Collector *collector = gc->collectors[i];
+    non_los_live_obj_size += collector->non_los_live_obj_size;
+    los_live_obj_size += collector->los_live_obj_size;
+  }
+
+  non_los_live_obj_size += ((collector_num << 2) << GC_BLOCK_SHIFT_COUNT);
+  non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, GC_BLOCK_SIZE_BYTES);
+
+  los_live_obj_size += ((collector_num << 2) << GC_BLOCK_SHIFT_COUNT);
+  los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES);
+
+}
+
+void gc_compute_space_tune_size_after_marking(GC *gc)
 {
-  Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc);
+  gc_compute_live_object_size_after_marking(gc);
+
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
+  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
   Space_Tuner* tuner = gc->tuner;
-  /*LOS_Extend:*/
-  if(tuner->kind == TRANS_FROM_MOS_TO_LOS){
-    POINTER_SIZE_INT non_los_live_obj_size = 0;
-    unsigned int collector_num = gc->num_active_collectors;
-    for(unsigned int i = collector_num; i--;){
-      Collector *collector = gc->collectors[i];
-      non_los_live_obj_size += collector->non_los_live_obj_size;
-    }
-    non_los_live_obj_size += GC_BLOCK_SIZE_BYTES * collector_num * 4;
-    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, GC_BLOCK_SIZE_BYTES);
-    POINTER_SIZE_INT max_free_for_tuning = 0;
-    if (gc->committed_heap_size > los->committed_heap_size + non_los_live_obj_size)
-      max_free_for_tuning = gc->committed_heap_size - los->committed_heap_size - non_los_live_obj_size;
-
-    if(!tuner->force_tune){
-    /*This should not happen! If GC is not issued by los, then it's not necessary to extend it*/
-      if(max_free_for_tuning < tuner->tuning_size)
-        tuner->tuning_size = max_free_for_tuning;
-      if(tuner->tuning_size == 0){
+  
+  POINTER_SIZE_INT max_tuning_size = 0;  
+  POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
+  /*We should ensure that the non_los area is no less than min_none_los_size_bytes*/
+  POINTER_SIZE_INT max_tune_for_min_non_los = 0;
+  if(non_los_size > min_none_los_size_bytes)
+    max_tune_for_min_non_los = non_los_size - min_none_los_size_bytes;
+  POINTER_SIZE_INT max_tune_for_min_los = 0;
+  //debug_adjust
+  assert(lspace->committed_heap_size >= min_los_size_bytes);
+  max_tune_for_min_los = lspace->committed_heap_size - min_los_size_bytes;
+
+  /*If the tuning strategy gives a bigger tuning_size than the failure size, we just follow the strategy and skip the forced tune.*/
+  Boolean doforce = TRUE;
+  POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace);  
+  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
+    doforce = FALSE;
+
+  /*If force tune*/
+  if( (tuner->force_tune) && (doforce) ){
+    
+    tuner->tuning_size = failure_size;
+    
+    /*We should ensure that the tuning size is no more than the free space of the non_los area*/
+    if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size )
+      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;
+
+    if(max_tuning_size > max_tune_for_min_non_los)
+      max_tuning_size = max_tune_for_min_non_los;
+
+    /*Round up to satisfy LOS alloc demand.*/
+    tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+    max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
+
+    /*If the tuning size is too large, we do nothing and wait for the JVM to report OOM*/
+    /*Fixme: if the heap size is not mx, we can extend the whole heap size*/
+    if(tuner->tuning_size > max_tuning_size){
+      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
+      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
+        //debug_adjust
+      assert(max_heap_size_bytes >= gc->committed_heap_size);
+      POINTER_SIZE_INT extend_heap_size = 0;
+      POINTER_SIZE_INT potential_max_heap_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
+
+      //debug_adjust
+      assert(!(potential_max_heap_size % SPACE_ALLOC_UNIT));
+      if(tuner->tuning_size > potential_max_heap_size){
+        tuner->tuning_size = 0;
         tuner->kind = TRANS_NOTHING;
-        los->move_object = 0;
+        lspace->move_object = 0;      
+      }else{
+        extend_heap_size = tuner->tuning_size - max_tuning_size;
+        blocked_space_extend(fspace, (unsigned int)extend_heap_size);
+        gc->committed_heap_size += extend_heap_size;
+        tuner->kind = TRANS_FROM_MOS_TO_LOS;
+        lspace->move_object = 0;        
       }
-      return TRUE;
-    }
-    /*force tune here!*/
-    POINTER_SIZE_INT min_tuning_uped = round_up_to_size(los->failure_size, GC_BLOCK_SIZE_BYTES);
-    if(min_tuning_uped > max_free_for_tuning){
-      tuner->tuning_size = 0;
-      tuner->kind = TRANS_NOTHING;
-      los->move_object = 0;
-      return FALSE;
     }
-    if(tuner->tuning_size < min_tuning_uped){
-      assert(tuner->tuning_size < max_free_for_tuning);
-      tuner->tuning_size = min_tuning_uped;
-      return TRUE;
-    }else/*tuner->tuning_size >= min_tuning_uped*/{
-      if(tuner->tuning_size > max_free_for_tuning)
-        tuner->tuning_size = max_free_for_tuning;
-      return TRUE;
+    else
+    {
+      tuner->kind = TRANS_FROM_MOS_TO_LOS;
+      lspace->move_object = 0;
     }
   }
-  else// if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS)
+  /*No force tune, LOS_Extend:*/
+  else if(tuner->kind == TRANS_FROM_MOS_TO_LOS)
   {
-    POINTER_SIZE_INT los_live_obj_size = 0;
-    unsigned int collector_num = gc->num_active_collectors;
-    for(unsigned int i = collector_num; i--;){
-      Collector *collector = gc->collectors[i];
-      los_live_obj_size += collector->los_live_obj_size;
+    if (gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size){
+      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;
+      if(max_tuning_size > max_tune_for_min_non_los)
+        max_tuning_size = max_tune_for_min_non_los;
+      if( tuner->tuning_size > max_tuning_size)
+        tuner->tuning_size = max_tuning_size;
+      /*Round down so as not to break max_tuning_size*/
+      tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+    }else{ 
+      tuner->tuning_size = 0;
     }
-    los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES);
-    los_live_obj_size += (collector_num << 2 << GC_BLOCK_SHIFT_COUNT);
-    
-    Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc);
-    Space_Tuner *tuner = gc->tuner;
-    POINTER_SIZE_INT los_max_shrink_size = 0;
-    if(los->committed_heap_size > los_live_obj_size)
-      los_max_shrink_size = los->committed_heap_size - los_live_obj_size;
-    if(tuner->tuning_size > los_max_shrink_size) 
-      tuner->tuning_size = los_max_shrink_size;
-    assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
-    if(tuner->tuning_size == 0){
-      tuner->kind = TRANS_NOTHING;
-      los->move_object = 0;
-      return TRUE;
-    }else 
-      return TRUE;
   }
+  /*No force tune, LOS Shrink*/
+  else
+  {    
+    if(lspace->committed_heap_size > los_live_obj_size){
+      max_tuning_size = lspace->committed_heap_size - los_live_obj_size;
+      if(max_tuning_size > max_tune_for_min_los)
+        max_tuning_size = max_tune_for_min_los;
+      if(tuner->tuning_size > max_tuning_size) 
+        tuner->tuning_size = max_tuning_size;
+      /*Round down so as not to break max_tuning_size*/
+      tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+    }else{
+      /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/        
+      tuner->tuning_size = 0;
+    }
+  }
+  if(tuner->tuning_size == 0){
+    tuner->kind = TRANS_NOTHING;
+    lspace->move_object = 0;
+  }
+  return;
+  
 }
 
 void  gc_space_tuner_reset(GC* gc)
 {
+  Space_Tuner* tuner = gc->tuner;
   if( !gc_match_kind(gc, MINOR_COLLECTION)){
-    Space_Tuner* tuner = gc->tuner;
-    POINTER_SIZE_INT old_slos = tuner->speed_los;
-    POINTER_SIZE_INT old_smos = tuner->speed_mos;
-    memset(tuner, 0, sizeof(Space_Tuner));
-    tuner->old_speed_los = old_slos;
-    tuner->old_speed_mos = old_smos;
+    /*Clear the fields at every major collection except the wasted-area statistics.*/
+    tuner->tuning_size = 0;
+    tuner->interim_blocks = NULL;
+    tuner->need_tune = FALSE;
+    tuner->force_tune = FALSE;
+
+    tuner->old_speed_los = tuner->speed_los;
+    tuner->old_speed_mos = tuner->speed_mos;
+    tuner->speed_los = 0;
+    tuner->speed_mos = 0;
+
+    tuner->current_dw  = 0;
+    tuner->current_ds = 0;
+
+    tuner->threshold_waste = 0;
+    tuner->min_tuning_size = 0;
+    /*Reset the accumulated wasted-area size only if los changed.*/
+    if(tuner->kind != TRANS_NOTHING){
+      tuner->wast_los = 0;
+      tuner->wast_mos = 0;
+    }
+    tuner->kind = TRANS_NOTHING;    
   }
+  
+  return;  
+  
 }
 
 void gc_space_tuner_initialize(GC* gc)
@@ -343,3 +334,61 @@
     tuner->tuning_size = 0;
     gc->tuner = tuner;
 }
+
+/*Malloc and initialize fake blocks for LOS_Shrink*/
+void gc_space_tuner_init_fake_blocks_for_los_shrink(GC* gc)
+{
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+  Space_Tuner* tuner = gc->tuner;
+  Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
+  unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+  tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header));
+  Block_Header* los_trans_fake_blocks = tuner->interim_blocks;
+  memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header));
+  void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size);
+  unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base);
+  Block_Header* last_block = los_trans_fake_blocks;
+
+  for(unsigned int i = 0; i < trans_blocks; i ++){
+      Block_Header* curr_block = &los_trans_fake_blocks[i];
+      curr_block->block_idx = start_idx + i;
+      curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES);
+      curr_block->free = curr_block->base ;
+      curr_block->new_free = curr_block->free;
+      curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES);
+      curr_block->status = BLOCK_COMPACTED;
+#ifdef USE_32BITS_HASHCODE
+      curr_block->hashcode_buf = hashcode_buf_create();
+#endif
+      last_block->next = curr_block;
+      last_block = curr_block;
+  }
+  last_block->next = mos_first_block;
+}
+
+/*Copy the fake blocks into real blocks and reconnect these new blocks into the main list
+ *of mspace, then free the fake blocks. The information of mspace is not updated yet.
+ */
+void gc_space_tuner_release_fake_blocks_for_los_shrink(GC* gc)
+{
+  Space_Tuner *tuner = gc->tuner;
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+  
+  POINTER_SIZE_INT tune_size = tuner->tuning_size;
+  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
+
+  Block* blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
+  Block_Header* last_real_block = (Block_Header*)blocks;
+  unsigned int i;
+  for(i=0; i < tune_blocks; i++){
+    Block_Header* real_block = (Block_Header*)&(blocks[i]);
+    Block_Header* fake_block = &tuner->interim_blocks[i];
+    memcpy((void*)real_block, (void*)fake_block, sizeof(Block_Header));
+    last_real_block->next = real_block;
+    last_real_block = real_block;
+  }
+  last_real_block->next = (Block_Header*)mspace->blocks;
+  STD_FREE(tuner->interim_blocks);
+  return;
+}
+

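Two pieces of the rewritten tuner are worth restating: each space's allocation speed is now the current collection's alloced size averaged with the previous speed, and the expected free space is split between LOS and MOS in proportion to those speeds. A worked sketch with made-up sizes (not measurements from the patch):

#include <cstdio>
#include <cstddef>

int main()
{
  /* speed = (current alloced size + old speed) >> 1, as in the patch */
  size_t speed_los = (6 + 2) >> 1;    /* current 6 MB, old 2 MB -> 4 MB   */
  size_t speed_mos = (10 + 14) >> 1;  /* current 10 MB, old 14 MB -> 12 MB */

  size_t total_expect_free = 32;      /* MB expected free after collection */
  float new_los_ratio = (float)speed_los / (float)(speed_los + speed_mos);
  size_t new_free_los = (size_t)(total_expect_free * new_los_ratio);

  /* 4/(4+12) = 0.25, so LOS should own 8 of the 32 MB of free space;
   * comparing against los_expect_free_sz then picks the transform kind. */
  printf("los ratio %.2f -> %u MB for LOS\n", new_los_ratio, (unsigned)new_free_los);
  return 0;
}
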
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h Thu May 24 01:17:25 2007
@@ -27,6 +27,8 @@
 #define GC_LOS_MIN_VARY_SIZE ( 2 * MB )
 //#define GC_FIXED_SIZE_TUNER
 
+extern POINTER_SIZE_INT max_heap_size_bytes;
+
 //For_LOS_extend
 enum Transform_Kind {
   TRANS_NOTHING = 0,
@@ -36,19 +38,22 @@
 
 typedef struct Space_Tuner{
     Transform_Kind kind;
-
+    /*This flag is set if the los tuning status changes in the process of tuning*/
+    Boolean reverse;
     POINTER_SIZE_INT tuning_size;
-    POINTER_SIZE_INT conservative_tuning_size;
-    POINTER_SIZE_INT least_tuning_size;
     /*Used for LOS_Shrink*/
     Block_Header* interim_blocks;
+    /*This flag is set when the tuning strategy decides to tune the los size,
+      *i.e. wasted memory is greater than threshold_waste.
+      */
     Boolean need_tune;
+    /*This flag is set if gc is caused by los alloc failure.*/
     Boolean force_tune;
     
-    /*LOS alloc speed sciecne last los variation*/    
+    /*LOS alloc speed since last major*/
     POINTER_SIZE_INT speed_los;
     POINTER_SIZE_INT old_speed_los;
-    /*MOS alloc speed sciecne last los variation*/    
+    /*MOS alloc speed since last major*/
     POINTER_SIZE_INT speed_mos;
     POINTER_SIZE_INT old_speed_mos;
     
@@ -61,22 +66,17 @@
     /*NOS survive size of last minor, this could be the least meaningful space unit when talking about tuning.*/
     POINTER_SIZE_INT current_ds;
 
-    /*Threshold for deta wast*/
-    POINTER_SIZE_INT threshold;
+    /*Threshold for delta waste*/
+    POINTER_SIZE_INT threshold_waste;
     /*Minimum tuning size for los variation*/
     POINTER_SIZE_INT min_tuning_size;
-
-    /*Cost of normal major compaction*/
-    unsigned int fast_cost;
-    /*Cost of major compaction when changing LOS size*/    
-    unsigned int slow_cost;    
 }Space_Tuner;
 
-void gc_space_tune_prepare(GC* gc, unsigned int cause);
-void gc_space_tune_before_gc(GC* gc, unsigned int cause);
-void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause);
-Boolean gc_space_retune(GC *gc);
+void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause);
+void gc_compute_space_tune_size_after_marking(GC *gc);
 void gc_space_tuner_reset(GC* gc);
 void gc_space_tuner_initialize(GC* gc);
+void gc_space_tuner_init_fake_blocks_for_los_shrink(GC* gc);
+void gc_space_tuner_release_fake_blocks_for_los_shrink(GC* gc);
 
 #endif /* _SPACE_TUNER_H_ */

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Thu May 24 01:17:25 2007
@@ -125,7 +125,7 @@
     for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
       REF* p_ref = (REF *)iter;
       if(IS_FALLBACK_COMPACTION)
-      fallback_update_fw_ref(p_ref);  // in case that this collection is FALLBACK_COLLECTION
+        fallback_update_fw_ref(p_ref);  // in case that this collection is FALLBACK_COLLECTION
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       if(!p_obj)
         continue;
@@ -281,7 +281,7 @@
       assert(p_obj);
       REF* p_referent_field = obj_get_referent_field(p_obj);
       if(IS_FALLBACK_COMPACTION)
-      fallback_update_fw_ref(p_referent_field);
+        fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
       
       if(!p_referent){  // referent field has been cleared
@@ -598,7 +598,7 @@
       assert(p_obj);
       REF* p_referent_field = obj_get_referent_field(p_obj);
       if(IS_FALLBACK_COMPACTION)
-      fallback_update_fw_ref(p_referent_field);
+        fallback_update_fw_ref(p_referent_field);
       Partial_Reveal_Object* p_referent = read_slot(p_referent_field);
       
       if(!p_referent){  // referent field has been cleared

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp Thu May 24 01:17:25 2007
@@ -74,14 +74,14 @@
 
   min_nos_size_bytes *=  gc_gen->_num_processors;
 
-  POINTER_SIZE_INT min_nos_size_threshold = max_heap_size>>5;
+  POINTER_SIZE_INT min_nos_size_threshold = min_heap_size>>5;
   if(min_nos_size_bytes  > min_nos_size_threshold){
     min_nos_size_bytes = round_down_to_size(min_nos_size_threshold,SPACE_ALLOC_UNIT);
   }
   
   if( MIN_NOS_SIZE )  min_nos_size_bytes = MIN_NOS_SIZE;
 
-  POINTER_SIZE_INT los_size = max_heap_size >> 7;
+  POINTER_SIZE_INT los_size = min_heap_size >> 7;
   if(INIT_LOS_SIZE) los_size = INIT_LOS_SIZE;
   if(los_size < min_los_size_bytes ) 
     los_size = min_los_size_bytes ;
@@ -95,25 +95,25 @@
   POINTER_SIZE_INT mos_reserve_size, mos_commit_size; 
   POINTER_SIZE_INT los_mos_size;
   
-  /*Give GC a hint of gc survive ratio.*/
+  /*Give GC a hint of the gc survive ratio. The last_survive_ratio field is used in heap size adjustment*/
   gc_gen->survive_ratio = 0.2f;
 
   if(NOS_SIZE){
-    los_mos_size = max_heap_size - NOS_SIZE;
+    los_mos_size = min_heap_size - NOS_SIZE;
     mos_reserve_size = los_mos_size - los_size;  
 
     nos_commit_size = NOS_SIZE;
     nos_reserve_size = NOS_SIZE;
   
   }else{  
-    los_mos_size = max_heap_size;
+    los_mos_size = min_heap_size;
     mos_reserve_size = los_mos_size - los_size;
-    nos_commit_size = (POINTER_SIZE_INT)(((float)(max_heap_size - los_size))/(1.0f + gc_gen->survive_ratio));
+    nos_commit_size = (POINTER_SIZE_INT)(((float)(min_heap_size - los_size))/(1.0f + gc_gen->survive_ratio));
     nos_reserve_size = mos_reserve_size;
   }
     
   nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT);  
-  mos_commit_size = max_heap_size - los_size - nos_commit_size;
+  mos_commit_size = min_heap_size - los_size - nos_commit_size;
 
   /* allocate memory for gc_gen */
   void* reserved_base;
@@ -145,8 +145,8 @@
     }
     reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
   }
-  
-#else /* STATIC_NOS_MAPPING */
+/* NON_STATIC_NOS_MAPPING */  
+#else 
 
   reserved_base = NULL;
   if(large_page_hint){
@@ -160,18 +160,29 @@
     }
   }
   
-  if(reserved_base==NULL){
+  if(reserved_base == NULL){
+    Boolean max_size_reduced = 0;
     reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT);
-    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
-    assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0);
-
     while( !reserved_base ){
-      printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size);  
-      exit(0);      
+      max_size_reduced = 1;
+      max_heap_size -= SPACE_ALLOC_UNIT;
+      reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT);
+    }
+
+    if(max_size_reduced){
+      printf("Max heap size: can't be reserved, reduced to %d MB according to virtual memory limitation.\n", max_heap_size/MB);
+    }
+
+    if(max_heap_size < min_heap_size){
+      printf("Heap size: invalid, please reimput a smaller \"ms\" paramenter!\n");  
+      exit(0);
     }
+    reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
+    assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0);
   }
 
   reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size);
+
     
   /* compute first time nos_boundary */
   nos_base = (void*)((POINTER_SIZE_INT)reserved_base + mos_commit_size + los_size);
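
The rewritten reservation path above no longer aborts on the first failed vm_reserve_mem()
call: it shaves one SPACE_ALLOC_UNIT off the maximum and retries until the reservation
succeeds, then checks the shrunken maximum against the minimum heap size. A standalone
sketch of that idiom, with a hypothetical try_reserve() standing in for vm_reserve_mem()
and an explicit lower bound added inside the loop:

    #include <cstddef>

    void* try_reserve(void* addr, size_t size); /* hypothetical stand-in for vm_reserve_mem() */

    void* reserve_with_shrinking_max(size_t* max_size, size_t alloc_unit, size_t min_size)
    {
      /* Ask for one extra alloc_unit so the base can later be rounded up
       * to an alloc_unit boundary, as the patched code does. */
      void* base = try_reserve(NULL, *max_size + alloc_unit);
      while(!base && *max_size > min_size){
        *max_size -= alloc_unit; /* shave one unit off the maximum and retry */
        base = try_reserve(NULL, *max_size + alloc_unit);
      }
      return base; /* NULL here means even min_size could not be reserved */
    }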
@@ -208,9 +219,6 @@
   nos->collect_algorithm = MINOR_ALGO;
   mos->collect_algorithm = MAJOR_ALGO;
 
-  /*Give GC a hint of space survive ratio.*/
-//  nos->survive_ratio = gc_gen->survive_ratio;
-//  mos->survive_ratio = gc_gen->survive_ratio;
   gc_space_tuner_initialize((GC*)gc_gen);
 
   gc_gen_mode_adapt_init(gc_gen);
@@ -219,7 +227,6 @@
                                 space_committed_size((Space*)gc_gen->mos) +
                                 space_committed_size((Space*)gc_gen->los);
   
-
   set_native_finalizer_thread_flag(!IGNORE_FINREF);
   set_native_ref_enqueue_thread_flag(!IGNORE_FINREF);
   
@@ -271,12 +278,6 @@
 
 unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
 
-
-static Boolean major_collection_needed(GC_Gen* gc)
-{
-  return space_used_memory_size((Blocked_Space*)gc->nos)*gc->survive_ratio > (space_free_memory_size((Blocked_Space*)gc->mos));
-}
-
 Boolean FORCE_FULL_COMPACT = FALSE;
 
 void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
@@ -356,6 +357,69 @@
   return;     
 }
 
+void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time)
+{
+  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) return;
+  if(gc->committed_heap_size == max_heap_size_bytes) return;
+  
+  Mspace* mos = gc->mos;
+  Fspace* nos = gc->nos;
+  Lspace* los = gc->los;
+  /*We cannot tolerate gc->survive_ratio being greater than the threshold twice in a row;
+   *otherwise, we must adjust the heap size.
+   */
+  static unsigned int tolerate = 0;
+
+  POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size;
+  assert(heap_total_size == gc->committed_heap_size);
+
+  assert(nos->surviving_size == 0);  
+  POINTER_SIZE_INT heap_surviving_size = mos->surviving_size + los->surviving_size; 
+  assert(heap_total_size > heap_surviving_size);
+
+  float heap_survive_ratio = (float)heap_surviving_size / (float)heap_total_size;
+  float threshold_survive_ratio = 0.3f;
+  float regular_survive_ratio = 0.125f;
+
+  POINTER_SIZE_INT new_heap_total_size = 0;
+  POINTER_SIZE_INT adjust_size = 0;
+
+  if(heap_survive_ratio < threshold_survive_ratio) return;
+
+  if(++tolerate < 2) return;
+  tolerate = 0;
+  
+  new_heap_total_size = (POINTER_SIZE_INT)((float)heap_surviving_size / regular_survive_ratio);
+  new_heap_total_size = round_down_to_size(new_heap_total_size, SPACE_ALLOC_UNIT);
+
+
+  if(new_heap_total_size <= heap_total_size) return;
+  if(new_heap_total_size > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) 
+    new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+
+  adjust_size = new_heap_total_size - heap_total_size;
+  assert( !(adjust_size % SPACE_ALLOC_UNIT) );
+  if(adjust_size == 0) return;
+  
+#ifdef STATIC_NOS_MAPPING
+  /*Fixme: Static mapping has other bugs that must be fixed first.*/
+  assert(!large_page_hint);
+  return;
+#else
+  assert(!large_page_hint);
+  POINTER_SIZE_INT old_nos_size = nos->committed_heap_size;
+  blocked_space_extend(nos, (unsigned int)adjust_size);
+  nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+  /*Fixme: gc fields should be updated to reflect the nos extension*/
+  gc->committed_heap_size += adjust_size;
+  //debug_adjust
+  assert(gc->committed_heap_size == los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size);
+#endif
+  
+// printf("heap_size: %x MB , heap_survive_ratio: %f\n", gc->committed_heap_size/MB, heap_survive_ratio);
+
+}
+
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
 
 void gc_gen_reclaim_heap(GC_Gen* gc)
@@ -372,41 +436,31 @@
   if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
     /* FIXME:: move_object is only useful for nongen_slide_copy */
     gc->mos->move_object = FALSE;
-    
     fspace_collection(gc->nos);
-    
     gc->mos->move_object = TRUE;      
-
-      
   }else{
-
     /* process mos and nos together in one compaction */
     mspace_collection(gc->mos); /* fspace collection is included */
     lspace_collection(gc->los);
-
   }
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
-    if(gc_is_gen_mode())
-      gc_clear_remset((GC*)gc);  
+    if(gc_is_gen_mode()) gc_clear_remset((GC*)gc);  
     
     /* runout mspace in minor collection */
     assert(mspace->free_block_idx == mspace->ceiling_block_idx + 1);
     mspace->num_used_blocks = mspace->num_managed_blocks;
 
     IS_FALLBACK_COMPACTION = TRUE;
-
     gc_reset_collect_result((GC*)gc);
     gc->collect_kind = FALLBACK_COLLECTION;    
 
-    if(verify_live_heap)
-      event_gc_collect_kind_changed((GC*)gc);
+    if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc);
 
     mspace_collection(gc->mos); /* fspace collection is included */
     lspace_collection(gc->los);
     
     IS_FALLBACK_COMPACTION = FALSE;
-    
   }
   
   if( gc->collect_result == FALSE){
@@ -423,7 +477,6 @@
 #endif
 
   assert(!gc->los->move_object);
-
   return;
 }
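
The new gc_gen_adjust_heap_size() above only grows the heap when the measured survive
ratio exceeds a threshold on two major collections, and then sizes the heap so that the
surviving bytes would correspond to a more comfortable regular ratio. A simplified,
hypothetical restatement of that trigger (unlike the patch, this sketch also resets the
tolerance counter after a healthy collection):

    #include <cstddef>

    /* Returns the proposed new total heap size in bytes, or 0 when no
     * extension is needed. The constants mirror those in the patch. */
    static size_t propose_heap_extension(size_t total_size, size_t surviving_size,
                                         size_t max_size, unsigned int* tolerate)
    {
      const float threshold_survive_ratio = 0.3f;   /* trigger level */
      const float regular_survive_ratio   = 0.125f; /* target after extension */

      float survive_ratio = (float)surviving_size / (float)total_size;
      if(survive_ratio < threshold_survive_ratio){ *tolerate = 0; return 0; }
      if(++*tolerate < 2) return 0; /* tolerate one over-threshold collection */
      *tolerate = 0;

      size_t new_size = (size_t)((float)surviving_size / regular_survive_ratio);
      if(new_size > max_size) new_size = max_size;
      return (new_size > total_size) ? new_size : 0;
    }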
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Thu May 24 01:17:25 2007
@@ -169,6 +169,8 @@
 void gc_gen_reclaim_heap(GC_Gen* gc);
 
 void gc_gen_assign_free_area_to_mutators(GC_Gen* gc);
+
+void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time);
   
 void gc_gen_mode_adapt_init(GC_Gen *gc);
 
@@ -177,5 +179,6 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp Thu May 24 01:17:25 2007
@@ -25,11 +25,9 @@
 #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<1)
 /*Tune this number in case MOS could be too small, so as to avoid or postpone fallback.*/
 #define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB)
-/*Switch on this MACRO when we want lspace->survive_ratio to be sensitive.*/
-//#define NOS_SURVIVE_RATIO_SENSITIVE
 
 struct Mspace;
-void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
+void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio);
 
 static float Tslow = 0.0f;
 static POINTER_SIZE_INT SMax = 0;
@@ -187,7 +185,7 @@
   return;
 }
 
-void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
+void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio);
 
 static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
 {
@@ -200,7 +198,8 @@
   POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace);
   POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
   if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE;
-  if(!gc->force_gen_mode){  
+  if(!gc->force_gen_mode){
+    /*For major collection:*/
     if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){
       mspace->time_collections += pause_time;
   
@@ -213,48 +212,44 @@
       
       /*If the major collection is caused by LOS, or the collection kind is EXTEND_COLLECTION, no survive ratio is updated.*/
       if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
-        POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
-        survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace);
+        POINTER_SIZE_INT major_surviving_size = space_committed_size((Space*)mspace) - mos_free_size;
+        survive_ratio = (float)major_surviving_size/(float)space_committed_size((Space*)mspace);
         mspace->survive_ratio = survive_ratio;
       }
-      /*For LOS_Shrink:*/
-      if(gc->tuner->kind != TRANS_NOTHING){
-        POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1;
-        mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
-      }
-  #ifdef NOS_SURVIVE_RATIO_SENSITIVE
-      /*If this major is caused by fall back compaction, 
-         we must give fspace->survive_ratio a conservative and reasonable number to avoid next fall back.*/
-      //fspace->survive_ratio = mspace->survive_ratio;
-      /*In fallback compaction, the survive_ratio of mspace must be 1.*/
-      if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION))
-      	fspace->survive_ratio = 1;
-      	
-  #endif
+      /*If there has been no minor collection at all, we must give the mspace expected threshold a reasonable value.*/
+      if((gc->tuner->kind != TRANS_NOTHING) && (fspace->num_collections == 0))
+        mspace_set_expected_threshold_ratio((Mspace *)mspace, 0.5f);
+      /*If this major collection is caused by fallback compaction, we must give fspace->survive_ratio
+       *a conservative and reasonable value to avoid the next fallback.
+       *In fallback compaction, the survive_ratio of mspace must be 1.*/
+      if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) fspace->survive_ratio = 1;
+    /*For minor collection:*/    
     }else{
       /*Give a hint to mini_free_ratio. */
       if(fspace->num_collections == 1){
         /*fixme: This is only set for tuning the first warehouse!*/
         Tslow = pause_time / gc->survive_ratio;
-        SMax = (POINTER_SIZE_INT)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio ));
+        SMax = (POINTER_SIZE_INT)((float)(gc->committed_heap_size - gc->los->committed_heap_size) * ( 1 - gc->survive_ratio ));
         last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
       }
   
       fspace->time_collections += pause_time;  
       POINTER_SIZE_INT free_size_threshold;
-        
-      POINTER_SIZE_INT minor_survive_size = last_total_free_size - total_free_size;
+
+      POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;
   
       float k = Tslow * fspace->num_collections/fspace->time_collections;
-      float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
+      float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
       float free_ratio_threshold = mini_free_ratio(k, m);
-      free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+
+      if(SMax > GC_MOS_MIN_EXTRA_REMAIN_SIZE)
+        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
+      else
+        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * SMax);
   
-      if ((mos_free_size + nos_free_size)< free_size_threshold)  {
-        gc->force_major_collect = TRUE;
-      }
+      if ((mos_free_size + nos_free_size)< free_size_threshold) gc->force_major_collect = TRUE;
   
-      survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
+      survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)fspace);
       fspace->survive_ratio = survive_ratio;
       /*For LOS_Adaptive*/
       POINTER_SIZE_INT mspace_committed_size = space_committed_size((Space*)mspace);
@@ -262,12 +257,12 @@
       if(mspace_committed_size  + fspace_committed_size > free_size_threshold){
         POINTER_SIZE_INT mspace_size_threshold;
         mspace_size_threshold = mspace_committed_size  + fspace_committed_size - free_size_threshold;
-        mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
+        float mspace_size_threshold_ratio = (float)mspace_size_threshold / (mspace_committed_size  + fspace_committed_size);
+        mspace_set_expected_threshold_ratio((Mspace *)mspace, mspace_size_threshold_ratio);
       }
     }
-    
-    gc->survive_ratio =  (gc->survive_ratio + survive_ratio)/2.0f;
   
+    gc->survive_ratio =  (gc->survive_ratio + survive_ratio)/2.0f;
     last_total_free_size = total_free_size;
   }
 
@@ -295,7 +290,9 @@
 #ifdef STATIC_NOS_MAPPING
     total_size = max_heap_size_bytes - lspace->committed_heap_size;
 #else
-    total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start;
+    POINTER_SIZE_INT curr_heap_commit_end = 
+                              (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+    total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mspace->heap_start;
 #endif
 
   POINTER_SIZE_INT total_free = total_size - used_mos_size;
@@ -306,16 +303,15 @@
   POINTER_SIZE_INT nos_reserve_size;
   nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio));
   /*NOS should not be zero if there is only one block in non-los, i.e. in the preceding if statement:
-    if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
-    and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/
+    *if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
+    *and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/
   if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES)  nos_reserve_size = GC_BLOCK_SIZE_BYTES;
 
 #ifdef STATIC_NOS_MAPPING
   if(nos_reserve_size > fspace->reserved_heap_size) nos_reserve_size = fspace->reserved_heap_size;
 #endif  
-  //To reserve some MOS space to avoid fallback situation. 
-  //But we need ensure nos has at least one block 
-  //if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ;
+  /*Reserve some MOS space to avoid a fallback situation,
+   *but make sure NOS keeps at least one block.*/
   POINTER_SIZE_INT reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE;
   while (reserve_in_mos >= GC_BLOCK_SIZE_BYTES){
     if(nos_reserve_size >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){
@@ -328,7 +324,7 @@
   new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, GC_BLOCK_SIZE_BYTES); 
 
   if(gc->force_gen_mode){
-    new_nos_size = min_nos_size_bytes;//round_down_to_size((unsigned int)(gc->gen_minor_adaptor->adapt_nos_size), SPACE_ALLOC_UNIT);
+    new_nos_size = min_nos_size_bytes;
   }
   
   new_mos_size = total_size - new_nos_size;
@@ -342,7 +338,6 @@
 }
 
 #ifndef STATIC_NOS_MAPPING
-
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
 {
   gc_decide_next_collect(gc, pause_time);
@@ -366,8 +361,9 @@
     return;
   
   /* below are the adjustments */
-
-  nos_boundary = (void*)((POINTER_SIZE_INT)gc->heap_end - new_nos_size);
+  POINTER_SIZE_INT curr_heap_commit_end = 
+                             (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+  nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);
 
   fspace->heap_start = nos_boundary;
   fspace->blocks = (Block*)nos_boundary;
@@ -394,8 +390,8 @@
   return;
 }
 
-#else /* ifndef STATIC_NOS_MAPPING */
-
+/* ifdef STATIC_NOS_MAPPING */
+#else
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
 {
   gc_decide_next_collect(gc, pause_time);
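
The interesting change inside gc_decide_next_collect() above is the guard around
free_size_threshold: when SMax is not larger than GC_MOS_MIN_EXTRA_REMAIN_SIZE, the old
expression (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE) would wrap around on the unsigned
POINTER_SIZE_INT type and yield a huge bogus threshold. A hypothetical extraction of the
guarded computation:

    #include <cstddef>

    /* Hypothetical helper, not part of the patch: compute the free-size
     * threshold without risking unsigned wrap-around. */
    static size_t free_size_threshold_for(float free_ratio_threshold,
                                          size_t SMax, size_t mos_min_extra)
    {
      if(SMax > mos_min_extra)
        return (size_t)(free_ratio_threshold * (SMax - mos_min_extra) + mos_min_extra);
      return (size_t)(free_ratio_threshold * SMax); /* degenerate small-heap case */
    }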

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Thu May 24 01:17:25 2007
@@ -96,7 +96,7 @@
 /* for marking phase termination detection */
 static volatile unsigned int num_finished_collectors = 0;
 
-void fallback_mark_scan_heap(Collector* collector)
+void mark_scan_heap_for_fallback(Collector* collector)
 { 
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
@@ -208,4 +208,5 @@
   fspace_block_iterate_init((Fspace*)((GC_Gen*)collector->gc)->nos);
 }
 #endif
+
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp Thu May 24 01:17:25 2007
@@ -108,7 +108,7 @@
   So we abandoned this design. We no longer use the repset to remember repointed slots 
 */
   
-void los_adaptation_mark_scan_heap(Collector *collector)
+void mark_scan_heap_for_space_tune(Collector *collector)
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Thu May 24 01:17:25 2007
@@ -75,8 +75,7 @@
   mspace->move_object = TRUE;
   mspace->gc = gc;
 
-  /*For_LOS adaptive: The threshold is initiated by half of MOS + NOS commit size.*/
-  mspace->expected_threshold = (unsigned int)( ( (float)mspace->committed_heap_size * (1.f + 1.f / gc->survive_ratio) ) * 0.5f );
+  mspace->expected_threshold_ratio = 0.5f;
 
   gc_set_mos((GC_Gen*)gc, (Space*)mspace);
 
@@ -96,22 +95,13 @@
   mspace->block_iterator = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
 }
 
-//For_LOS_extend
 #include "../common/space_tuner.h"
 void mspace_block_iterator_init(Mspace* mspace)
 {
-  GC* gc = mspace->gc;
-  if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){
-    unsigned int tuning_blocks = (unsigned int)((mspace->gc)->tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
-    mspace->block_iterator = (Block_Header*)&(mspace->blocks[tuning_blocks]);
-    return;
-  }
-  
   mspace->block_iterator = (Block_Header*)mspace->blocks;
   return;
 }
 
-
 Block_Header* mspace_block_iterator_get(Mspace* mspace)
 {
   return (Block_Header*)mspace->block_iterator;
@@ -165,15 +155,15 @@
 }
 
 /*For_LOS adaptive.*/
-void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold)
+void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio)
 {
-    mspace->expected_threshold = threshold;
+    mspace->expected_threshold_ratio = threshold_ratio;
     return;
 }
 
-POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace)
+float mspace_get_expected_threshold_ratio(Mspace* mspace)
 {
-    return mspace->expected_threshold;
+    return mspace->expected_threshold_ratio;
 }
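
Storing a ratio instead of an absolute byte count keeps the expected threshold meaningful
now that the committed heap size can change between collections. A hypothetical use site
(not part of the patch) would convert the ratio back to bytes against the current
committed sizes:

    /* Hypothetical: byte threshold recomputed at use time, so it stays
     * valid after the heap has been extended. */
    POINTER_SIZE_INT mspace_threshold_bytes(Mspace* mspace, Fspace* fspace)
    {
      POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
      return (POINTER_SIZE_INT)(mspace_get_expected_threshold_ratio(mspace) * (float)non_los_size);
    }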
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Thu May 24 01:17:25 2007
@@ -56,7 +56,7 @@
   
   volatile Block_Header* block_iterator;    
   /*Threshold ratio computed by NOS adaptive*/
-  POINTER_SIZE_INT expected_threshold;
+  float expected_threshold_ratio;
 }Mspace;
 
 void mspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size);
@@ -72,6 +72,6 @@
 
 void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace);
 
-void mspace_set_expected_threshold(Mspace* mspace, unsigned int threshold);
+void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio);
 
 #endif //#ifdef _MSC_SPACE_H_

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp Thu May 24 01:17:25 2007
@@ -29,72 +29,30 @@
 static volatile Block_Header* next_block_for_compact;
 static volatile Block_Header* next_block_for_target;
 
-void mspace_update_info_for_los_extension(Mspace *mspace)
-{ 
-  Space_Tuner *tuner = mspace->gc->tuner;
-  
-  if(tuner->kind != TRANS_FROM_MOS_TO_LOS) return;
-  
-  POINTER_SIZE_INT tune_size = tuner->tuning_size;
-  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
-
-#ifdef USE_32BITS_HASHCODE
-  unsigned int index = 0;
-  for(; index < tune_blocks; index++){
-    Block* curr_block = &mspace->blocks[index];
-    hashcode_buf_destory(((Block_Header*)curr_block)->hashcode_buf);
-  }
-#endif
-
-  mspace->blocks = &mspace->blocks[tune_blocks];
-  mspace->heap_start = mspace->blocks;
-  mspace->committed_heap_size -= tune_size;
-  mspace->reserved_heap_size -= tune_size;
-  mspace->first_block_idx += tune_blocks;
-  mspace->num_managed_blocks -= tune_blocks;
-  mspace->num_total_blocks -= tune_blocks;
-  if(mspace->num_used_blocks > tune_blocks)
-    mspace->num_used_blocks -= tune_blocks;
-  else
-    mspace->num_used_blocks = 0;
-}
-
-void mspace_update_info_for_los_shrink(Mspace* mspace)
+void mspace_update_info_after_space_tuning(Mspace* mspace)
 {
   Space_Tuner *tuner = mspace->gc->tuner;
-  if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return;
-
-  POINTER_SIZE_INT tune_size = tuner->tuning_size;
-  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
-
-  /*Update mspace infomation.*/
-  mspace->blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
-  mspace->heap_start = (void*)(mspace->blocks);
-  mspace->committed_heap_size += tune_size;
-  mspace->first_block_idx -= tune_blocks;
-  mspace->num_managed_blocks += tune_blocks;
-  mspace->num_total_blocks += tune_blocks;
-}
-
-/*Copy the fake blocks into real blocks, reconnect these new block into main list of mspace.*/
-void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace)
-{
-  Space_Tuner *tuner = mspace->gc->tuner;  
-  if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return;  
-
   POINTER_SIZE_INT tune_size = tuner->tuning_size;
   unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
-
-  Block* blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
-  unsigned int i;
-  for(i=0; i < tune_blocks; i++){
-    Block_Header* real_block = (Block_Header*)&(blocks[i]);
-    Block_Header* fake_block = &tuner->interim_blocks[i];
-    memcpy((void*)real_block, (void*)fake_block, sizeof(Block_Header));
-    real_block->next = (Block_Header*)((POINTER_SIZE_INT)real_block + GC_BLOCK_SIZE_BYTES);
+  
+  if(tuner->kind == TRANS_FROM_MOS_TO_LOS){
+    mspace->blocks = &mspace->blocks[tune_blocks];
+    mspace->heap_start = mspace->blocks;
+    mspace->committed_heap_size -= tune_size;
+    mspace->reserved_heap_size -= tune_size;
+    mspace->first_block_idx += tune_blocks;
+    mspace->num_managed_blocks -= tune_blocks;
+    mspace->num_total_blocks -= tune_blocks;
+    if(mspace->num_used_blocks > tune_blocks) mspace->num_used_blocks -= tune_blocks;
+    else mspace->num_used_blocks = 0;
+  }else if(tuner->kind == TRANS_FROM_LOS_TO_MOS){
+    mspace->blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
+    mspace->heap_start = (void*)(mspace->blocks);
+    mspace->committed_heap_size += tune_size;
+    mspace->first_block_idx -= tune_blocks;
+    mspace->num_managed_blocks += tune_blocks;
+    mspace->num_total_blocks += tune_blocks;
   }
-
-  return;
 }
 
 void mspace_reset_after_compaction(Mspace* mspace)
@@ -158,6 +116,9 @@
   Block_Header* block;
   Space_Tuner* tuner = gc->tuner;
   Block_Header* nos_last_block;
+  Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];  
+  unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);  
+  
   /*Needn't change LOS size.*/
   if(tuner->kind == TRANS_NOTHING){
     for(i=0; i<gc->num_active_collectors; i++){
@@ -182,8 +143,7 @@
     else
       /*If nos->num_managed_blocks is zero, we take mos_last_block as nos_last_block instead.*/
       nos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks - 1];
-    Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
-    unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+
     nos_last_block->next = mos_first_block;
     ((Block_Header*)&(mspace->blocks[trans_blocks - 1]))->next = NULL;
     
@@ -201,39 +161,17 @@
     return;
   }else
   {
-    Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
-    unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
-    gc->tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header));
-    Block_Header* los_trans_fake_blocks = gc->tuner->interim_blocks;
-    memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header));
-    void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size);
-    unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base);
-    Block_Header* last_block = los_trans_fake_blocks;
-
-    for(i = 0; i < trans_blocks; i ++){
-        Block_Header* curr_block = &los_trans_fake_blocks[i];
-        curr_block->block_idx = start_idx + i;
-        curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES);
-        curr_block->free = curr_block->base ;
-        curr_block->new_free = curr_block->free;
-        curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES);
-        curr_block->status = BLOCK_COMPACTED;
-#ifdef USE_32BITS_HASHCODE
-        curr_block->hashcode_buf = hashcode_buf_create();
-#endif
-        last_block->next = curr_block;
-        last_block = curr_block;
-    }
-    last_block->next = mos_first_block;
+    gc_space_tuner_init_fake_blocks_for_los_shrink(gc);
 
     Collector* collector = gc->collectors[0];
-    collector->cur_target_block = los_trans_fake_blocks;
+    collector->cur_target_block = tuner->interim_blocks;
     collector->cur_target_block->status = BLOCK_TARGET;
+
     if(trans_blocks >= gc->num_active_collectors)
       collector->cur_compact_block = mos_first_block;
     else
-      collector->cur_compact_block = los_trans_fake_blocks;
-            
+      collector->cur_compact_block = gc->tuner->interim_blocks;
+
     collector->cur_compact_block->status = BLOCK_IN_COMPACT;
     
     for(i=1; i< gc->num_active_collectors; i++){

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h Thu May 24 01:17:25 2007
@@ -28,9 +28,7 @@
 void gc_reset_block_for_collectors(GC* gc, Mspace* mspace);
 void gc_init_block_for_collectors(GC* gc, Mspace* mspace);
 
-void mspace_update_info_for_los_extension(Mspace* mspace);
-void mspace_update_info_for_los_shrink(Mspace* mspace);
-void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace);
+void mspace_update_info_after_space_tuning(Mspace* mspace);
 void mspace_reset_after_compaction(Mspace* mspace);
 
 Block_Header* mspace_get_first_compact_block(Mspace* mspace);
@@ -41,8 +39,8 @@
 void slide_compact_mspace(Collector* collector);
 void move_compact_mspace(Collector* collector);
 
-void fallback_mark_scan_heap(Collector* collector);
-void los_adaptation_mark_scan_heap(Collector *collector);
+void mark_scan_heap_for_fallback(Collector* collector);
+void mark_scan_heap_for_space_tune(Collector *collector);
 
 void mspace_extend_compact(Collector *collector);
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?view=diff&rev=541224&r1=541223&r2=541224
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Thu May 24 01:17:25 2007
@@ -170,7 +170,7 @@
   if(!gc_match_kind(gc, FALLBACK_COLLECTION))
        mark_scan_heap(collector);  
   else
-       fallback_mark_scan_heap(collector);
+       mark_scan_heap_for_fallback(collector);
 
   old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
@@ -225,7 +225,6 @@
     /* last collector's world here */
     lspace_fix_repointed_refs(collector, lspace);   
     gc_fix_rootset(collector);
-    mspace_update_info_for_los_extension(mspace);
     num_fixing_collectors++; 
   }
   while(num_fixing_collectors != num_active_collectors + 1);