Posted to commits@harmony.apache.org by wj...@apache.org on 2007/01/11 14:57:19 UTC

svn commit: r495225 [1/5] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen: javasrc/org/apache/harmony/drlvm/gc_gen/ src/common/ src/finalizer_weakref/ src/gen/ src/jni/ src/mark_compact/ src/mark_sweep/ src/thread/ src/trace_forward/ src/utils/ src/verify/

Author: wjwashburn
Date: Thu Jan 11 05:57:16 2007
New Revision: 495225

URL: http://svn.apache.org/viewvc?view=rev&rev=495225
Log:
Harmony 2945, Harmony 2965
parallel generational and non-generational collectors added
parallel moving compactor added
passes "build test" on winxp and liux rhel4 w/ gcc 4.0.2


Added:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_space.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/space_tuner.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_adapt.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_natives.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/java_support.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
Modified:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/interior_pointer.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/mark_scan.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gc_for_barrier.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/jni/helper.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/free_area_pool.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/gc_thread.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bidir_list.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/bit_ops.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_pool.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/sync_stack.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/utils/vector_block.h
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.h

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java Thu Jan 11 05:57:16 2007
@@ -26,12 +26,14 @@
 
 public class GCHelper {
 
-    static {System.loadLibrary("gc_gen");}
+    static {
+      System.loadLibrary("gc_gen");
+      helperCallback();
+    }
 
     public static final int TLS_GC_OFFSET = TLSGCOffset();
 
-    public static Address alloc(int objSize, int allocationHandle)   throws InlinePragma {
-  
+    public static Address alloc(int objSize, int allocationHandle) throws InlinePragma {
         Address TLS_BASE = VMHelper.getTlsBaseAddress();
 
         Address allocator_addr = TLS_BASE.plus(TLS_GC_OFFSET);
@@ -67,29 +69,28 @@
         return VMHelper.newVectorUsingAllocHandle(arrayLen, elemSize, allocationHandle);
     }
 
-
-
     /** NOS (nursery object space) is higher in address than other spaces.
        The boundary currently is produced in GC initialization. It can
        be a constant in future.
     */
-    public static final int NOS_BOUNDARY = getNosBoundary();
 
-    public static void write_barrier_slot_rem(Address p_objBase, Address p_objSlot, Address p_source)  throws InlinePragma {
+    public static final int NOS_BOUNDARY = getNosBoundary();
+    public static boolean GEN_MODE = getGenMode();
 
+    public static void write_barrier_slot_rem(Address p_objBase, Address p_objSlot, Address p_target)  throws InlinePragma {
+      
        /* If the slot is in NOS or the target is not in NOS, we simply return*/
-        if(p_objSlot.toInt() >= NOS_BOUNDARY || p_source.toInt() < NOS_BOUNDARY) {
-            p_objSlot.store(p_source);
+        if(p_objSlot.toInt() >= NOS_BOUNDARY || p_target.toInt() < NOS_BOUNDARY || !GEN_MODE) {
+            p_objSlot.store(p_target);
             return;
         }
 
-        /* Otherwise, we need remember it in native code. */
-        VMHelper.writeBarrier(p_objBase, p_objSlot, p_source);
+        VMHelper.writeBarrier(p_objBase, p_objSlot, p_target);
     }
 
-
-    private static native int getNosBoundary();
-    
+    private static native int helperCallback();
+    private static native boolean getGenMode(); 
+    private static native int getNosBoundary();    
     private static native int TLSGCOffset();
 }
 

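The GCHelper hunks above make the write barrier conditional on the new GEN_MODE flag as well as the NOS boundary: a store is remembered only when a slot outside NOS is made to point into NOS while the collector runs generationally. A minimal native-side sketch of that decision, with stand-in types and names that are not part of this commit:

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for the real GC structures; only the boundary
    // test mirrors the GCHelper change above.
    static uintptr_t nos_boundary;                  // NOS occupies [nos_boundary, heap_end)
    static bool      gen_mode;                      // mirrors the new GEN_MODE flag
    static std::vector<uintptr_t*> remembered_set;  // slots holding old-to-young refs

    // Store p_target into *p_slot, remembering the slot only when an
    // old-to-young reference is created and the collector is generational.
    void write_barrier_slot_rem(uintptr_t* p_slot, uintptr_t p_target)
    {
      bool slot_in_nos   = (uintptr_t)p_slot >= nos_boundary;
      bool target_in_nos = p_target >= nos_boundary;
      if (slot_in_nos || !target_in_nos || !gen_mode) {
        *p_slot = p_target;  // no old-to-young edge created: plain store
        return;
      }
      remembered_set.push_back(p_slot);  // recorded for the next minor collection
      *p_slot = p_target;
    }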
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h?view=auto&rev=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/fix_repointed_refs.h Thu Jan 11 05:57:16 2007
@@ -0,0 +1,119 @@
+/*
+ *  Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/12/12
+ */
+ 
+#ifndef _FIX_REPOINTED_REFS_H_
+#define _FIX_REPOINTED_REFS_H_
+
+#include "gc_common.h"
+extern Boolean IS_MOVE_COMPACT;
+
+inline void slot_fix(Partial_Reveal_Object** p_ref)
+{
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if(!p_obj) return;
+
+  if(IS_MOVE_COMPACT){
+    if(obj_is_moved(p_obj))
+      *p_ref = obj_get_fw_in_table(p_obj);
+  }else{
+    if(obj_is_fw_in_oi(p_obj) && obj_is_moved(p_obj)){
+      /* The obj_is_moved(p_obj) condition prevents mistaking the previous mark bit of a large
+       * object for a fw bit when fallback happens: until fallback occurs, the large object may not
+       * have been marked yet, so its mark bit keeps its value from the last collection. In a major
+       * collection, obj_is_fw_in_oi(p_obj) could be omitted: anything scanned in MOS & NOS has its fw bit set in oi.
+       */
+      assert((unsigned int)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
+      *p_ref = obj_get_fw_in_oi(p_obj);
+    }
+  }
+    
+  return;
+}
+
+inline void object_fix_ref_slots(Partial_Reveal_Object* p_obj)
+{
+  if( !object_has_ref_field(p_obj) ) return;
+  
+    /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
+    
+    int32 array_length = array->array_len;
+    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
+    for (int i = 0; i < array_length; i++) {
+      slot_fix(p_refs + i);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+  
+    slot_fix(p_ref);
+    offset_scanner = offset_next_ref(offset_scanner);
+  }
+
+  return;
+}
+
+inline void block_fix_ref_after_copying(Block_Header* curr_block)
+{
+  unsigned int cur_obj = (unsigned int)curr_block->base;
+  unsigned int block_end = (unsigned int)curr_block->free;
+  while(cur_obj < block_end){
+    object_fix_ref_slots((Partial_Reveal_Object*)cur_obj);   
+    cur_obj = (unsigned int)cur_obj + vm_object_size((Partial_Reveal_Object*)cur_obj);
+  }
+  return;
+}
+
+inline void block_fix_ref_after_marking(Block_Header* curr_block)
+{
+  void* start_pos;
+  Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);
+  
+  while( p_obj ){
+    assert( obj_is_marked_in_vt(p_obj));
+    obj_unmark_in_vt(p_obj);
+    object_fix_ref_slots(p_obj);   
+    p_obj = block_get_next_marked_object(curr_block, &start_pos);  
+  }
+  return;
+}
+
+inline void block_fix_ref_after_repointing(Block_Header* curr_block)
+{
+  void* start_pos;
+  Partial_Reveal_Object* p_obj = block_get_first_marked_obj_after_prefetch(curr_block, &start_pos);
+  
+  while( p_obj ){
+    assert( obj_is_marked_in_vt(p_obj));
+    object_fix_ref_slots(p_obj);   
+    p_obj = block_get_next_marked_obj_after_prefetch(curr_block, &start_pos);  
+  }
+  return;
+}
+
+
+#endif /* #ifndef _FIX_REPOINTED_REFS_H_ */

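Note how block_fix_ref_after_copying above needs no mark table: once objects are packed between base and free, each object's size locates the next one, so a linear walk visits every object. A toy version of that walk, with a made-up object layout standing in for Partial_Reveal_Object and vm_object_size():

    #include <cstdio>

    // Objects sit back to back between base and free; a size header field
    // (vm_object_size() in the real code) advances the cursor.
    struct ToyObj { unsigned size; };

    void fix_all(char* base, char* free_ptr)
    {
      char* cur = base;
      while (cur < free_ptr) {
        ToyObj* obj = (ToyObj*)cur;
        printf("fixing ref slots of object at %p (%u bytes)\n", (void*)obj, obj->size);
        cur += obj->size;  // step to the next object in the block body
      }
    }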
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_block.h Thu Jan 11 05:57:16 2007
@@ -21,7 +21,9 @@
 #ifndef _BLOCK_H_
 #define _BLOCK_H_
 
-#include "../common/gc_common.h"
+#include "gc_common.h"
+
+#define SYSTEM_ALLOC_UNIT 0x10000
 
 #define GC_BLOCK_SHIFT_COUNT 15
 #define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT)
@@ -33,17 +35,22 @@
   BLOCK_USED = 0x4,
   BLOCK_IN_COMPACT = 0x8,
   BLOCK_COMPACTED = 0x10,
-  BLOCK_TARGET = 0x20
+  BLOCK_TARGET = 0x20,
+  BLOCK_DEST = 0x40
 };
 
 typedef struct Block_Header {
   void* base;                       
   void* free;                       
   void* ceiling;                    
+  void* new_free; /* used only during compaction */
   unsigned int block_idx;           
   volatile unsigned int status;
+  volatile unsigned int dest_counter;
+  Partial_Reveal_Object* src;
+  Partial_Reveal_Object* next_src;
   Block_Header* next;
-  unsigned int mark_table[1];  /* entry num == MARKBIT_TABLE_SIZE_WORDS */
+  unsigned int table[1]; /* entry num == OFFSET_TABLE_SIZE_WORDS */
 }Block_Header;
 
 typedef union Block{
@@ -51,17 +58,22 @@
     unsigned char raw_bytes[GC_BLOCK_SIZE_BYTES];
 }Block;
 
-#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (unsigned int)&(((Block_Header*)0)->mark_table)
+#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (unsigned int)&(((Block_Header*)0)->table)
 
-/* BlockSize - MarkbitTable*32 = HeaderVars + MarkbitTable
-   => MarkbitTable = (BlockSize - HeaderVars)/33 */
-#define MARKBIT_TABLE_COMPUTE_DIVISOR 33
-/* +1 to round up*/
-#define MARKBIT_TABLE_COMPUTED_SIZE_BYTE ((GC_BLOCK_SIZE_BYTES-GC_BLOCK_HEADER_VARS_SIZE_BYTES)/MARKBIT_TABLE_COMPUTE_DIVISOR + 1)
-#define MARKBIT_TABLE_SIZE_BYTES ((MARKBIT_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD)
-#define MARKBIT_TABLE_SIZE_WORDS (MARKBIT_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
+#define SECTOR_SIZE_SHIFT_COUNT  8
+#define SECTOR_SIZE_BYTES        (1 << SECTOR_SIZE_SHIFT_COUNT)
+#define SECTOR_SIZE_WORDS        (SECTOR_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
+/* one offset_table word maps to one sector of SECTOR_SIZE_WORDS words */
+
+/* BlockSize - OffsetTableSize*SECTOR_SIZE_WORDS = HeaderVarsSize + OffsetTableSize
+   => OffsetTableSize = (BlockSize - HeaderVars)/(SECTOR_SIZE_WORDS+1) */
+#define OFFSET_TABLE_COMPUTE_DIVISOR       (SECTOR_SIZE_WORDS + 1)
+#define OFFSET_TABLE_COMPUTED_SIZE_BYTE ((GC_BLOCK_SIZE_BYTES-GC_BLOCK_HEADER_VARS_SIZE_BYTES)/OFFSET_TABLE_COMPUTE_DIVISOR + 1)
+#define OFFSET_TABLE_SIZE_BYTES ((OFFSET_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD)
+#define OFFSET_TABLE_SIZE_WORDS (OFFSET_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
+#define OBJECT_INDEX_TO_OFFSET_TABLE(p_obj)   (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> SECTOR_SIZE_SHIFT_COUNT)
 
-#define GC_BLOCK_HEADER_SIZE_BYTES (MARKBIT_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
+#define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
 #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES)
 #define GC_BLOCK_BODY(block) ((void*)((unsigned int)(block) + GC_BLOCK_HEADER_SIZE_BYTES))
 #define GC_BLOCK_END(block) ((void*)((unsigned int)(block) + GC_BLOCK_SIZE_BYTES))
@@ -75,122 +87,168 @@
 #define ADDRESS_OFFSET_TO_BLOCK_HEADER(addr) ((unsigned int)((unsigned int)addr&GC_BLOCK_LOW_MASK))
 #define ADDRESS_OFFSET_IN_BLOCK_BODY(addr) ((unsigned int)(ADDRESS_OFFSET_TO_BLOCK_HEADER(addr)- GC_BLOCK_HEADER_SIZE_BYTES))
 
-#define OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj)    (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> 2)
-#define OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj)   (OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj) >> BIT_SHIFT_TO_BITS_PER_WORD)
-#define OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj)  (OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj) & BIT_MASK_TO_BITS_PER_WORD)
-
-inline Partial_Reveal_Object* block_get_first_marked_object(Block_Header* block, unsigned int* mark_bit_idx)
-{
-  unsigned int* mark_table = block->mark_table;
-  unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS;
-  
-  unsigned j=0;
-  unsigned int k=0;
-  while( (mark_table + j) < table_end){
-    unsigned int markbits = *(mark_table+j);
-    if(!markbits){ j++; continue; }
-    while(k<32){
-        if( !(markbits& (1<<k)) ){ k++; continue;}
-        unsigned int word_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
-        Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((unsigned int*)GC_BLOCK_BODY(block) + word_index);
-        assert(obj_is_marked_in_vt(p_obj)); 
-        
-        *mark_bit_idx = word_index;
-      return p_obj;
-    }
-    j++;
-    k=0;
-  }          
-  *mark_bit_idx = 0;
-  return NULL;   
-}
-
-inline Partial_Reveal_Object* block_get_next_marked_object(Block_Header* block, unsigned int* mark_bit_idx)
-{
-  unsigned int* mark_table = block->mark_table;
-  unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS;
-  unsigned int bit_index = *mark_bit_idx;
-  
-  unsigned int j = bit_index >> BIT_SHIFT_TO_BITS_PER_WORD;
-  unsigned int k = (bit_index & BIT_MASK_TO_BITS_PER_WORD) + 1;  
-     
-  while( (mark_table + j) < table_end){
-    unsigned int markbits = *(mark_table+j);
-    if(!markbits){ j++; continue; }
-    while(k<32){
-      if( !(markbits& (1<<k)) ){ k++; continue;}
-      
-      unsigned int word_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((unsigned int*)GC_BLOCK_BODY(block) + word_index);      
-      assert(obj_is_marked_in_vt(p_obj));
-      
-      *mark_bit_idx = word_index;
-      return p_obj;
-    }
-    j++;
-    k=0;
-  }        
-  
-  *mark_bit_idx = 0;
-  return NULL;   
+inline void block_init(Block_Header* block)
+{
+  block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES);
+  block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); 
+  block->base = block->free;
+  block->new_free = block->free;
+  block->status = BLOCK_FREE;
+  block->dest_counter = 0;
+  block->src = NULL;
+  block->next_src = NULL;
+}
 
+inline Partial_Reveal_Object *obj_end(Partial_Reveal_Object *obj)
+{
+  return (Partial_Reveal_Object *)((unsigned int)obj + vm_object_size(obj));
 }
 
-inline void block_clear_mark_table(Block_Header* block)
+inline Partial_Reveal_Object *next_marked_obj_in_block(Partial_Reveal_Object *cur_obj, Partial_Reveal_Object *block_end)
 {
-  unsigned int* mark_table = block->mark_table;
-  memset(mark_table, 0, MARKBIT_TABLE_SIZE_BYTES);
-  return;
+  while(cur_obj < block_end){
+    if( obj_is_marked_in_vt(cur_obj))
+      return cur_obj;
+    cur_obj = obj_end(cur_obj);
+  }
+  
+  return NULL;
+}
+
+inline Partial_Reveal_Object* block_get_first_marked_object(Block_Header* block, void** start_pos)
+{
+  Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*)block->base;
+  Partial_Reveal_Object* block_end = (Partial_Reveal_Object*)block->free;
+
+  Partial_Reveal_Object* first_marked_obj = next_marked_obj_in_block(cur_obj, block_end);
+  if(!first_marked_obj)
+    return NULL;
+  
+  *start_pos = obj_end(first_marked_obj);
+  
+  return first_marked_obj;
+}
+
+inline Partial_Reveal_Object* block_get_next_marked_object(Block_Header* block, void** start_pos)
+{
+  Partial_Reveal_Object* cur_obj = *(Partial_Reveal_Object**)start_pos;
+  Partial_Reveal_Object* block_end = (Partial_Reveal_Object*)block->free;
+
+  Partial_Reveal_Object* next_marked_obj = next_marked_obj_in_block(cur_obj, block_end);
+  if(!next_marked_obj)
+    return NULL;
+  
+  *start_pos = obj_end(next_marked_obj);
+  
+  return next_marked_obj;
+}
+
+inline Partial_Reveal_Object *block_get_first_marked_obj_prefetch_next(Block_Header *block, void **start_pos)
+{
+  Partial_Reveal_Object *cur_obj = (Partial_Reveal_Object *)block->base;
+  Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free;
+  
+  Partial_Reveal_Object *first_marked_obj = next_marked_obj_in_block(cur_obj, block_end);
+  if(!first_marked_obj)
+    return NULL;
+  
+  Partial_Reveal_Object *next_obj = obj_end(first_marked_obj);
+  *start_pos = next_obj;
+  
+  if(next_obj >= block_end)
+    return first_marked_obj;
+  
+  Partial_Reveal_Object *next_marked_obj = next_marked_obj_in_block(next_obj, block_end);
+  
+  if(next_marked_obj){
+    if(next_marked_obj != next_obj)
+      set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj);
+  } else {
+    set_obj_info(next_obj, 0);
+  }
+  
+  return first_marked_obj;
+}
+
+inline Partial_Reveal_Object *block_get_first_marked_obj_after_prefetch(Block_Header *block, void **start_pos)
+{
+  return block_get_first_marked_object(block, start_pos);
+}
+
+inline Partial_Reveal_Object *block_get_next_marked_obj_prefetch_next(Block_Header *block, void **start_pos)
+{
+  Partial_Reveal_Object *cur_obj = *(Partial_Reveal_Object **)start_pos;
+  Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free;
+
+  if(cur_obj >= block_end)
+    return NULL;
+  
+  Partial_Reveal_Object *cur_marked_obj;
+  
+  if(obj_is_marked_in_vt(cur_obj))
+    cur_marked_obj = cur_obj;
+  else
+    cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj);
+  
+  if(!cur_marked_obj)
+    return NULL;
+  
+  Partial_Reveal_Object *next_obj = obj_end(cur_marked_obj);
+  *start_pos = next_obj;
+  
+  if(next_obj >= block_end)
+    return cur_marked_obj;
+  
+  Partial_Reveal_Object *next_marked_obj = next_marked_obj_in_block(next_obj, block_end);
+  
+  if(next_marked_obj){
+    if(next_marked_obj != next_obj)
+      set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj);
+  } else {
+    set_obj_info(next_obj, 0);
+  }
+  
+  return cur_marked_obj;  
 }
 
-inline void block_clear_markbits(Block_Header* block)
+inline Partial_Reveal_Object *block_get_next_marked_obj_after_prefetch(Block_Header *block, void **start_pos)
 {
-  unsigned int* mark_table = block->mark_table;
-  unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS;
+  Partial_Reveal_Object *cur_obj = *(Partial_Reveal_Object **)start_pos;
+  Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free;
+
+  if(cur_obj >= block_end)
+    return NULL;
+  
+  Partial_Reveal_Object *cur_marked_obj;
+  
+  if(obj_is_marked_in_vt(cur_obj) || obj_is_fw_in_oi(cur_obj))
+    cur_marked_obj = cur_obj;
+  else
+    cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj);
+  
+  if(!cur_marked_obj)
+    return NULL;
   
-  unsigned j=0;
-  while( (mark_table + j) < table_end){
-    unsigned int markbits = *(mark_table+j);
-    if(!markbits){ j++; continue; }
-    unsigned int k=0;
-    while(k<32){
-        if( !(markbits& (1<<k)) ){ k++; continue;}
-        unsigned int word_index = (j<<BIT_SHIFT_TO_BITS_PER_WORD) + k;
-        Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)((unsigned int*)GC_BLOCK_BODY(block) + word_index);
-        assert(obj_is_marked_in_vt(p_obj));
-        obj_unmark_in_vt(p_obj);
-        k++;
-    }
-    j++;
-  } 
-
-  block_clear_mark_table(block);
-  return;     
-}
-
-typedef struct Blocked_Space {
-  /* <-- first couple of fields are overloadded as Space */
-  void* heap_start;
-  void* heap_end;
-  unsigned int reserved_heap_size;
-  unsigned int committed_heap_size;
-  unsigned int num_collections;
-  GC* gc;
-  Boolean move_object;
-  Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
-  /* END of Space --> */
-
-  Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
-  
-  /* FIXME:: the block indices should be replaced with block header addresses */
-  unsigned int first_block_idx;
-  unsigned int ceiling_block_idx;
-  volatile unsigned int free_block_idx;
-  
-  unsigned int num_used_blocks;
-  unsigned int num_managed_blocks;
-  unsigned int num_total_blocks;
-  /* END of Blocked_Space --> */
-}Blocked_Space;
+  Partial_Reveal_Object *next_obj = obj_end(cur_marked_obj);
+  *start_pos = next_obj;
+  
+  return cur_marked_obj;
+}
+
+inline Partial_Reveal_Object * obj_get_fw_in_table(Partial_Reveal_Object *p_obj)
+{
+  /* only for inter-sector compaction */
+  unsigned int index    = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
+  Block_Header *curr_block = GC_BLOCK_HEADER(p_obj);
+  return (Partial_Reveal_Object *)(((unsigned int)p_obj) - curr_block->table[index]);
+}
+
+inline void block_clear_table(Block_Header* block)
+{
+  unsigned int* table = block->table;
+  memset(table, 0, OFFSET_TABLE_SIZE_BYTES);
+  return;
+}
+
 
 #endif //#ifndef _BLOCK_H_

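The mark table is gone from Block_Header: the block now ends in an offset table with one word per 256-byte sector, and obj_get_fw_in_table() computes a moved object's new address as p_obj minus its sector's slide distance. With the constants above (32 KB blocks, 4-byte words), the sizing works out as in this sketch; HEADER_VARS is illustrative, the real value being offsetof(Block_Header, table):

    #include <cstdio>

    int main()
    {
      const unsigned BLOCK_SIZE   = 1 << 15;       // GC_BLOCK_SIZE_BYTES
      const unsigned SECTOR_WORDS = (1 << 8) / 4;  // SECTOR_SIZE_WORDS = 64
      const unsigned HEADER_VARS  = 40;            // illustrative only

      // One table word records the slide distance of one 64-word sector, so
      // body + table partition what is left after the header vars:
      //   body_words = table_words * SECTOR_WORDS  =>  divisor is 65.
      unsigned table_bytes = (BLOCK_SIZE - HEADER_VARS) / (SECTOR_WORDS + 1) + 1;
      table_bytes = (table_bytes + 3) & ~3u;       // round up to a whole word
      printf("offset table: %u bytes, body: %u bytes\n",
             table_bytes, BLOCK_SIZE - HEADER_VARS - table_bytes);
      return 0;
    }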
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.cpp Thu Jan 11 05:57:16 2007
@@ -21,14 +21,26 @@
 #include "gc_common.h"
 #include "gc_metadata.h"
 #include "../thread/mutator.h"
-#include "../verify/verify_live_heap.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
+#include "../gen/gen.h"
+#include "../common/space_tuner.h"
+#include "interior_pointer.h"
+
+unsigned int Cur_Mark_Bit = 0x1;
+unsigned int Cur_Forward_Bit = 0x2;
 
-extern Boolean NEED_BARRIER;
-extern unsigned int NUM_COLLECTORS;
 extern Boolean GC_VERIFY;
+
 extern unsigned int NOS_SIZE;
-extern Boolean NOS_PARTIAL_FORWARD;
+extern unsigned int MIN_NOS_SIZE;
+
+extern Boolean FORCE_FULL_COMPACT;
+extern Boolean MINOR_ALGORITHM;
+extern Boolean MAJOR_ALGORITHM;
+
+extern unsigned int NUM_COLLECTORS;
+extern unsigned int MINOR_COLLECTORS;
+extern unsigned int MAJOR_COLLECTORS;
 
 unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
 unsigned int min_heap_size_bytes = 32 * MB;
@@ -105,7 +117,7 @@
   return res;
 }
 
-void gc_parse_options() 
+void gc_parse_options(GC* gc) 
 {
   unsigned int max_heap_size = HEAP_SIZE_DEFAULT;
   unsigned int min_heap_size = min_heap_size_bytes;
@@ -138,62 +150,128 @@
     NOS_SIZE = get_size_property("gc.nos_size");
   }
 
+  if (is_property_set("gc.min_nos_size", VM_PROPERTIES) == 1) {
+    MIN_NOS_SIZE = get_size_property("gc.min_nos_size");
+  }
+
   if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) {
     unsigned int num = get_int_property("gc.num_collectors");
     NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
   }
 
-  if (is_property_set("gc.gen_mode", VM_PROPERTIES) == 1) {
-    NEED_BARRIER = get_boolean_property("gc.gen_mode");
+  /* GC algorithm decision */
+  /* Step 1: */
+  char* minor_algo = NULL;
+  char* major_algo = NULL;
+  
+  if (is_property_set("gc.minor_algorithm", VM_PROPERTIES) == 1) {
+    minor_algo = get_property("gc.minor_algorithm", VM_PROPERTIES);
+  }
+  
+  if (is_property_set("gc.major_algorithm", VM_PROPERTIES) == 1) {
+    major_algo = get_property("gc.major_algorithm", VM_PROPERTIES);
+  }
+  
+  gc_decide_collection_algorithm((GC_Gen*)gc, minor_algo, major_algo);
+  gc->generate_barrier = gc_is_gen_mode();
+
+  if( minor_algo) destroy_property_value(minor_algo);
+  if( major_algo) destroy_property_value(major_algo);
+
+  /* Step 2: */
+  /* NOTE:: this has to stay after above!! */
+  if (is_property_set("gc.force_major_collect", VM_PROPERTIES) == 1) {
+    FORCE_FULL_COMPACT = get_boolean_property("gc.force_major_collect");
+    if(FORCE_FULL_COMPACT){
+      gc_disable_gen_mode();
+      gc->generate_barrier = FALSE;
+    }
+  }
+
+  /* Step 3: */
+  /* NOTE:: this has to stay after above!! */
+  if (is_property_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
+    Boolean generate_barrier = get_boolean_property("gc.generate_barrier");
+    gc->generate_barrier = generate_barrier || gc->generate_barrier;
   }
 
   if (is_property_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
     NOS_PARTIAL_FORWARD = get_boolean_property("gc.nos_partial_forward");
   }
+    
+  if (is_property_set("gc.minor_collectors", VM_PROPERTIES) == 1) {
+    MINOR_COLLECTORS = get_int_property("gc.minor_collectors");
+  }
+
+  if (is_property_set("gc.major_collectors", VM_PROPERTIES) == 1) {
+    MAJOR_COLLECTORS = get_int_property("gc.major_collectors");
+  }
+
+  if (is_property_set("gc.ignore_finref", VM_PROPERTIES) == 1) {
+    IGNORE_FINREF = get_boolean_property("gc.ignore_finref");
+  }
 
   if (is_property_set("gc.verify", VM_PROPERTIES) == 1) {
     GC_VERIFY = get_boolean_property("gc.verify");
   }
   
-  return;  
+  return;
 }
 
-struct GC_Gen;
-void gc_gen_reclaim_heap(GC_Gen* gc);
-unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int gc_cause);
+void gc_copy_interior_pointer_table_to_rootset();
 
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
 { 
-  gc->num_collections++;
+  /* FIXME:: before the mutators are suspended, the ops below must be very careful
+     to avoid racing with the mutators. */
+  gc->num_collections++;  
 
-  gc->collect_kind = gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
-  //gc->collect_kind = MAJOR_COLLECTION;
+  gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
+
+  //For_LOS_extend!
+  gc_space_tune(gc, gc_cause);
+
+
+#ifdef MARK_BIT_FLIPPING
+  if(gc->collect_kind == MINOR_COLLECTION)
+    mark_bit_flip();
+#endif
+  
   gc_metadata_verify(gc, TRUE);
-  gc_finalizer_weakref_metadata_verify((GC*)gc, TRUE);
+#ifndef BUILD_IN_REFERENT
+  gc_finref_metadata_verify((GC*)gc, TRUE);
+#endif
   
   /* Stop the threads and collect the roots. */
   gc_reset_rootset(gc);  
   vm_enumerate_root_set_all_threads();
+  gc_copy_interior_pointer_table_to_rootset();
   gc_set_rootset(gc); 
   
-  gc_set_objects_with_finalizer(gc);
-  
-  if(verify_live_heap) gc_verify_heap(gc, TRUE);
+  /* this has to be done after all mutators are suspended */
+  gc_reset_mutator_context(gc);
 
-  gc_gen_reclaim_heap((GC_Gen*)gc);  
-  
-  if(verify_live_heap) gc_verify_heap(gc, FALSE);
+  if(!IGNORE_FINREF )
+    gc_set_obj_with_fin(gc);
   
+  gc_gen_reclaim_heap((GC_Gen*)gc);
+  gc_reset_interior_pointer_table();
+    
   gc_metadata_verify(gc, FALSE);
-  gc_finalizer_weakref_metadata_verify(gc, FALSE);
+
+  if(gc_is_gen_mode())
+    gc_prepare_mutator_remset(gc);
   
-  gc_reset_finalizer_weakref_metadata(gc);
-  gc_reset_mutator_context(gc);
+  if(!IGNORE_FINREF ){
+    gc_reset_finref_metadata(gc);
+    gc_activate_finref_threads((GC*)gc);
+  }
+
+  //For_LOS_extend!
+  gc_space_tuner_reset(gc);
   
-  gc_activate_finalizer_weakref_threads((GC*)gc);
   vm_resume_threads_after();
-
   return;
 }
 

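The two "this has to stay after above!!" notes encode a real ordering constraint in gc_parse_options: the algorithm choice may enable gen mode (and with it the barrier), gc.force_major_collect must then be able to turn both off, and gc.generate_barrier may still re-enable the barrier alone. A toy reproduction of that resolution order, with hypothetical flag names:

    #include <cstdio>

    struct Opts { bool gen_mode; bool force_major; bool user_barrier; bool barrier; };

    void resolve(Opts& o)
    {
      // Step 1: the chosen collection algorithm decides gen mode;
      // gen mode implies the write barrier.
      o.barrier = o.gen_mode;
      // Step 2: forcing major collections rules out generational
      // collection, so it must override step 1.
      if (o.force_major) { o.gen_mode = false; o.barrier = false; }
      // Step 3: the user may still request the barrier (e.g. for testing)
      // even when collections are non-generational.
      o.barrier = o.barrier || o.user_barrier;
    }

    int main()
    {
      Opts o = { true, true, true, false };
      resolve(o);
      printf("gen_mode=%d barrier=%d\n", o.gen_mode, o.barrier);  // prints 0 1
      return 0;
    }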
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_common.h Thu Jan 11 05:57:16 2007
@@ -21,9 +21,6 @@
 #ifndef _GC_COMMON_H_
 #define _GC_COMMON_H_
 
-#include <assert.h>
-#include <map>
-
 #include "port_vmem.h"
 
 #include "platform_lowlevel.h"
@@ -37,17 +34,18 @@
 #include "gc_for_class.h"
 #include "gc_platform.h"
 
+#include "../gen/gc_for_barrier.h"
+
 #define null 0
 
-#define MB  1048576
-#define KB  1024
+#define KB  (1<<10)
+#define MB  (1<<20)
 
 #define BYTES_PER_WORD 4
 #define BITS_PER_BYTE 8 
 #define BITS_PER_WORD 32
 
 #define MASK_OF_BYTES_PER_WORD (BYTES_PER_WORD-1) /* 0x11 */
-#define WORD_SIZE_ROUND_UP(addr)  (((unsigned int)addr+MASK_OF_BYTES_PER_WORD)& ~MASK_OF_BYTES_PER_WORD) 
 
 #define BIT_SHIFT_TO_BYTES_PER_WORD 2 /* 2 */
 #define BIT_SHIFT_TO_BITS_PER_BYTE 3
@@ -60,13 +58,29 @@
 
 typedef void (*TaskType)(void*);
 
-typedef std::map<Partial_Reveal_Object*, Obj_Info_Type> ObjectMap;
+enum Collection_Algorithm{
+  COLLECTION_ALGOR_NIL,
+  
+  /*minor nongen collection*/
+  MINOR_NONGEN_FORWARD_POOL,
+  
+  /* minor gen collection */
+  MINOR_GEN_FORWARD_POOL,
+  
+  /* major collection */
+  MAJOR_COMPACT_SLIDE,
+  MAJOR_COMPACT_MOVE
+  
+};
 
 enum Collection_Kind {
   MINOR_COLLECTION,
-  MAJOR_COLLECTION  
+  MAJOR_COLLECTION,
+  FALLBACK_COLLECTION  
 };
 
+extern Boolean IS_FALLBACK_COMPACTION;  /* only for mark/fw bits debugging purposes */
+
 enum GC_CAUSE{
   GC_CAUSE_NIL,
   GC_CAUSE_NOS_IS_FULL,
@@ -74,28 +88,31 @@
   GC_CAUSE_RUNTIME_FORCE_GC
 };
 
-inline unsigned int vm_object_size(Partial_Reveal_Object *obj)
-{
-  Boolean arrayp = object_is_array (obj);
-  if (arrayp) {
-    return vm_vector_size(obj_get_class_handle(obj), vector_get_length((Vector_Handle)obj));
-  } else {
-    return nonarray_object_size(obj);
-  }
-}
-
 inline POINTER_SIZE_INT round_up_to_size(POINTER_SIZE_INT size, int block_size) 
 {  return (size + block_size - 1) & ~(block_size - 1); }
 
 inline POINTER_SIZE_INT round_down_to_size(POINTER_SIZE_INT size, int block_size) 
 {  return size & ~(block_size - 1); }
 
-inline Boolean obj_is_in_gc_heap(Partial_Reveal_Object *p_obj)
+/****************************************/
+/* Return a pointer to the ref field offset array. */
+inline int* object_ref_iterator_init(Partial_Reveal_Object *obj)
+{
+  GC_VTable_Info *gcvt = obj_get_gcvt(obj);  
+  return gcvt->gc_ref_offset_array;    
+}
+
+inline Partial_Reveal_Object** object_ref_iterator_get(int* iterator, Partial_Reveal_Object* obj)
 {
-  return p_obj >= gc_heap_base_address() && p_obj < gc_heap_ceiling_address();
+  return (Partial_Reveal_Object**)((int)obj + *iterator);
 }
 
-/* Return a pointer to the ref field offset array. */
+inline int* object_ref_iterator_next(int* iterator)
+{
+  return iterator+1;
+}
+
+/* original design */
 inline int *init_object_scanner (Partial_Reveal_Object *obj) 
 {
   GC_VTable_Info *gcvt = obj_get_gcvt(obj);  
@@ -106,104 +123,151 @@
 {    return (*offset == 0)? NULL: (void*)((Byte*) obj + *offset); }
 
 inline int *offset_next_ref (int *offset) 
-{  return (int *)((Byte *)offset + sizeof (int)); }
+{  return offset + 1; }
+
+/****************************************/
 
-Boolean obj_is_forwarded_in_vt(Partial_Reveal_Object *obj);
 inline Boolean obj_is_marked_in_vt(Partial_Reveal_Object *obj) 
-{  return ((POINTER_SIZE_INT)obj->vt_raw & MARK_BIT_MASK); }
+{  return ((POINTER_SIZE_INT)obj_get_vt_raw(obj) & CONST_MARK_BIT); }
 
-inline void obj_mark_in_vt(Partial_Reveal_Object *obj) 
-{  obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw | MARK_BIT_MASK);
-   assert(!obj_is_forwarded_in_vt(obj));
+inline Boolean obj_mark_in_vt(Partial_Reveal_Object *obj) 
+{  
+  Partial_Reveal_VTable* vt = obj_get_vt_raw(obj);
+  if((unsigned int)vt & CONST_MARK_BIT) return FALSE;
+  obj_set_vt(obj, (unsigned int)vt | CONST_MARK_BIT);
+  return TRUE;
 }
 
 inline void obj_unmark_in_vt(Partial_Reveal_Object *obj) 
-{
-  assert(!obj_is_forwarded_in_vt(obj));
-  assert(obj_is_marked_in_vt(obj)); 
-  obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~MARK_BIT_MASK);
+{ 
+  Partial_Reveal_VTable* vt = obj_get_vt_raw(obj);
+  obj_set_vt(obj, (unsigned int)vt & ~CONST_MARK_BIT);
 }
 
-inline void obj_set_forward_in_vt(Partial_Reveal_Object *obj) 
-{
-  assert(!obj_is_marked_in_vt(obj));
-  obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw | FORWARDING_BIT_MASK);
+inline Boolean obj_is_marked_or_fw_in_oi(Partial_Reveal_Object *obj)
+{ return get_obj_info_raw(obj) & DUAL_MARKBITS; }
+
+
+inline void obj_clear_dual_bits_in_oi(Partial_Reveal_Object *obj)
+{  
+  Obj_Info_Type info = get_obj_info_raw(obj);
+  set_obj_info(obj, (unsigned int)info & DUAL_MARKBITS_MASK);
 }
 
-inline Boolean obj_is_forwarded_in_vt(Partial_Reveal_Object *obj) 
-{  return (POINTER_SIZE_INT)obj->vt_raw & FORWARDING_BIT_MASK; }
+/****************************************/
+#ifndef MARK_BIT_FLIPPING
 
-inline void obj_clear_forward_in_vt(Partial_Reveal_Object *obj) 
+inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) 
 {
-  assert(obj_is_forwarded_in_vt(obj) && !obj_is_marked_in_vt(obj));  
-  obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~FORWARDING_BIT_MASK);
+  assert(get_obj_info_raw(obj) & CONST_FORWARD_BIT);
+  return (Partial_Reveal_Object*) (get_obj_info_raw(obj) & ~CONST_FORWARD_BIT);
 }
 
-inline void obj_set_forwarding_pointer_in_vt(Partial_Reveal_Object *obj, void *dest) 
-{
-  assert(!obj_is_marked_in_vt(obj));
-  obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)dest | FORWARDING_BIT_MASK);
+inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) 
+{  return (get_obj_info_raw(obj) & CONST_FORWARD_BIT); }
+
+inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj,void *dest)
+{  
+  assert(!(get_obj_info_raw(obj) & CONST_FORWARD_BIT));
+  set_obj_info(obj,(Obj_Info_Type)dest | CONST_FORWARD_BIT); 
 }
 
-inline Partial_Reveal_Object *obj_get_forwarding_pointer_in_vt(Partial_Reveal_Object *obj) 
-{
-  assert(obj_is_forwarded_in_vt(obj) && !obj_is_marked_in_vt(obj));
-  return (Partial_Reveal_Object *)obj_get_vt(obj);
+
+inline Boolean obj_is_marked_in_oi(Partial_Reveal_Object *obj) 
+{  return ( get_obj_info_raw(obj) & CONST_MARK_BIT ); }
+
+inline Boolean obj_mark_in_oi(Partial_Reveal_Object *obj) 
+{  
+  Obj_Info_Type info = get_obj_info_raw(obj);
+  if ( info & CONST_MARK_BIT ) return FALSE;
+
+  set_obj_info(obj, info|CONST_MARK_BIT);
+  return TRUE;
 }
 
-inline Partial_Reveal_Object *get_forwarding_pointer_in_obj_info(Partial_Reveal_Object *obj) 
-{
-  assert(get_obj_info(obj) & FORWARDING_BIT_MASK);
-  return (Partial_Reveal_Object*) (get_obj_info(obj) & ~FORWARDING_BIT_MASK);
+inline void obj_unmark_in_oi(Partial_Reveal_Object *obj) 
+{  
+  Obj_Info_Type info = get_obj_info_raw(obj);
+  info = info & ~CONST_MARK_BIT;
+  set_obj_info(obj, info);
+  return;
 }
 
-inline Boolean obj_is_forwarded_in_obj_info(Partial_Reveal_Object *obj) 
+/* **********************************  */
+#else /* ifndef MARK_BIT_FLIPPING */
+
+inline void mark_bit_flip()
+{ 
+  FLIP_FORWARD_BIT = FLIP_MARK_BIT;
+  FLIP_MARK_BIT ^= DUAL_MARKBITS; 
+}
+
+inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) 
 {
-  return (get_obj_info(obj) & FORWARDING_BIT_MASK);
+  assert(get_obj_info_raw(obj) & FLIP_FORWARD_BIT);
+  return (Partial_Reveal_Object*) get_obj_info(obj);
 }
 
-inline void set_forwarding_pointer_in_obj_info(Partial_Reveal_Object *obj,void *dest)
-{  set_obj_info(obj,(Obj_Info_Type)dest | FORWARDING_BIT_MASK); }
+inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) 
+{  return (get_obj_info_raw(obj) & FLIP_FORWARD_BIT); }
 
-struct GC;
-/* all Spaces inherit this Space structure */
-typedef struct Space{
-  void* heap_start;
-  void* heap_end;
-  unsigned int reserved_heap_size;
-  unsigned int committed_heap_size;
-  unsigned int num_collections;
-  GC* gc;
-  Boolean move_object;
-  Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
-}Space;
+inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj, void *dest)
+{ 
+  assert(IS_FALLBACK_COMPACTION || (!(get_obj_info_raw(obj) & FLIP_FORWARD_BIT))); 
+  /* This assert should always hold except during fallback compaction. In fallback compaction
+     an object may still carry the mark bit from the failed minor collection, which is exactly
+     this collection's fw bit, because the failed minor collection flipped the bits. */
 
-inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;}
-inline void* space_heap_start(Space* space){ return space->heap_start; }
-inline void* space_heap_end(Space* space){ return space->heap_end; }
+  /* It's important to clear FLIP_FORWARD_BIT before the collection ends, since it is the same bit
+     as the next minor cycle's FLIP_MARK_BIT. And if the next cycle is major, it would still be
+     misread as FLIP_FORWARD_BIT. (The bits are flipped only in minor collections.) */
+  set_obj_info(obj,(Obj_Info_Type)dest | FLIP_FORWARD_BIT); 
+}
 
-inline Boolean address_belongs_to_space(void* addr, Space* space) 
+inline Boolean obj_mark_in_oi(Partial_Reveal_Object* p_obj)
 {
-  return (addr >= space_heap_start(space) && addr < space_heap_end(space));
+  Obj_Info_Type info = get_obj_info_raw(p_obj);
+  assert((info & DUAL_MARKBITS ) != DUAL_MARKBITS);
+  
+  if( info & FLIP_MARK_BIT ) return FALSE;  
+  
+  info = info & DUAL_MARKBITS_MASK;
+  set_obj_info(p_obj, info|FLIP_MARK_BIT);
+  return TRUE;
 }
 
-inline Boolean obj_belongs_to_space(Partial_Reveal_Object *p_obj, Space* space)
+inline Boolean obj_unmark_in_oi(Partial_Reveal_Object* p_obj)
 {
-  return address_belongs_to_space((Partial_Reveal_Object*)p_obj, space);
+  Obj_Info_Type info = get_obj_info_raw(p_obj);
+  info = info & DUAL_MARKBITS_MASK;
+  set_obj_info(p_obj, info);
+  return TRUE;
 }
 
+inline Boolean obj_is_marked_in_oi(Partial_Reveal_Object* p_obj)
+{
+  Obj_Info_Type info = get_obj_info_raw(p_obj);
+  return (info & FLIP_MARK_BIT);
+}
+
+#endif /* MARK_BIT_FLIPPING */
+
 /* all GCs inherit this GC structure */
 struct Mutator;
 struct Collector;
 struct GC_Metadata;
-struct Finalizer_Weakref_Metadata;
+struct Finref_Metadata;
 struct Vector_Block;
+struct Space_Tuner;
+
 typedef struct GC{
   void* heap_start;
   void* heap_end;
   unsigned int reserved_heap_size;
   unsigned int committed_heap_size;
   unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
   
   /* mutation related info */
   Mutator *mutator_list;
@@ -217,18 +281,28 @@
   
   /* metadata is the pool for rootset, tracestack, etc. */  
   GC_Metadata* metadata;
-  Finalizer_Weakref_Metadata *finalizer_weakref_metadata;
+  Finref_Metadata *finref_metadata;
+
   unsigned int collect_kind; /* MAJOR or MINOR */
+  unsigned int last_collect_kind;
+  Boolean collect_result; /* succeed or fail */
+
+  Boolean generate_barrier;
+  
   /* FIXME:: this is wrong! root_set belongs to mutator */
   Vector_Block* root_set;
 
-  /* mem info */
-  apr_pool_t *aux_pool;
-  port_vmem_t *allocated_memory;
+  //For_LOS_extend
+  Space_Tuner* tuner;
 
 }GC;
 
-void mark_scan_heap(Collector* collector);
+void mark_scan_pool(Collector* collector);
+
+inline void mark_scan_heap(Collector* collector)
+{
+    mark_scan_pool(collector);    
+}
 
 inline void* gc_heap_base(GC* gc){ return gc->heap_start; }
 inline void* gc_heap_ceiling(GC* gc){ return gc->heap_end; }
@@ -237,7 +311,37 @@
   return (addr >= gc_heap_base(gc) && addr < gc_heap_ceiling(gc));
 }
 
-void gc_parse_options();
+void gc_parse_options(GC* gc);
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
+
+/* generational GC related */
+
+extern Boolean NOS_PARTIAL_FORWARD;
+
+//#define STATIC_NOS_MAPPING
+
+#ifdef STATIC_NOS_MAPPING
+
+  //#define NOS_BOUNDARY ((void*)0x2ea20000)  //this is for 512M
+  #define NOS_BOUNDARY ((void*)0x40000000) //this is for 256M
+
+	#define nos_boundary NOS_BOUNDARY
+
+#else /* STATIC_NOS_MAPPING */
+
+	extern void* nos_boundary;
+
+#endif /* STATIC_NOS_MAPPING */
+
+inline Boolean addr_belongs_to_nos(void* addr)
+{ return addr >= nos_boundary; }
+
+inline Boolean obj_belongs_to_nos(Partial_Reveal_Object* p_obj)
+{ return addr_belongs_to_nos(p_obj); }
+
+extern void* los_boundary;
+
+inline Boolean obj_is_moved(Partial_Reveal_Object* p_obj)
+{ return p_obj >= los_boundary; }
 
 #endif //_GC_COMMON_H_

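The MARK_BIT_FLIPPING scheme swaps the roles of the two low obj_info bits at every minor collection, so last cycle's mark bits never need an explicit clearing pass; the price is that a stale mark bit reads as this cycle's forward bit, which is exactly the fallback-compaction case the asserts above allow for. A minimal model, with the constants mirroring gc_for_class.h and the rest illustrative:

    #include <cassert>

    static unsigned FLIP_MARK_BIT    = 0x1;  // Cur_Mark_Bit
    static unsigned FLIP_FORWARD_BIT = 0x2;  // Cur_Forward_Bit
    const  unsigned DUAL_MARKBITS    = 0x3;

    void mark_bit_flip()
    {
      FLIP_FORWARD_BIT = FLIP_MARK_BIT;
      FLIP_MARK_BIT   ^= DUAL_MARKBITS;  // 0x1 <-> 0x2
    }

    int main()
    {
      unsigned oi = FLIP_MARK_BIT;    // object marked in this minor cycle
      mark_bit_flip();                // the next minor collection begins
      assert(!(oi & FLIP_MARK_BIT));  // the stale mark no longer reads as marked...
      assert(oi & FLIP_FORWARD_BIT);  // ...but does read as forwarded, which is why
                                      // fallback compaction relaxes its assert
      return 0;
    }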
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.cpp Thu Jan 11 05:57:16 2007
@@ -19,7 +19,7 @@
  */
 
 #include "gc_common.h"
-#include "../finalizer_weakref/finalizer_weakref_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
 
 /* Setter functions for the gc class property field. */
 void gc_set_prop_alignment_mask (GC_VTable_Info *gcvt, unsigned int the_mask)
@@ -42,9 +42,9 @@
 {
   gcvt->gc_class_properties |= CL_PROP_FINALIZABLE_MASK;
 }
-void gc_set_prop_reference(Partial_Reveal_VTable *vt, WeakReferenceType type)
+void gc_set_prop_reference(GC_VTable_Info *gcvt, WeakReferenceType type)
 {
-  vtable_get_gcvt(vt)->gc_class_properties |= (unsigned int)type << CL_PROP_REFERENCE_TYPE_SHIFT;
+  gcvt->gc_class_properties |= (unsigned int)type << CL_PROP_REFERENCE_TYPE_SHIFT;
 }
 
 
@@ -61,8 +61,9 @@
   return 0;
 }
 
-static int *build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt, WeakReferenceType type)
+static unsigned int class_num_ref_fields(Class_Handle ch)
 {
+  WeakReferenceType is_reference = class_is_reference(ch);
   unsigned num_ref_fields = 0;
   unsigned num_fields = class_num_instance_fields_recursive(ch);
 
@@ -74,8 +75,8 @@
     }
   }
 
-  int skip = -1; // not skip any reference
-  if (type != NOT_REFERENCE) {
+#ifndef BUILD_IN_REFERENT
+  if (is_reference != NOT_REFERENCE) {
     int offset = class_get_referent_offset(ch);
     unsigned int gc_referent_offset = get_gc_referent_offset();
     if (gc_referent_offset == 0) {
@@ -84,28 +85,26 @@
       assert(gc_referent_offset == offset);
     }
 
-    skip = offset; // skip global referent offset
     num_ref_fields--;
   }
-  
-  if( num_ref_fields )   
-    gcvt->gc_object_has_ref_field = true;
-  else 
-    return NULL;
-   
-  /* add a null-termination slot */
-  unsigned int size = (num_ref_fields+1) * sizeof (unsigned int);
-
-  /* alloc from gcvt pool */
-  int *result = (int*) STD_MALLOC(size);
-  assert(result);
+#endif
 
-  int *new_ref_array = result;
-  for(idx = 0; idx < num_fields; idx++) {
+  return num_ref_fields;
+}
+
+static void build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt)
+{     
+  unsigned num_fields = class_num_instance_fields_recursive(ch);
+  WeakReferenceType is_reference = class_is_reference(ch);
+  unsigned int gc_referent_offset = get_gc_referent_offset();
+  
+  int *new_ref_array = gcvt->gc_ref_offset_array;
+  int *result = new_ref_array;
+  for(unsigned int idx = 0; idx < num_fields; idx++) {
     Field_Handle fh = class_get_instance_field_recursive(ch, idx);
     if(field_is_reference(fh)) {
       int offset = field_get_offset(fh);
-      if (offset == skip) continue;
+      if(is_reference && offset == gc_referent_offset) continue;
       *new_ref_array = field_get_offset(fh);
       new_ref_array++;
     }
@@ -114,15 +113,12 @@
   /* ref array is NULL-terminated */
   *new_ref_array = 0;
 
-  gcvt->gc_number_of_ref_fields = num_ref_fields;
-
+  unsigned int num_ref_fields = gcvt->gc_number_of_ref_fields;
   /* offsets were built with idx, may not be in order. Let's sort it anyway.
      FIXME: verify_live_heap depends on ordered offset array. */
-  qsort(result, num_ref_fields, sizeof(*result), intcompare);
-
-  gcvt->gc_ref_offset_array  = result;
+  qsort(gcvt->gc_ref_offset_array, num_ref_fields, sizeof(int), intcompare);
   
-  return new_ref_array;
+  return;
 }
 
 void gc_class_prepared (Class_Handle ch, VTable_Handle vth) 
@@ -132,48 +128,70 @@
   assert(vth);
 
   Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth;
+
+  unsigned int num_ref_fields = class_num_ref_fields(ch);
+  unsigned int gcvt_size = sizeof(GC_VTable_Info);
+  if(num_ref_fields){
+    gcvt_size += num_ref_fields * sizeof(unsigned int);  
+  }
   
-  /* FIXME: gcvts are too random is memory */
-  gcvt = (GC_VTable_Info *) STD_MALLOC(sizeof(GC_VTable_Info));
+  gcvt_size = (gcvt_size + GCVT_ALIGN_MASK) & ~GCVT_ALIGN_MASK;
+  gcvt = (GC_VTable_Info*) malloc(gcvt_size);
   assert(gcvt);
-  vtable_set_gcvt(vt, gcvt);
-  memset((void *)gcvt, 0, sizeof(GC_VTable_Info));
+  assert(!((unsigned int)gcvt % GCVT_ALIGNMENT));
+
+  memset((void *)gcvt, 0, gcvt_size);
   gcvt->gc_clss = ch;
   gcvt->gc_class_properties = 0;
-  gcvt->gc_object_has_ref_field = false;
-  
   gc_set_prop_alignment_mask(gcvt, class_get_alignment(ch));
 
+  if(num_ref_fields){
+    gcvt->gc_number_of_ref_fields = num_ref_fields;
+    /* Build the offset array */
+    build_ref_offset_array(ch, gcvt);
+  }
+
   if(class_is_array(ch)) {
     Class_Handle array_element_class = class_get_array_element_class(ch);
     gc_set_prop_array(gcvt);
-    gcvt->gc_array_element_size = class_element_size(ch);
+    
+    gcvt->array_elem_size = class_element_size(ch);
     unsigned int the_offset = vector_first_element_offset_unboxed(array_element_class);
-    gcvt->gc_array_first_element_offset = the_offset;
+    gcvt->array_first_elem_offset = the_offset;
   
     if (class_is_non_ref_array (ch)) {
       gc_set_prop_non_ref_array(gcvt);
     }else{
-      gcvt->gc_object_has_ref_field = true;
+      gcvt->gc_number_of_ref_fields = 1;
     }
   }
-
+  
   if (class_is_finalizable(ch)) {
     gc_set_prop_finalizable(gcvt);
   }
 
   WeakReferenceType type = class_is_reference(ch);
-  gc_set_prop_reference(vt, type);
+  gc_set_prop_reference(gcvt, type);
   
   unsigned int size = class_get_boxed_data_size(ch);
   gcvt->gc_allocated_size = size;
   
-  /* Build the offset array */
-  build_ref_offset_array(ch, gcvt, type);
-
   gcvt->gc_class_name = class_get_name(ch);
   assert (gcvt->gc_class_name);
 
+  /* these must be set last, because they tag the gcvt pointer itself */
+  if(gcvt->gc_number_of_ref_fields)
+    gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_REFS);
+  
+  if(class_is_array(ch))
+    gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_ARRAY);
+    
+  if(class_is_finalizable(ch))
+    gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_FINALIZER);
+
+  vtable_set_gcvt(vt, gcvt);
+
+  return;
 }  /* gc_class_prepared */
 
 

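gc_class_prepared now allocates the gcvt and its ref-offset array as one 8-byte-aligned chunk, with gc_ref_offset_array[1] acting as a trailing over-allocated array, instead of a separately malloc'ed pointer. A sketch of that allocation pattern, using a simplified struct rather than the real GC_VTable_Info:

    #include <cstdlib>
    #include <cstring>

    struct ToyGCVT {
      unsigned num_ref_fields;
      int      ref_offsets[1];  // over-allocated below; NULL-terminated like above
    };

    ToyGCVT* gcvt_alloc(unsigned num_refs)
    {
      // sizeof(ToyGCVT) already covers one slot, so adding num_refs ints
      // leaves room for the terminating 0; round up to 8 bytes so the low
      // three bits of the pointer stay free for the class flags.
      size_t size = sizeof(ToyGCVT) + num_refs * sizeof(int);
      size = (size + 7) & ~(size_t)7;
      ToyGCVT* g = (ToyGCVT*)malloc(size);
      memset(g, 0, size);
      g->num_ref_fields = num_refs;
      return g;
    }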
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_class.h Thu Jan 11 05:57:16 2007
@@ -22,32 +22,74 @@
 #define _GC_TYPES_H_
 
 #include "open/types.h"
+#include "gc_platform.h"
 
-#define FORWARDING_BIT_MASK 0x1
-#define MARK_BIT_MASK 0x2
+/* CONST_MARK_BIT is used for mark_scan in the vt, whether or not MARK_BIT_FLIPPING is used.
+   MARK_BIT_FLIPPING is used in the oi for marking and forwarding in non-gen nursery forwarding
+   (the marking is for those objects not in nos).
+   In gen mode, MARK_BIT_FLIPPING is optional, because we never mark any object that is not
+   in nos, and the bits of live nos objects are reset when they are forwarded. So there is no
+   need to pay for the lower-performance bit flipping in gen mode.
+   When MARK_BIT_FLIPPING is defined, all configurations work.
+   If it is not defined, one configuration can't run: non-gen-mode nos-trace-forwarding. We have
+   to run nos-mark-forwarding/copying instead, which needs an extra pass to reset the mark bit.
+   
+   Important invariants:
+   1. We never put a forwarding pointer in the vt.
+   2. Forwarding pointers exist only during collection. No object has a fw (or fw_bit) in its oi during execution.
+   3. During app execution, no object has its mark_bit set unless MARK_BIT_FLIPPING is defined.
+   
+*/
+#define CONST_MARK_BIT 0x1
+
+#define DUAL_MARKBITS  0x3
+#define DUAL_MARKBITS_MASK  (~DUAL_MARKBITS)
+
+#define MARK_BIT_FLIPPING
+
+#ifdef MARK_BIT_FLIPPING
+
+  extern unsigned int Cur_Mark_Bit;
+  extern unsigned int Cur_Forward_Bit;
+  #define FLIP_MARK_BIT Cur_Mark_Bit
+  #define FLIP_FORWARD_BIT Cur_Forward_Bit
+
+  #define FORWARD_BIT FLIP_FORWARD_BIT
+
+#else  /* #ifdef MARK_BIT_FLIPPING*/
+
+  #define CONST_FORWARD_BIT 0x2
+  #define FORWARD_BIT CONST_FORWARD_BIT
+
+#endif /* else MARK_BIT_FLIPPING */
 
 typedef void *Thread_Handle; 
+
+#define GC_CLASS_FLAG_FINALIZER 1
+#define GC_CLASS_FLAG_ARRAY 2
+#define GC_CLASS_FLAG_REFS 4
+#define GC_CLASS_IS_REF_ARRAY (GC_CLASS_FLAG_ARRAY|GC_CLASS_FLAG_REFS)
+#define GC_CLASS_FLAGS_MASK (~(GC_CLASS_IS_REF_ARRAY|GC_CLASS_FLAG_FINALIZER))
+
+#define GC_OBJECT_ALIGN_MASK (GC_OBJECT_ALIGNMENT-1)
+#define GCVT_ALIGNMENT 8
+#define GCVT_ALIGN_MASK (GCVT_ALIGNMENT-1)
+
 typedef POINTER_SIZE_INT Obj_Info_Type;
 
 typedef struct GC_VTable_Info {
-  unsigned int gc_object_has_ref_field;
+
   unsigned int gc_number_of_ref_fields;
 
   uint32 gc_class_properties;    // This is the same as class_properties in VM's VTable.
 
-  unsigned int instance_data_size;
-
-  // Offset from the top by CLASS_ALLOCATED_SIZE_OFFSET
-  // The number of bytes allocated for this object. It is the same as
-  // instance_data_size with the constraint bit cleared. This includes
-  // the OBJECT_HEADER_SIZE as well as the OBJECT_VTABLE_POINTER_SIZE
   unsigned int gc_allocated_size;
 
-  unsigned int gc_array_element_size;
+  unsigned int array_elem_size;
 
   // This is the offset from the start of the object to the first element in the
   // array. It isn't a constant since we pad double words.
-  int gc_array_first_element_offset;
+  int array_first_elem_offset;
 
   // The GC needs access to the class name for debugging and for collecting information
   // about the allocation behavior of certain classes. Store the name of the class here.
@@ -55,11 +97,8 @@
   Class_Handle gc_clss;
 
   // This array holds an array of offsets to the pointer fields in
-  // an instance of this class, including the weak referent field.
-  // It would be nice if this
-  // was located immediately prior to the vtable, since that would
-  // eliminate a dereference.
-  int *gc_ref_offset_array;
+  // an instance of this class, which may or may not include the weak referent field,
+  // depending on compilation options
+  int gc_ref_offset_array[1];
   
 } GC_VTable_Info;
 
@@ -72,32 +111,59 @@
   Obj_Info_Type obj_info;
 } Partial_Reveal_Object;
 
+typedef struct Partial_Reveal_Array {
+  Partial_Reveal_VTable *vt_raw;
+  Obj_Info_Type obj_info;
+  unsigned int array_len;
+} Partial_Reveal_Array;
+
+inline Obj_Info_Type get_obj_info_raw(Partial_Reveal_Object *obj) 
+{  assert(obj); return obj->obj_info; }
+
+#ifndef MARK_BIT_FLIPPING
+
 inline Obj_Info_Type get_obj_info(Partial_Reveal_Object *obj) 
-{  return obj->obj_info; }
+{  assert(obj); return obj->obj_info & ~CONST_MARK_BIT; }
+
+#else
+
+inline Obj_Info_Type get_obj_info(Partial_Reveal_Object *obj) 
+{  assert(obj); return obj->obj_info & DUAL_MARKBITS_MASK; }
+
+#endif /* MARK_BIT_FLIPPING */
 
 inline void set_obj_info(Partial_Reveal_Object *obj, Obj_Info_Type new_obj_info) 
-{  obj->obj_info = new_obj_info; }
+{  assert(obj); obj->obj_info = new_obj_info; }
 
 inline Obj_Info_Type *get_obj_info_addr(Partial_Reveal_Object *obj) 
-{  return &obj->obj_info; }
+{  assert(obj); return &obj->obj_info; }
 
-inline Partial_Reveal_VTable *obj_get_vtraw(Partial_Reveal_Object *obj) 
-{  return obj->vt_raw; }
+inline Partial_Reveal_VTable *obj_get_vt_raw(Partial_Reveal_Object *obj) 
+{  assert(obj && obj->vt_raw); return obj->vt_raw; }
 
-inline Partial_Reveal_VTable **obj_get_vtraw_addr(Partial_Reveal_Object *obj) 
-{  return &obj->vt_raw; }
+inline Partial_Reveal_VTable **obj_get_vt_addr(Partial_Reveal_Object *obj) 
+{  assert(obj && obj->vt_raw); return &obj->vt_raw; }
 
 inline Partial_Reveal_VTable *obj_get_vt(Partial_Reveal_Object *obj) 
-{  return (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~(FORWARDING_BIT_MASK | MARK_BIT_MASK)); }
+{  assert(obj && obj->vt_raw); return (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~CONST_MARK_BIT); }
 
 inline void obj_set_vt(Partial_Reveal_Object *obj, Allocation_Handle ah) 
-{  obj->vt_raw = (Partial_Reveal_VTable *)ah; }
+{  assert(obj && ah); obj->vt_raw = (Partial_Reveal_VTable *)ah; }
+
+inline GC_VTable_Info *vtable_get_gcvt_raw(Partial_Reveal_VTable *vt) 
+{  assert(vt && vt->gcvt); return vt->gcvt; }
 
 inline GC_VTable_Info *vtable_get_gcvt(Partial_Reveal_VTable *vt) 
-{  return vt->gcvt; }
+{  assert(vt && vt->gcvt); return (GC_VTable_Info*)((unsigned int)vt->gcvt & GC_CLASS_FLAGS_MASK); }
 
 inline void vtable_set_gcvt(Partial_Reveal_VTable *vt, GC_VTable_Info *new_gcvt) 
-{  vt->gcvt = new_gcvt; }
+{  assert(vt && new_gcvt); vt->gcvt = new_gcvt; }
+
+inline GC_VTable_Info *obj_get_gcvt_raw(Partial_Reveal_Object *obj) 
+{
+  Partial_Reveal_VTable *vt = obj_get_vt(obj);
+  return vtable_get_gcvt_raw(vt);
+}
 
 inline GC_VTable_Info *obj_get_gcvt(Partial_Reveal_Object *obj) 
 {
@@ -107,11 +173,18 @@
 
 inline Boolean object_has_ref_field(Partial_Reveal_Object *obj) 
 {
-  GC_VTable_Info *gcvt = obj_get_gcvt(obj);
-  return gcvt->gc_object_has_ref_field;   
+  GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj);
+  return (unsigned int)gcvt & GC_CLASS_FLAG_REFS;   
+}
+
+inline Boolean object_has_ref_field_before_scan(Partial_Reveal_Object *obj) 
+{
+  Partial_Reveal_VTable *vt = obj_get_vt_raw(obj);  
+  GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt);
+  return (unsigned int)gcvt & GC_CLASS_FLAG_REFS;   
 }
 
-inline Boolean object_ref_field_num(Partial_Reveal_Object *obj) 
+inline unsigned int object_ref_field_num(Partial_Reveal_Object *obj) 
 {
   GC_VTable_Info *gcvt = obj_get_gcvt(obj);
   return gcvt->gc_number_of_ref_fields;   
@@ -119,15 +192,13 @@
 
 inline Boolean object_is_array(Partial_Reveal_Object *obj) 
 {
-  GC_VTable_Info *gcvt = obj_get_gcvt(obj);
-  return (gcvt->gc_class_properties & CL_PROP_ARRAY_MASK);
-  
+  GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj);
+  return ((unsigned int)gcvt & GC_CLASS_FLAG_ARRAY);
 } 
 
 inline Boolean obj_is_primitive_array(Partial_Reveal_Object *obj) 
 {
-  GC_VTable_Info *gcvt = obj_get_gcvt(obj);  
-  return (gcvt->gc_class_properties & CL_PROP_NON_REF_ARRAY_MASK);
+  return object_is_array(obj) && !object_has_ref_field(obj);
 }
 
 inline Class_Handle obj_get_class_handle(Partial_Reveal_Object *obj) 
@@ -142,6 +213,25 @@
   return gcvt->gc_allocated_size;
 }
 
+inline unsigned int array_first_element_offset(Partial_Reveal_Array *obj)
+{ 
+  GC_VTable_Info *gcvt = obj_get_gcvt((Partial_Reveal_Object*)obj);
+  return gcvt->array_first_elem_offset;
+}
+
+inline unsigned int array_object_size(Partial_Reveal_Object *obj) 
+{
+  GC_VTable_Info *gcvt = obj_get_gcvt(obj);  
+  int array_len = ((Partial_Reveal_Array*)obj)->array_len;
+  return (gcvt->array_first_elem_offset + gcvt->array_elem_size * array_len + GC_OBJECT_ALIGN_MASK) & (~GC_OBJECT_ALIGN_MASK);
+}
+
+inline unsigned int vm_object_size(Partial_Reveal_Object *obj)
+{
+  Boolean is_array = object_is_array(obj);
+  return is_array? array_object_size(obj) : nonarray_object_size(obj);
+}
+
 #define CL_PROP_REFERENCE_TYPE_SHIFT 16
 #define CL_PROP_REFERENCE_TYPE_MASK 0x00030000
 
@@ -153,8 +243,8 @@
 
 inline Boolean type_has_finalizer(Partial_Reveal_VTable *vt)
 {
-  GC_VTable_Info *gcvt = vtable_get_gcvt(vt);
-  return gcvt->gc_class_properties & CL_PROP_FINALIZABLE_MASK;
+  GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt);
+  return (unsigned int)gcvt & GC_CLASS_FLAG_FINALIZER;
 }
 
 #endif //#ifndef _GC_TYPES_H_
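
Note on the accessors above: the class flags (GC_CLASS_FLAG_REFS, GC_CLASS_FLAG_ARRAY,
GC_CLASS_FLAG_FINALIZER) are packed into the low bits of the gcvt pointer itself, so
hot-path predicates like object_has_ref_field() can test a flag without dereferencing
the GC_VTable_Info at all. A minimal sketch of the packing, assuming the flags occupy
the low three bits and GC_CLASS_FLAGS_MASK clears them (the authoritative constants are
defined earlier in this header, not here):

    /* Sketch only -- assumed bit layout, not the authoritative definitions. */
    #define GC_CLASS_FLAG_REFS      0x1
    #define GC_CLASS_FLAG_ARRAY     0x2
    #define GC_CLASS_FLAG_FINALIZER 0x4
    #define GC_CLASS_FLAGS_MASK     (~0x7)

    /* tag once, when the class is prepared */
    inline GC_VTable_Info* gcvt_tag(GC_VTable_Info* gcvt, unsigned int flags)
    {  return (GC_VTable_Info*)((unsigned int)gcvt | flags); }

    /* strip the tags only when the full struct is actually needed */
    inline GC_VTable_Info* gcvt_untag(GC_VTable_Info* raw)
    {  return (GC_VTable_Info*)((unsigned int)raw & GC_CLASS_FLAGS_MASK); }

This is why vtable_get_gcvt() masks with GC_CLASS_FLAGS_MASK while the *_raw variants
return the tagged pointer unmodified.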

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_for_vm.cpp Thu Jan 11 05:57:16 2007
@@ -31,21 +31,27 @@
 
 void gc_tls_init();
 
+Boolean gc_requires_barriers() 
+{   return p_global_gc->generate_barrier; }
+
 void gc_init() 
-{  
-  gc_parse_options();
-    
+{      
   assert(p_global_gc == NULL);
   GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
   assert(gc);
   memset(gc, 0, sizeof(GC));  
   p_global_gc = gc;
+
+  gc_parse_options(gc);
+  
   gc_tls_init();
   
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
 
   gc_metadata_initialize(gc); /* root set and mark stack */
-  gc_finalizer_weakref_metadata_initialize(gc);
+#ifndef BUILD_IN_REFERENT
+  gc_finref_metadata_initialize(gc);
+#endif
   collector_initialize(gc);
   gc_init_heap_verification(gc);
 
@@ -57,7 +63,9 @@
   GC* gc =  p_global_gc;
   gc_gen_destruct((GC_Gen*)gc);
   gc_metadata_destruct(gc); /* root set and mark stack */
-  gc_finalizer_weakref_metadata_destruct(gc);
+#ifndef BUILD_IN_REFERENT
+  gc_finref_metadata_destruct(gc);
+#endif
   collector_destruct(gc);
 
   if( verify_live_heap ){
@@ -73,10 +81,15 @@
 void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) 
 {   
   Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref;
-  if (*p_ref == NULL) return;
-  assert( !obj_is_marked_in_vt(*p_ref));
-  assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref)); 
-  assert( obj_is_in_gc_heap(*p_ref));
+  Partial_Reveal_Object* p_obj = *p_ref;
+  if (p_obj == NULL) return;
+  assert( !obj_is_marked_in_vt(p_obj));
+  /* For minor collection, it's possible for p_obj to be forwarded in a non-gen mark-forward GC:
+     the forward bit is actually last cycle's mark bit.
+     For major collection, it's possible for p_obj to have been marked in the last cycle. Since we
+     don't flip the bit for major collection, we may find it still marked.
+     So we can't assert on obj_info unless we really want to. */
+  assert( address_belongs_to_gc_heap(p_obj, p_global_gc));
   gc_rootset_add_entry(p_global_gc, p_ref);
 } 
 
@@ -116,7 +129,7 @@
 /* java heap size.*/
 int64 gc_total_memory() 
 {
-  return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc)); 
+  return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); 
 }
 
 void gc_vm_initialized()
@@ -137,10 +150,14 @@
 unsigned int gc_time_since_last_gc()
 {  assert(0); return 0; }
 
+int32 gc_get_hashcode(Managed_Object_Handle p_object) 
+{  return 23; }
+
 
 void gc_finalize_on_exit()
 {
-  process_objects_with_finalizer_on_exit(p_global_gc);
+  if(!IGNORE_FINREF )
+    put_all_fin_on_exit(p_global_gc);
 }
 
 /* for future use
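
Note: gc_add_root_set_entry() above is the VM-to-GC half of root enumeration: during a
stop-the-world pause the VM reports every root slot, and the GC batches them into vector
blocks via gc_rootset_add_entry(). A sketch of what a caller looks like -- the enumerator
below is a hypothetical illustration, not the actual DRLVM enumeration code:

    /* Hypothetical VM-side enumerator; the real one lives in the VM core. */
    void enumerate_slots(Managed_Object_Handle* slots, unsigned int count)
    {
      for(unsigned int i = 0; i < count; i++){
        /* NULL slots are tolerated: gc_add_root_set_entry() returns early */
        gc_add_root_set_entry(&slots[i], FALSE /* not pinned */);
      }
    }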

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Thu Jan 11 05:57:16 2007
@@ -19,17 +19,16 @@
  */
 
 #include "gc_metadata.h"
-#include "../thread/mutator.h"
-#include "../thread/collector.h"
 #include "interior_pointer.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
+#include "gc_block.h"
 
-#define GC_METADATA_SIZE_BYTES 48*MB
+#define GC_METADATA_SIZE_BYTES (1*MB)
+#define GC_METADATA_EXTEND_SIZE_BYTES (1*MB)
 
-#define METADATA_BLOCK_SIZE_BIT_SHIFT 12
-#define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
+#define METADATA_BLOCK_SIZE_BYTES VECTOR_BLOCK_DATA_SIZE_BYTES
 
-static GC_Metadata gc_metadata;
+GC_Metadata gc_metadata;
 
 void gc_metadata_initialize(GC* gc)
 {
@@ -37,20 +36,22 @@
      dynamically alloc space for metadata. 
      We just don't have this dynamic support at the moment. */
 
-  void* metadata = STD_MALLOC(GC_METADATA_SIZE_BYTES);
-  memset(metadata, 0, GC_METADATA_SIZE_BYTES);
-  gc_metadata.heap_start = metadata;
-  gc_metadata.heap_end = (void*)((unsigned int)metadata + GC_METADATA_SIZE_BYTES);
+  unsigned int seg_size = GC_METADATA_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES;
+  void* metadata = STD_MALLOC(seg_size);
+  memset(metadata, 0, seg_size);
+  gc_metadata.segments[0] = metadata;
+  metadata = (void*)round_up_to_size((unsigned int)metadata, METADATA_BLOCK_SIZE_BYTES);
+  gc_metadata.num_alloc_segs = 1;
 
   unsigned int i=0;       
-  unsigned int num_blocks =  GC_METADATA_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+  unsigned int num_blocks =  GC_METADATA_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES;
   for(i=0; i<num_blocks; i++){
     Vector_Block* block = (Vector_Block*)((unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES);
     vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
   }
   
   /* part of the metadata space is used for trace_stack */
-  unsigned num_tasks = num_blocks >> 2;
+  unsigned num_tasks = num_blocks >> 1;
   gc_metadata.free_task_pool = sync_pool_create();
   for(i=0; i<num_tasks; i++){
     unsigned int block = (unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES;    
@@ -60,10 +61,9 @@
   gc_metadata.mark_task_pool = sync_pool_create();
 
   /* the other part is used for root sets (including rem sets) */
-  unsigned num_sets = (num_blocks >> 1) + num_tasks;
   gc_metadata.free_set_pool = sync_pool_create();
   /* initialize free rootset pool so that mutators can use them */  
-  for(; i<num_sets; i++){
+  for(; i<num_blocks; i++){
     unsigned int block = (unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES;    
     pool_put_entry(gc_metadata.free_set_pool, (void*)block); 
   }
@@ -89,10 +89,127 @@
   sync_pool_destruct(metadata->collector_remset_pool);
   sync_pool_destruct(metadata->collector_repset_pool);
 
-  STD_FREE(metadata->heap_start);
+  for(unsigned int i=0; i<metadata->num_alloc_segs; i++){
+    assert(metadata->segments[i]);
+    STD_FREE(metadata->segments[i]);
+  }
+  
   gc->metadata = NULL;  
 }
 
+Vector_Block* gc_metadata_extend(Pool* pool)
+{  
+  GC_Metadata *metadata = &gc_metadata;
+  lock(metadata->alloc_lock);
+  Vector_Block* block = pool_get_entry(pool);
+  if( block ){
+    unlock(metadata->alloc_lock);
+    return block;
+  }
+ 
+  unsigned int num_alloced = metadata->num_alloc_segs;
+  if(num_alloced == GC_METADATA_SEGMENT_NUM){
+    printf("Run out GC metadata, please give it more segments!\n");
+    exit(0);
+  }
+  unsigned int seg_size =  GC_METADATA_EXTEND_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES;
+  void *new_segment = STD_MALLOC(seg_size);
+  memset(new_segment, 0, seg_size);
+  metadata->segments[num_alloced] = new_segment;
+  new_segment = (void*)round_up_to_size((unsigned int)new_segment, METADATA_BLOCK_SIZE_BYTES);
+  metadata->num_alloc_segs = num_alloced + 1;
+  
+  unsigned int num_blocks =  GC_METADATA_EXTEND_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES;
+
+  unsigned int i=0;
+  for(i=0; i<num_blocks; i++){
+    Vector_Block* block = (Vector_Block*)((unsigned int)new_segment + i*METADATA_BLOCK_SIZE_BYTES);
+    vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
+    assert(vector_block_is_empty((Vector_Block *)block));
+  }
+
+  if( pool == gc_metadata.free_task_pool){  
+    for(i=0; i<num_blocks; i++){
+      unsigned int block = (unsigned int)new_segment + i*METADATA_BLOCK_SIZE_BYTES;    
+      vector_stack_init((Vector_Block*)block);
+      pool_put_entry(gc_metadata.free_task_pool, (void*)block); 
+    }
+  
+  }else{ 
+    assert( pool == gc_metadata.free_set_pool );
+    for(i=0; i<num_blocks; i++){
+      unsigned int block = (unsigned int)new_segment + i*METADATA_BLOCK_SIZE_BYTES;    
+      pool_put_entry(gc_metadata.free_set_pool, (void*)block); 
+    }
+  }
+  
+  block = pool_get_entry(pool);
+  unlock(metadata->alloc_lock);
+ 
+  return block;
+}
+
+extern Boolean IS_MOVE_COMPACT;
+
+static void gc_update_repointed_sets(GC* gc, Pool* pool)
+{
+  GC_Metadata* metadata = gc->metadata;
+  
+  /* NOTE:: this only updates the referenced slots; the sets stay in the pool for the later reset. */
+  pool_iterator_init(pool);
+  Vector_Block* root_set = pool_iterator_next(pool);
+
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      if(IS_MOVE_COMPACT){
+        if(obj_is_moved(p_obj))
+          *p_ref = obj_get_fw_in_table(p_obj);
+      } else {
+        if( // obj_is_fw_in_oi(p_obj) && //NOTE:: we removed the minor_copy algorithm for the moment, so we don't need this check
+            obj_is_moved(p_obj)){
+          /* The obj_is_moved(p_obj) condition prevents mistaking a large object's previous mark bit
+           * for the fw bit when fallback happens. Until fallback happens, the large object may not
+           * have been marked yet, so its mark bit remains as it was last time.
+           * In major collection the obj_is_fw_in_oi(p_obj) condition can be omitted, because any
+           * object that can be scanned in MOS & NOS must have had its fw bit set in obj_info.
+           */
+          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
+          *p_ref = obj_get_fw_in_oi(p_obj);
+        }
+      }
+    }
+    root_set = pool_iterator_next(pool);
+  } 
+  
+  return;
+}
+
+void gc_fix_rootset(Collector* collector)
+{  
+  GC* gc = collector->gc;  
+  GC_Metadata* metadata = gc->metadata;
+
+  /* generational MINOR_COLLECTION doesn't need a rootset update, but it does need a reset */
+  if( gc->collect_kind != MINOR_COLLECTION ) /* MINOR but not forwarding */
+    gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
+  else
+    gc_set_pool_clear(metadata->gc_rootset_pool);
+  
+#ifndef BUILD_IN_REFERENT
+  gc_update_finref_repointed_refs(gc);
+#endif
+
+  update_rootset_interior_pointer();
+  /* it was pointing to the last root_set entry in gc_rootset_pool (before rem_sets). */
+  gc->root_set = NULL;
+      
+  return;
+}
+
 void gc_set_rootset(GC* gc)
 {
   GC_Metadata* metadata = gc->metadata;
@@ -105,9 +222,15 @@
   
   /* put back last rootset block */
   pool_put_entry(gc_rootset_pool, gc->root_set);
-  gc->root_set = NULL;
   
-  if(!gc_requires_barriers()) return;
+  /* we only reset gc->root_set here in non-gen mode, because in gen mode we need it to remember
+     the border between root_set and rem_set in gc_rootset_pool. This is useful when a minor
+     gen collection falls back to compaction: we can then clear all the blocks in
+     gc_rootset_pool after the entry pointed to by gc->root_set. So we clear this value
+     only after we know we are not going to fall back. */
+    // gc->root_set = NULL;
+  
+  if(!gc_is_gen_mode()) return;
 
   /* put back last remset block of each mutator */
   Mutator *mutator = gc->mutator_list;
@@ -115,7 +238,7 @@
     pool_put_entry(mutator_remset_pool, mutator->rem_set);
     mutator->rem_set = NULL;
     mutator = mutator->next;
-  }  
+  }
 
   /* put back last remset block of each collector (saved in last collection) */  
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -128,7 +251,7 @@
     collector->rem_set = NULL;
   }
 
-  if( gc->collect_kind == MAJOR_COLLECTION ){
+  if( gc->collect_kind != MINOR_COLLECTION ){
     /* all the remsets are useless now */
     /* clean and put back mutator remsets */  
     root_set = pool_get_entry( mutator_remset_pool );
@@ -167,136 +290,36 @@
 
 }
 
-void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
-{
-  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
-
-  Vector_Block* root_set = mutator->rem_set;  
-  vector_block_add_entry(root_set, (unsigned int)p_ref);
-  
-  if( !vector_block_is_full(root_set)) return;
-    
-  pool_put_entry(gc_metadata.mutator_remset_pool, root_set);
-  mutator->rem_set = pool_get_entry(gc_metadata.free_set_pool);  
-  assert(mutator->rem_set);
-}
-
-void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-//  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
-
-  Vector_Block* root_set = collector->rep_set;  
-  vector_block_add_entry(root_set, (unsigned int)p_ref);
-  
-  if( !vector_block_is_full(root_set)) return;
-    
-  pool_put_entry(gc_metadata.collector_repset_pool, root_set);
-  collector->rep_set = pool_get_entry(gc_metadata.free_set_pool);  
-  assert(collector->rep_set);
-}
-
-void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
-
-  Vector_Block* root_set = collector->rem_set;  
-  vector_block_add_entry(root_set, (unsigned int)p_ref);
-  
-  if( !vector_block_is_full(root_set)) return;
-    
-  pool_put_entry(gc_metadata.collector_remset_pool, root_set);
-  collector->rem_set = pool_get_entry(gc_metadata.free_set_pool);  
-  assert(collector->rem_set);
-}
-
-void collector_tracestack_push(Collector* collector, void* p_task)
-{
-  /* we don't have assert as others because p_task is a p_obj for marking,
-     or a p_ref for trace forwarding. The latter can be a root set pointer */
-  Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;  
-  vector_stack_push(trace_task, (unsigned int)p_task);
-  
-  if( !vector_stack_is_full(trace_task)) return;
-    
-  pool_put_entry(gc_metadata.mark_task_pool, trace_task);
-  collector->trace_stack = pool_get_entry(gc_metadata.free_task_pool);  
-  assert(collector->trace_stack);
-}
-
-void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
-{
-  assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); 
-  
-  Vector_Block* root_set = gc->root_set;  
-  vector_block_add_entry(root_set, (unsigned int)p_ref);
-  
-  if( !vector_block_is_full(root_set)) return;
-    
-  pool_put_entry(gc_metadata.gc_rootset_pool, root_set);
-  gc->root_set = pool_get_entry(gc_metadata.free_set_pool);  
-  assert(gc->root_set);
-}
-
-
-static void gc_update_repointed_sets(GC* gc, Pool* pool)
+void gc_reset_rootset(GC* gc)
 {
-  GC_Metadata* metadata = gc->metadata;
-  
-  /* NOTE:: this is destructive to the root sets. */
-  Vector_Block* root_set = pool_get_entry(pool);
-
-  while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-
-      Partial_Reveal_Object* p_obj = *p_ref;
-      /* For repset, this check is unnecessary, since all slots are repointed; otherwise
-         they will not be recorded. For root set, it is possible to point to LOS or other
-         non-moved space.  */
-#ifdef _DEBUG
-      if( pool != metadata->gc_rootset_pool)
-        assert(obj_is_forwarded_in_obj_info(p_obj));
-      else
-#endif
-      if(!obj_is_forwarded_in_obj_info(p_obj)) continue;
-      *p_ref = get_forwarding_pointer_in_obj_info(p_obj);
-    }
-    vector_block_clear(root_set);
-    pool_put_entry(metadata->free_set_pool, root_set);
-    root_set = pool_get_entry(pool);
-  } 
+  assert(pool_is_empty(gc_metadata.gc_rootset_pool));
+  assert(gc->root_set == NULL);
+  gc->root_set = free_set_pool_get_entry(&gc_metadata); 
   
+  assert(vector_block_is_empty(gc->root_set)); 
   return;
-}
+}  
 
-void gc_update_repointed_refs(Collector* collector)
-{  
-  GC* gc = collector->gc;  
-  GC_Metadata* metadata = gc->metadata;
+void gc_clear_remset(GC* gc)
+{
+  assert(gc->root_set != NULL);
 
-  /* generational MINOR_COLLECTION doesn't need rootset update */
-  if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){
-    gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
-    gc_update_repointed_sets(gc, metadata->collector_repset_pool);   
+  Pool* pool = gc_metadata.gc_rootset_pool;    
+  Vector_Block* rem_set = pool_get_entry(pool);
+  while(rem_set != gc->root_set){
+    vector_block_clear(rem_set);
+    pool_put_entry(gc_metadata.free_set_pool, rem_set);
+    rem_set = pool_get_entry(pool);
   }
-  
-  gc_update_finalizer_weakref_repointed_refs(gc);
-  update_rootset_interior_pointer();
+ 
+  assert(rem_set == gc->root_set);
+  /* put back root set */
+  pool_put_entry(pool, rem_set);
     
   return;
-}
-
-void gc_reset_rootset(GC* gc)
-{
-  assert(pool_is_empty(gc_metadata.gc_rootset_pool));
-  gc->root_set = pool_get_entry(gc_metadata.free_set_pool); 
-  
-  assert(vector_block_is_empty(gc->root_set)); 
-  return;
-}  
+} 
 
+extern Boolean verify_live_heap;
 void gc_metadata_verify(GC* gc, Boolean is_before_gc)
 {
   GC_Metadata* metadata = gc->metadata;
@@ -304,12 +327,17 @@
   assert(pool_is_empty(metadata->collector_repset_pool));
   assert(pool_is_empty(metadata->mark_task_pool));
   
-  if(!is_before_gc || !gc_requires_barriers())
+  if(!is_before_gc || !gc_is_gen_mode())
     assert(pool_is_empty(metadata->mutator_remset_pool));
   
-  if(!gc_requires_barriers()){
+  if(!gc_is_gen_mode()){
     /* FIXME:: even for gen gc, it should be empty if NOS is forwarding_all */  
     assert(pool_is_empty(metadata->collector_remset_pool));
+  }
+
+  if(verify_live_heap ){
+    unsigned int free_pool_size = pool_size(metadata->free_set_pool);
+    printf("===========%s, free_pool_size = %d =============\n", is_before_gc?"before GC":"after GC", free_pool_size);
   }
   
   return;  
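
Note on the segment setup in gc_metadata_initialize()/gc_metadata_extend() above: each
segment over-allocates by one block (seg_size = ... + METADATA_BLOCK_SIZE_BYTES) and then
rounds the start address up, so every Vector_Block carved from it is block-aligned.
Assuming round_up_to_size() is the usual power-of-two round-up (its definition lives
elsewhere in the tree), the trick amounts to:

    /* Assumed shape of round_up_to_size(); valid because
       METADATA_BLOCK_SIZE_BYTES is a power of two. */
    inline unsigned int round_up_to_size(unsigned int addr, unsigned int size)
    {  return (addr + size - 1) & ~(size - 1); }

    /* e.g. with a 0x1000-byte block size, a segment malloced at 0x40123456
       yields blocks starting at 0x40124000; the extra block in seg_size
       guarantees the rounded range still fits inside the allocation. */

The check-lock-recheck in gc_metadata_extend() (try the pool, take alloc_lock, try the
pool again, only then malloc a new segment) keeps concurrent extenders from each
allocating a fresh segment for the same shortage.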

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h Thu Jan 11 05:57:16 2007
@@ -23,11 +23,16 @@
 #include "gc_common.h"
 #include "../utils/vector_block.h"
 #include "../utils/sync_pool.h"
+#include "../thread/collector.h"
+#include "../thread/mutator.h"
 
-typedef struct GC_Metadata{  
-  void* heap_start;
-  void* heap_end;
-  
+#define GC_METADATA_SEGMENT_NUM 128
+
+typedef struct GC_Metadata{
+  void *segments[GC_METADATA_SEGMENT_NUM]; /* address array of malloced segments for free pool */
+  unsigned int num_alloc_segs; /* next available position in pool_segments array */
+  SpinLock  alloc_lock;
+    
   Pool* free_task_pool; /* list of free buffers for mark tasks */
   Pool* mark_task_pool; /* list of mark tasks */
   
@@ -40,20 +45,131 @@
       
 }GC_Metadata;
 
+extern GC_Metadata gc_metadata;
+
 void gc_metadata_initialize(GC* gc);
 void gc_metadata_destruct(GC* gc);
 void gc_metadata_verify(GC* gc, Boolean is_before_gc);
 
 void gc_set_rootset(GC* gc);
 void gc_reset_rootset(GC* gc);
-void gc_update_repointed_refs(Collector* collector);
+void gc_fix_rootset(Collector* collector);
+
+void gc_clear_remset(GC* gc);
+inline void  gc_task_pool_clear(Pool* task_pool)
+{
+  Vector_Block* task = pool_get_entry(task_pool);
+  while(task){
+    vector_stack_clear(task);  
+    pool_put_entry(gc_metadata.free_task_pool, task);
+    task = pool_get_entry(task_pool);
+  } 
+  return;
+}
+
+inline void  gc_set_pool_clear(Pool* set_pool)
+{
+  Vector_Block* set = pool_get_entry(set_pool);
+  while(set){
+    vector_block_clear(set);  
+    pool_put_entry(gc_metadata.free_set_pool, set);
+    set = pool_get_entry(set_pool);
+  } 
+  return;  
+}
+
+Vector_Block* gc_metadata_extend(Pool* pool);
+
+inline Vector_Block *free_set_pool_get_entry(GC_Metadata *metadata)
+{
+  Vector_Block *block = pool_get_entry(metadata->free_set_pool);
+
+  while(!block)
+      block = gc_metadata_extend(metadata->free_set_pool);
+  
+  assert(vector_block_is_empty(block));
+  return block;
+}
+
+inline Vector_Block *free_task_pool_get_entry(GC_Metadata *metadata)
+{
+  Vector_Block *block = pool_get_entry(metadata->free_task_pool);
+
+  while(!block)
+      block = gc_metadata_extend(metadata->free_task_pool);
+   
+  assert(vector_stack_is_empty(block));
+  return block;
+}
+
+inline void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
+{
+  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
 
-void collector_tracestack_push(Collector* collector, void* p_task);
+  Vector_Block* root_set = mutator->rem_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.mutator_remset_pool, root_set);
+  mutator->rem_set = free_set_pool_get_entry(&gc_metadata);  
+  assert(mutator->rem_set);
+}
+
+inline void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+//  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
 
-void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot);
-void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
-void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_slot);
+  Vector_Block* root_set = collector->rep_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.collector_repset_pool, root_set);
+  collector->rep_set = free_set_pool_get_entry(&gc_metadata);  
+  assert(collector->rep_set);
+}
+
+inline void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+  //assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); 
 
-void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
+  Vector_Block* root_set = collector->rem_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.collector_remset_pool, root_set);
+  collector->rem_set = free_set_pool_get_entry(&gc_metadata);  
+  assert(collector->rem_set);
+}
+
+inline void collector_tracestack_push(Collector* collector, void* p_task)
+{
+  /* we don't assert here as the others do, because p_task is a p_obj for marking,
+     or a p_ref for trace forwarding; the latter can be a root set pointer */
+  Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;  
+  vector_stack_push(trace_task, (unsigned int)p_task);
+  
+  if( !vector_stack_is_full(trace_task)) return;
+    
+  pool_put_entry(gc_metadata.mark_task_pool, trace_task);
+  collector->trace_stack = free_task_pool_get_entry(&gc_metadata);  
+  assert(collector->trace_stack);
+}
+
+inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
+{
+  assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); 
+  
+  Vector_Block* root_set = gc->root_set;  
+  vector_block_add_entry(root_set, (unsigned int)p_ref);
+  
+  if( !vector_block_is_full(root_set)) return;
+    
+  pool_put_entry(gc_metadata.gc_rootset_pool, root_set);
+  gc->root_set = free_set_pool_get_entry(&gc_metadata);  
+  assert(gc->root_set);
+}
 
 #endif /* #ifndef _GC_METADATA_H_ */
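
Note: the *_add_entry helpers above all share one lock-free fast path -- append into a
thread-private Vector_Block and touch the shared pools only when the block fills up. The
pattern, reduced to a skeleton (not a new API, just the shape of the helpers above):

    inline void private_block_add(Vector_Block** my_block, Pool* full_pool,
                                  unsigned int entry)
    {
      vector_block_add_entry(*my_block, entry);      /* no synchronization */
      if( !vector_block_is_full(*my_block)) return;  /* common case: done */
      pool_put_entry(full_pool, *my_block);          /* sync once per block */
      *my_block = free_set_pool_get_entry(&gc_metadata);
    }

Synchronization cost is thus amortized over a whole block of entries rather than paid
per reference slot.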

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h?view=diff&rev=495225&r1=495224&r2=495225
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h Thu Jan 11 05:57:16 2007
@@ -21,12 +21,26 @@
 #ifndef _GC_PLATFORM_H_
 #define _GC_PLATFORM_H_
 
+#include <assert.h>
+#include <ctype.h>
 
 #include <apr_time.h>
 #include <apr_atomic.h>
 
 #include <open/hythread_ext.h>
 
+
+#ifndef _DEBUG
+
+//#define RELEASE_DEBUG
+
+#ifdef RELEASE_DEBUG
+#undef assert
+#define assert(x) do{ if(!(x)) __asm{int 3}}while(0)
+#endif
+
+#endif //_DEBUG
+
 #define USEC_PER_SEC INT64_C(1000000)
 
 #define VmThreadHandle  void*
@@ -49,19 +63,13 @@
 }
 
 inline int vm_create_event(VmEventHandle* event)
-{
-  return hysem_create(event, 0, 1);
-}
+{  return hysem_create(event, 0, 1); }
 
 inline void vm_thread_yield()
-{
-  hythread_yield();
-}
+{  hythread_yield(); }
 
 inline void* vm_thread_local()
-{
-  return hythread_self();  
-}
+{  return hythread_self();  }
 
 inline int vm_create_thread(int (*func)(void*), void *data)
 { 
@@ -80,34 +88,127 @@
 
 inline uint32 atomic_cas32(volatile apr_uint32_t *mem,
                                            apr_uint32_t swap,
-                                           apr_uint32_t cmp) {
-  return (uint32)apr_atomic_cas32(mem, swap, cmp);
+                                           apr_uint32_t cmp) 
+{  return (uint32)apr_atomic_cas32(mem, swap, cmp); }
+
+inline uint32 atomic_inc32(volatile apr_uint32_t *mem)
+{  return (uint32)apr_atomic_inc32(mem); }
+
+inline uint32 atomic_dec32(volatile apr_uint32_t *mem)
+{  return (uint32)apr_atomic_dec32(mem); }
+
+inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) 
+{  return (uint32)apr_atomic_add32(mem, val); }
+
+inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) 
+{  return (Boolean)apr_pool_create(newpool, parent);}
+
+inline void pool_destroy(apr_pool_t *p) 
+{  apr_pool_destroy(p); }
+
+#ifndef _WIN32
+#include <sys/mman.h>
+#endif
+
+inline void *vm_map_mem(void* start, unsigned int size) 
+{
+  void* address;
+#ifdef _WIN32
+  address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+#else
+  address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if(address == MAP_FAILED) address = NULL;
+    
+#endif /* ifdef _WIN32 else */
+
+  return address;
 }
 
-inline uint32 atomic_inc32(volatile apr_uint32_t *mem){
-  return (uint32)apr_atomic_inc32(mem);
+inline Boolean vm_unmap_mem(void* start, unsigned int size) 
+{
+  unsigned int result;
+#ifdef _WIN32
+  result = VirtualFree(start, 0, MEM_RELEASE);
+#else
+  result = munmap(start, size);
+  if(result == -1) result = 0;
+    
+#endif /* ifdef _WIN32 else */
+
+  return result;
 }
 
-inline uint32 atomic_dec32(volatile apr_uint32_t *mem){
-  return (uint32)apr_atomic_dec32(mem);
+inline void *vm_alloc_mem(void* start, unsigned int size) 
+{
+  void* address;
+#ifdef _WIN32
+  address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+#else
+  address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if(address == MAP_FAILED) address = NULL;
+    
+#endif /* ifdef _WIN32 else */
+
+  return address;
 }
 
-inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) {
-  return (uint32)apr_atomic_add32(mem, val);
+inline Boolean vm_free_mem(void* start, unsigned int size) 
+{
+  return vm_unmap_mem(start, size);
 }
 
-inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) {
-  return (Boolean)apr_pool_create(newpool, parent);
+inline void *vm_reserve_mem(void* start, unsigned int size) 
+{
+  void* address;
+#ifdef _WIN32
+  address = VirtualAlloc(start, size, MEM_RESERVE, PAGE_READWRITE);
+#else
+  address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if(address == MAP_FAILED) address = NULL;
+    
+#endif /* ifdef _WIN32 else */
+
+  return address;
 }
 
-inline void pool_destroy(apr_pool_t *p) {
-  apr_pool_destroy(p);
+inline void *vm_commit_mem(void* start, unsigned int size) 
+{
+  void* address;
+#ifdef _WIN32
+  address = VirtualAlloc(start, size, MEM_COMMIT, PAGE_READWRITE);
+#else
+  /* POSIX commits reserved anonymous pages on first touch; nothing to do here */
+  address = start;
+#endif /* ifdef _WIN32 else */
+
+  return address;
 }
 
+inline Boolean vm_decommit_mem(void* start, unsigned int size) 
+{
+  unsigned int result;
+#ifdef _WIN32
+  result = VirtualFree(start, size, MEM_DECOMMIT);
+#else
+  /* no POSIX decommit yet; report success so callers can proceed */
+  result = 1;
+#endif /* ifdef _WIN32 else */
 
-inline int64 time_now() {
-  return apr_time_now();
+  return result;
 }
+
+inline int64 time_now() 
+{  return apr_time_now(); }
+
+inline void string_to_upper(char* s)
+{
+  while(*s){
+    *s = toupper(*s);
+    s++;
+  }
+}  
+
+#ifdef PLATFORM_POSIX
+#define max(x, y) ((x)>(y)?(x):(y))
+#endif
 
 typedef volatile unsigned int SpinLock;
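
Note: the SpinLock typedef pairs with atomic_cas32() above; the lock()/unlock() helpers
used by gc_metadata_extend() presumably reduce to a CAS loop along these lines (a sketch
assuming FREE_LOCK == 0 and LOCKED == 1; the tree's actual helpers may differ, e.g. in
their back-off policy):

    enum { FREE_LOCK = 0, LOCKED = 1 };

    inline void lock_sketch(SpinLock* lock)
    {
      /* spin until we are the thread that flips FREE_LOCK -> LOCKED */
      while(atomic_cas32((volatile apr_uint32_t*)lock, LOCKED, FREE_LOCK) != FREE_LOCK)
        vm_thread_yield();  /* back off instead of burning the CPU */
    }

    inline void unlock_sketch(SpinLock* lock)
    {  *lock = FREE_LOCK; }  /* plain volatile store releases the lock */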