Posted to commits@harmony.apache.org by wj...@apache.org on 2006/11/19 23:16:27 UTC

svn commit: r476946 [2/2] - in /harmony/enhanced/drlvm/trunk: build/make/components/vm/ vm/gc_gen/src/common/ vm/gc_gen/src/gen/ vm/gc_gen/src/mark_compact/ vm/gc_gen/src/mark_sweep/ vm/gc_gen/src/thread/ vm/gc_gen/src/trace_forward/

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.h Sun Nov 19 14:16:25 2006
@@ -22,6 +22,7 @@
 #define _COLLECTOR_H_
 
 #include "../common/gc_common.h"
+struct Block_Header;
 
 typedef struct Collector{
   /* <-- first couple of fields are overloaded as Allocator */
@@ -33,18 +34,27 @@
   VmThreadHandle thread_handle;   /* This thread; */
   /* End of Allocator --> */
 
+  /* FIXME:: for testing */
   Space* collect_space;
-  /* collector has remsets to remember those stored during copying */  
-  RemslotSet* last_cycle_remset;   /* remembered in last cycle, used in this cycle as roots */
-  RemslotSet* this_cycle_remset;   /* remembered in this cycle, will switch with last_remslot */
 
   TraceStack *trace_stack;
-  MarkStack *mark_stack;
+  MarkStack* mark_stack;
+  
+  Vector_Block* rep_set; /* repointed set */
+  Vector_Block* rem_set;
   
   VmEventHandle task_assigned_event;
   VmEventHandle task_finished_event;
   
+  Block_Header* cur_compact_block;
+  Block_Header* cur_target_block;
+  
+  /* during compaction, save non-zero obj_info values that are overwritten by the forwarding pointer */
+  ObjectMap*  obj_info_map; 
+
   void(*task_func)(void*) ;   /* current task */
+  
+  unsigned int result;
  
 }Collector;
 
@@ -55,5 +65,8 @@
 void collector_execute_task(GC* gc, TaskType task_func, Space* space);
 
 Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj);
+
+void gc_restore_obj_info(GC* gc);
+
 
 #endif //#ifndef _COLLECTOR_H_
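
The obj_info_map moves from the space onto each Collector, and gc_restore_obj_info(GC*) is declared to put the saved object headers back after copying. Its body is not visible in this part of the mail; a rough sketch of what it presumably does, reusing the ObjectMap walk from the old fspace_restore_obj_info() (removed further down) and assuming the GC keeps a collector array (gc->collectors and gc->num_collectors are assumed names):

  /* sketch only: restore object headers that collectors saved while
     installing forwarding pointers; gc->collectors and gc->num_collectors
     are assumed fields, not part of this patch */
  void gc_restore_obj_info(GC* gc)
  {
    for(unsigned int i = 0; i < gc->num_collectors; i++){
      Collector* collector = gc->collectors[i];
      ObjectMap* objmap = collector->obj_info_map;
      ObjectMap::iterator obj_iter;
      for(obj_iter = objmap->begin(); obj_iter != objmap->end(); obj_iter++){
        Partial_Reveal_Object* p_target_obj = obj_iter->first;
        set_obj_info(p_target_obj, obj_iter->second);
      }
      objmap->clear();
    }
    return;
  }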

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Sun Nov 19 14:16:25 2006
@@ -25,18 +25,18 @@
 Space* gc_get_nos(GC_Gen* gc);
 void mutator_initialize(GC* gc, void *gc_information) 
 {
-  /* FIXME:: NOTE: gc_info is uncleared */
+  /* FIXME:: make sure gc_info is cleared */
   Mutator *mutator = (Mutator *) gc_information;
   mutator->free = NULL;
   mutator->ceiling = NULL;
   mutator->alloc_block = NULL;
   mutator->alloc_space = gc_get_nos((GC_Gen*)gc);
   mutator->gc = gc;
-  
-  assert(mutator->remslot == NULL);
-  mutator->remslot = new RemslotSet();
-  mutator->remslot->clear();
     
+  if(gc_requires_barriers()){
+    mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
+  }
+       
   lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 
   mutator->next = (Mutator *)gc->mutator_list;
@@ -53,11 +53,12 @@
 
   Mutator *mutator = (Mutator *)gc_information;
 
-  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+  if(gc_requires_barriers()){ /* put back the remset when a mutator exits */
+    pool_put_entry(gc->metadata->gc_rootset_pool, mutator->rem_set);
+    mutator->rem_set = NULL;
+  }
 
-  Fspace* fspace = (Fspace*)mutator->alloc_space;
-  fspace->remslot_sets->push_back(mutator->remslot);
-  mutator->remslot = NULL;
+  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 
   volatile Mutator *temp = gc->mutator_list;
   if (temp == mutator) {  /* it is at the head of the list */
@@ -75,5 +76,4 @@
   gc->num_mutators--;
   return;
 }
-
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Sun Nov 19 14:16:25 2006
@@ -34,8 +34,8 @@
   VmThreadHandle thread_handle;   /* This thread; */
   /* END of Allocator --> */
   
-  RemslotSet *remslot;
-  Mutator *next;  /* The gc info area associated with the next active thread. */
+  Vector_Block* rem_set;
+  Mutator* next;  /* The gc info area associated with the next active thread. */
 } Mutator;
 
 void mutator_initialize(GC* gc, void* tls_gc_info);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/thread_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/thread_alloc.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/thread_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/thread_alloc.h Sun Nov 19 14:16:25 2006
@@ -22,6 +22,7 @@
 #define _THREAD_ALLOC_H_
 
 #include "../common/gc_block.h"
+#include "../common/gc_metadata.h"
 
 typedef struct Allocator{
   void *free;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.cpp Sun Nov 19 14:16:25 2006
@@ -30,32 +30,6 @@
 Boolean forward_first_half;;
 void* object_forwarding_boundary=NULL;
 
-void fspace_save_reloc(Fspace* fspace, Partial_Reveal_Object** p_ref)
-{
-  Block_Header* block = GC_BLOCK_HEADER(p_ref);
-  block->reloc_table->push_back(p_ref);
-  return;
-}
-
-void  fspace_update_reloc(Fspace* fspace)
-{
-  SlotVector* reloc_table;
-  /* update refs in fspace */
-  Block* blocks = fspace->blocks;
-  for(unsigned int i=0; i < fspace->num_managed_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    reloc_table = block->reloc_table;
-    for(unsigned int j=0; j < reloc_table->size(); j++){
-      Partial_Reveal_Object** p_ref = (*reloc_table)[j];
-      Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
-      *p_ref = p_target_obj;
-    }
-    reloc_table->clear();
-  }
-  
-  return;  
-}  
-
 Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj)
 {  
   obj_mark_in_vt(p_obj);
@@ -66,48 +40,20 @@
   unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
   unsigned int word_mask = (1<<obj_offset_in_word);
 	
-  unsigned int result = (*p_word)|word_mask;
-	
-  if( result==(*p_word) ) return FALSE;
-  
-  *p_word = result; 
-  
-   return TRUE;
-}
-
-Boolean fspace_object_is_marked(Partial_Reveal_Object *p_obj, Fspace* fspace)
-{
-  assert(p_obj);
+  unsigned int old_value = *p_word;
+  unsigned int new_value = old_value|word_mask;
   
-#ifdef _DEBUG //TODO:: Cleanup
-  unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
-  unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); 	
-	
-  unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
-  unsigned int word_mask = (1<<obj_offset_in_word);
-	
-  unsigned int result = (*p_word)|word_mask;
-	
-  if( result==(*p_word) )
-    assert( obj_is_marked_in_vt(p_obj));
-  else 
-    assert(!obj_is_marked_in_vt(p_obj));
-    
-#endif
-
-  return (obj_is_marked_in_vt(p_obj));
-    
+  while(old_value != new_value){
+    unsigned int temp = atomic_cas32(p_word, new_value, old_value);
+    if(temp == old_value) return TRUE;
+    old_value = *p_word;
+    new_value = old_value|word_mask;
+  }
+  return FALSE;
 }
 
 static void fspace_destruct_blocks(Fspace* fspace)
-{ 
-  Block* blocks = (Block*)fspace->blocks; 
-  for(unsigned int i=0; i < fspace->num_managed_blocks; i++){
-    Block_Header* block = (Block_Header*)&(blocks[i]);
-    delete block->reloc_table;
-    block->reloc_table = NULL;
-  }
-  
+{   
   return;
 }
 
@@ -123,7 +69,6 @@
     block->base = block->free;
     block->block_idx = i + start_idx;
     block->status = BLOCK_FREE;  
-    block->reloc_table = new SlotVector();
     last_block->next = block;
     last_block = block;
   }
@@ -163,10 +108,7 @@
   
   fspace_init_blocks(fspace);
   
-  fspace->obj_info_map = new ObjectMap();
   fspace->mark_object_func = fspace_mark_object;
-  fspace->save_reloc_func = fspace_save_reloc;
-  fspace->update_reloc_func = fspace_update_reloc;
 
   fspace->move_object = TRUE;
   fspace->num_collections = 0;
@@ -174,10 +116,7 @@
   gc_set_nos((GC_Gen*)gc, (Space*)fspace);
   /* above is same as Mspace init --> */
   
-  fspace->remslot_sets = new std::vector<RemslotSet *>();
-  fspace->rem_sets_lock = FREE_LOCK;
-
-  nos_boundary = fspace->heap_end;
+  nos_boundary = fspace->heap_start;
 
   forward_first_half = TRUE;
   object_forwarding_boundary = (void*)&fspace->blocks[fspace->first_block_idx + (unsigned int)(fspace->num_managed_blocks * NURSERY_OBJECT_FORWARDING_RATIO)];
@@ -216,7 +155,7 @@
   unsigned int last_idx = fspace->ceiling_block_idx;
   Block* blocks = fspace->blocks;
   unsigned int num_freed = 0;
-  for(unsigned int i = first_idx; i <= last_idx; i++){
+  for(unsigned int i = 0; i <= last_idx-first_idx; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
     if(block->status == BLOCK_FREE) continue;
     block_clear_mark_table(block); 
@@ -236,6 +175,8 @@
   fspace->num_collections++;  
   
   GC* gc = fspace->gc;
+
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
 
   if(gc_requires_barriers()){ 
     /* generational GC. Only trace (mark) nos */
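
fspace_mark_object() above now sets the mark bit with an atomic_cas32 loop and returns TRUE only for the thread that actually flips the bit, so several collectors can race on the same object during parallel marking without scanning it twice. A hedged usage sketch (illustrative only; the real trace loop lives elsewhere in this commit):

  /* sketch: typical consumer of the CAS-based mark during parallel marking;
     only the winning collector scans the object, losers back off */
  static void trace_object_sketch(Collector* collector, Partial_Reveal_Object* p_obj)
  {
    Fspace* fspace = (Fspace*)collector->collect_space;
    if( !fspace_mark_object(fspace, p_obj) )
      return;                      /* another collector marked it first */
    scan_object(collector, p_obj); /* scanned exactly once */
  }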

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace.h Sun Nov 19 14:16:25 2006
@@ -44,8 +44,6 @@
   GC* gc;
   Boolean move_object;
   Boolean (*mark_object_func)(Fspace* space, Partial_Reveal_Object* p_obj);
-  void (*save_reloc_func)(Fspace* space, Partial_Reveal_Object** p_ref);
-  void (*update_reloc_func)(Fspace* space);
   /* END of Space --> */
 
   Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -58,16 +56,8 @@
   unsigned int num_used_blocks;
   unsigned int num_managed_blocks;
   unsigned int num_total_blocks;
-
-  /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */
-  ObjectMap*  obj_info_map; 
   /* END of Blocked_Space --> */
-    
-  /* saved remsets of collectors */
-  /* saved remsets of exited mutators */
-  std::vector<RemslotSet *> *remslot_sets;
-  SpinLock rem_sets_lock;
-  
+      
 } Fspace;
 
 void fspace_initialize(GC* gc, void* start, unsigned int fspace_size);
@@ -81,15 +71,8 @@
 void* fspace_alloc(unsigned size, Allocator *allocator);
 
 Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj);
-void fspace_save_reloc(Fspace* fspace, Partial_Reveal_Object** p_ref);
-void fspace_update_reloc(Fspace* fspace);
-void reset_fspace_for_allocation(Fspace* fspace);
 
-inline Block_Header* fspace_get_first_copy_block(Fspace* fspace)
-{  return (Block_Header*)fspace->blocks; }
-
-inline Block_Header* fspace_get_next_copy_block(Fspace* fspace, Block_Header* block)
-{  return block->next; }
+void reset_fspace_for_allocation(Fspace* fspace);
 
 
 Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp Sun Nov 19 14:16:25 2006
@@ -23,26 +23,54 @@
 #include "../mark_sweep/lspace.h"
 #include "../thread/collector.h"
 
+static volatile Block_Header* current_copy_block;
+static volatile Block_Header* current_target_block;
+
+static Block_Header* fspace_get_first_copy_block(Fspace* fspace)
+{  return (Block_Header*)fspace->blocks; }
+
+static Block_Header* fspace_get_next_copy_block(Fspace* fspace)
+{  
+  /* FIXME::FIXME:: this only works for full space copying */
+  Block_Header* cur_copy_block = (Block_Header*)current_copy_block;
+  
+  while(cur_copy_block != NULL){
+    Block_Header* next_copy_block = current_copy_block->next;
+
+    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&current_copy_block, next_copy_block, cur_copy_block);
+    if(temp == cur_copy_block)
+      return cur_copy_block;
+      
+    cur_copy_block = (Block_Header*)current_copy_block;
+  }
+  /* ran out of fspace blocks to copy */
+  return NULL;
+}
+
+
 /* copying of fspace is only for MAJOR_COLLECTION or non-generational partial copy collection */
 static Block_Header* mspace_get_first_target_block_for_nos(Mspace* mspace)
 {  
   return (Block_Header*)&mspace->blocks[mspace->free_block_idx-mspace->first_block_idx];
 }
 
-static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace, Block_Header* block)
-{ return block->next; }
-
-static void fspace_restore_obj_info(Fspace* fspace)
-{
-  ObjectMap* objmap = fspace->obj_info_map;
-  ObjectMap::iterator obj_iter;
-  for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
-    Partial_Reveal_Object* p_target_obj = obj_iter->first;
-    Obj_Info_Type obj_info = obj_iter->second;
-    set_obj_info(p_target_obj, obj_info);     
+static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace)
+{ 
+  Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
+  Block_Header* cur_target_block = (Block_Header*)current_target_block;
+  Block_Header* next_target_block = current_target_block->next;
+  
+  while(cur_target_block < mspace_heap_end){
+    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&current_target_block, next_target_block, cur_target_block);
+    if(temp == cur_target_block)
+      return cur_target_block;
+      
+    cur_target_block = (Block_Header*)current_target_block;
+    next_target_block = current_target_block->next;     
   }
-  objmap->clear();
-  return;  
+  /* mos is always able to hold nos in minor collection */
+  assert(0);
+  return NULL;
 }
 
 struct GC_Gen;
@@ -51,11 +79,13 @@
 Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace)
 {  
   Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)collector->gc);
-  Block_Header* dest_block = mspace_get_first_target_block_for_nos(mspace);    
-  Block_Header* curr_block = fspace_get_first_copy_block(fspace);
+  Block_Header* dest_block = mspace_get_next_target_block_for_nos(mspace);    
+  Block_Header* curr_block = fspace_get_next_copy_block(fspace);
 
+  assert(dest_block->status == BLOCK_FREE);
+  dest_block->status = BLOCK_USED;
   void* dest_addr = GC_BLOCK_BODY(dest_block);
- 
+  
   while( curr_block ){
     unsigned int mark_bit_idx;
     Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
@@ -67,15 +97,17 @@
       
       if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
         dest_block->free = dest_addr;
-        dest_block = mspace_get_next_target_block_for_nos(mspace, dest_block);
+        dest_block = mspace_get_next_target_block_for_nos(mspace);
         if(dest_block == NULL) return FALSE;
+        assert(dest_block->status == BLOCK_FREE);
+        dest_block->status = BLOCK_USED;
         dest_addr = GC_BLOCK_BODY(dest_block);
       }
       assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
       
       Obj_Info_Type obj_info = get_obj_info(p_obj);
       if( obj_info != 0 ) {
-        fspace->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
+        collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
       }
       set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
 
@@ -84,11 +116,9 @@
       p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
   
     }
-    curr_block = fspace_get_next_copy_block(fspace, curr_block);
+    curr_block = fspace_get_next_copy_block(fspace);
   }
-  
-  mspace->free_block_idx = dest_block->block_idx+1;
-  
+    
   return TRUE;
 }   
 
@@ -96,7 +126,7 @@
 
 void fspace_copy_collect(Collector* collector, Fspace* fspace) 
 {  
-  Block_Header* curr_block = fspace_get_first_copy_block(fspace);
+  Block_Header* curr_block = fspace_get_next_copy_block(fspace);
   
   while( curr_block ){
     unsigned int mark_bit_idx;
@@ -119,36 +149,70 @@
       p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);  
     }
         
-    curr_block = fspace_get_next_copy_block(fspace, curr_block);
+    curr_block = fspace_get_next_copy_block(fspace);
   }
-  
-  fspace_restore_obj_info(fspace);
-  reset_fspace_for_allocation(fspace);  
-  
+    
   return;
 }
 
-void gc_gen_update_repointed_refs(Collector* collector);
+void gc_update_repointed_refs(Collector* collector);
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_installing_collectors = 0;
 
 void mark_copy_fspace(Collector* collector) 
 {  
   GC* gc = collector->gc;
   Fspace* fspace = (Fspace*)collector->collect_space;
-  
-  /* FIXME:: Single-threaded mark-copying for fspace currently */
+  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
 
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
   /* Pass 1: mark all live objects in heap, and save all the slots that 
              have references  that are going to be repointed */
-  mark_scan_heap(collector);
+  atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+             
+  mark_scan_heap_par(collector);
+
+  unsigned int old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* single-threaded world for the last collector, e.g., verification of last phase, and preparation of next phase */
+    current_copy_block = fspace_get_first_copy_block(fspace);
+    current_target_block = mspace_get_first_target_block_for_nos(mspace);    
+    /* let other collectors go */
+    num_marking_collectors++; 
+  }
+  
+  while(num_marking_collectors != num_active_collectors + 1);
 
   /* Pass 2: assign each live fspace object a new location */
+  atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
+
   fspace_compute_object_target(collector, fspace);  
 
-  gc_gen_update_repointed_refs(collector);
+  old_num = atomic_inc32(&num_installing_collectors);
+  if( ++old_num == num_active_collectors){
+    /* single-threaded region: record where the target blocks ended */
+    mspace->free_block_idx = current_target_block->block_idx;
+    num_installing_collectors++; 
+  }
+  
+  while(num_installing_collectors != num_active_collectors + 1);
+
+  /* FIXME:: temporary. let only one thread go forward */
+  if( collector->thread_handle != 0 ) return;
+  
+  gc_update_repointed_refs(collector);
 
   /* FIXME:: Pass 2 and 3 can be merged into one pass */
   /* Pass 3: copy live fspace object to new location */
-  fspace_copy_collect(collector, fspace);        
+  current_copy_block = fspace_get_first_copy_block(fspace);
+  fspace_copy_collect(collector, fspace);
+          
+  /* FIXME:: should be collector_restore_obj_info(collector) */
+  gc_restore_obj_info(gc);
+  
+  reset_fspace_for_allocation(fspace);  
     
   return;
 }
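
mark_copy_fspace() above now runs on every collector thread and uses num_marking_collectors / num_installing_collectors as phase barriers: each collector increments the counter, the last one to arrive does the single-threaded work and then bumps the counter past num_active_collectors to release the spinning threads. The same pattern distilled into one helper, as a sketch with assumed names (atomic_cas32 and atomic_inc32 are the primitives already used above; atomic_inc32 returns the pre-increment value):

  static volatile unsigned int num_finished_collectors = 0;

  /* sketch of the phase-barrier pattern used in mark_copy_fspace();
     collector_phase_rendezvous and single_thread_work are illustrative names */
  static void collector_phase_rendezvous(GC* gc, void (*single_thread_work)(GC*))
  {
    unsigned int num_active = gc->num_active_collectors;
    /* reset the counter if it still holds the release value from the last phase */
    atomic_cas32(&num_finished_collectors, 0, num_active + 1);

    unsigned int old_num = atomic_inc32(&num_finished_collectors);
    if( old_num + 1 == num_active ){   /* last collector to arrive */
      single_thread_work(gc);
      num_finished_collectors++;       /* becomes num_active + 1: release the others */
    }
    while(num_finished_collectors != num_active + 1); /* spin until released */
  }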

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp?view=diff&rev=476946&r1=476945&r2=476946
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp Sun Nov 19 14:16:25 2006
@@ -20,6 +20,7 @@
 
 #include "fspace.h"
 #include "../thread/collector.h"
+#include "../common/gc_metadata.h"
 
 static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
 {
@@ -99,9 +100,10 @@
   if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
     assert(!obj_is_forwarded_in_vt(p_obj));
     /* this obj remains in fspace, remember its ref slot for next GC. */
-    if( !address_belongs_to_space(p_ref, space) )
-      collector->this_cycle_remset->push_back(p_ref); 
-
+    if( !address_belongs_to_space(p_ref, space) ){
+      collector_remset_add_entry(collector, p_ref); 
+    }
+    
     if(fspace_mark_object((Fspace*)space, p_obj)) 
       scan_object(collector, p_obj);
     
@@ -145,29 +147,35 @@
   }
 }
 
-static void collector_trace_remsets(Collector* collector)
+static void collector_trace_rootsets(Collector* collector)
 {
-  Fspace* fspace = (Fspace*)collector->collect_space;
+  GC_Metadata* metadata = collector->gc->metadata;  
   
+  Space* space = collector->collect_space;
   HashSet remslot_hash;
 
   /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
-  for(unsigned int i=0; i< fspace->remslot_sets->size(); i++) {
-    RemslotSet* remslot = (*fspace->remslot_sets)[i];
-    for (unsigned int j = 0; j < remslot->size(); j++) {
-      Partial_Reveal_Object **ref = (*remslot)[j];
-      assert(ref);
-      if(*ref == NULL) continue;  
-      if (obj_belongs_to_space(*ref, (Space*)fspace)) {
-        if (remslot_hash.find(ref) == remslot_hash.end()) {
-          remslot_hash.insert(ref);
-          trace_root(collector, ref);
+  pool_iterator_init(metadata->gc_rootset_pool);
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  while(root_set){    
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      assert(p_ref);
+      if(*p_ref == NULL) continue;  
+      if (obj_belongs_to_space(*p_ref, space)) {
+        if (remslot_hash.find(p_ref) == remslot_hash.end()) {
+          remslot_hash.insert(p_ref);
+          trace_root(collector, p_ref);
         }
       }
     }
-    remslot->clear();  
+    pool_put_entry(metadata->free_set_pool, root_set);
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
   }
-  fspace->remslot_sets->clear();
     
   return;
 }
@@ -186,8 +194,7 @@
   
   /* FIXME:: Single-threaded trace-forwarding for fspace currently */
 
-  space->remslot_sets->push_back(gc->root_set);
-  collector_trace_remsets(collector);
+  collector_trace_rootsets(collector);
 
   update_relocated_refs(collector);
   reset_fspace_for_allocation(space);
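
collector_remset_add_entry(), called above when a slot that points into fspace stays behind for the next GC, is not defined in this part of the mail. It presumably appends the slot to collector->rem_set and swaps in a fresh vector block when the current one fills up; a sketch under that assumption (vector_block_is_full, vector_block_add_entry and the collector_remset_pool name are assumptions; only pool_get_entry/pool_put_entry appear in this patch):

  /* sketch only: collector-side remembered-set append; vector_block_is_full,
     vector_block_add_entry and collector_remset_pool are assumed names */
  static void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
  {
    GC_Metadata* metadata = collector->gc->metadata;
    if( vector_block_is_full(collector->rem_set) ){
      pool_put_entry(metadata->collector_remset_pool, collector->rem_set);
      collector->rem_set = pool_get_entry(metadata->free_set_pool);
    }
    vector_block_add_entry(collector->rem_set, (unsigned int)p_ref);
  }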