Posted to commits@harmony.apache.org by xl...@apache.org on 2008/01/11 15:38:50 UTC

svn commit: r611186 [2/3] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ los/ mark_compact/ mark_sweep/ move_compact/ semi_space/ thread/ trace_forward/ utils/ verify/

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp Fri Jan 11 06:38:32 2008
@@ -22,6 +22,8 @@
 #include "gen.h"
 #include "gen_stats.h"
 
+Boolean gc_profile = FALSE;
+
 void gc_gen_stats_initialize(GC_Gen* gc)
 {
   GC_Gen_Stats* stats = (GC_Gen_Stats*)STD_MALLOC(sizeof(GC_Gen_Stats));
@@ -71,7 +73,7 @@
       gc_gen_stats->nos_surviving_obj_size_minor += collector_stats->nos_obj_size_moved_minor;
     }
 
-    gc_gen_stats->nos_surviving_ration_minor = ((float)gc_gen_stats->nos_surviving_obj_size_minor)/gc->nos->committed_heap_size;
+    gc_gen_stats->nos_surviving_ratio_minor = ((float)gc_gen_stats->nos_surviving_obj_size_minor)/gc->nos->committed_heap_size;
 
   }else{
 
@@ -91,7 +93,7 @@
   }
 
   if (is_los_collected) {
-    gc_gen_stats->los_surviving_ration = ((float)gc_gen_stats->los_suviving_obj_size)/gc->los->committed_heap_size;
+    gc_gen_stats->los_surviving_ratio = ((float)gc_gen_stats->los_suviving_obj_size)/gc->los->committed_heap_size;
   }
 }
 
@@ -104,7 +106,7 @@
       <<"\nGC: collection algo: "<<((stats->nos_collection_algo_minor==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward")
       <<"\nGC: num surviving objs: "<<stats->nos_surviving_obj_num_minor
       <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_surviving_obj_size_minor)
-      <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ration_minor*100)<<"%\n");
+      <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ratio_minor*100)<<"%\n");
   }else{
     TRACE2("gc.space", "GC: Mspace Collection stats: "
       <<"\nGC: collection algo: "<<((stats->nos_mos_collection_algo_major==MAJOR_COMPACT_SLIDE)?"slide compact":"move compact")
@@ -118,7 +120,7 @@
       <<"\nGC: collection algo: "<<((stats->los_collection_algo==MAJOR_COMPACT_SLIDE)?"slide compact":"mark sweep")
       <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
       <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
-      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ration*100)<<"%\n");
+      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ratio*100)<<"%\n");
   }
 
 }

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h Fri Jan 11 06:38:32 2008
@@ -20,6 +20,8 @@
 
 #include "gen.h"
 
+extern Boolean gc_profile;
+
 typedef struct GC_Gen_Stats {
   unsigned int num_minor_collections;
   unsigned int num_major_collections;
@@ -37,7 +39,7 @@
   /*minor related info*/
   unsigned int nos_surviving_obj_num_minor;
   POINTER_SIZE_INT nos_surviving_obj_size_minor;
-  float nos_surviving_ration_minor;
+  float nos_surviving_ratio_minor;
   int nos_collection_algo_minor;
 
   /*major related info*/
@@ -50,7 +52,7 @@
   Boolean is_los_collected; /*whether large obj space is collected or not*/
   unsigned int los_suviving_obj_num;
   POINTER_SIZE_INT los_suviving_obj_size;
-  float los_surviving_ration;
+  float los_surviving_ratio;
   int los_collection_algo;
 
 }GC_Gen_Stats;
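
Note: this hunk only declares gc_profile; a plausible use (an assumption, not shown in this diff) is to gate the verbose stats dump implemented in gen_stats.cpp, along these lines:

    /* hypothetical call site; gc_gen_stats_verbose is assumed, not part of this diff */
    if(gc_profile)
      gc_gen_stats_verbose(gc);  /* the TRACE2("gc.space", ...) dump shown above */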

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,81 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gen.h"
+
+#ifndef STATIC_NOS_MAPPING
+void* nos_space_adjust(Space* nos, void* new_nos_boundary, POINTER_SIZE_INT new_nos_size)
+{
+  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+    return sspace_heap_start_adjust((Sspace*)nos, new_nos_boundary, new_nos_size);
+  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL )
+    return fspace_heap_start_adjust((Fspace*)nos, new_nos_boundary, new_nos_size);  
+  
+  assert(0);
+  return NULL;
+}
+#endif
+
+POINTER_SIZE_INT mos_free_space_size(Space* mos)
+{
+  POINTER_SIZE_INT free_size = 0;
+  if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+    return mspace_free_space_size((Mspace*)mos);
+
+  assert(0);
+  return free_size; 
+}
+
+POINTER_SIZE_INT nos_free_space_size(Space* nos)
+{
+  POINTER_SIZE_INT free_size = 0;
+  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+    return sspace_free_space_size((Sspace*)nos);
+  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL )
+    return fspace_free_space_size((Fspace*)nos);
+
+  assert(0);
+  return free_size; 
+ 
+}
+
+POINTER_SIZE_INT mos_used_space_size(Space* mos)
+{
+  POINTER_SIZE_INT free_size = 0;
+  if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+    return mspace_used_space_size((Mspace*)mos);
+
+  assert(0);
+  return free_size; 
+}
+
+POINTER_SIZE_INT nos_used_space_size(Space* nos)
+{
+  POINTER_SIZE_INT free_size = 0;
+  if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+    return sspace_used_space_size((Sspace*)nos);
+  else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL )
+    return fspace_used_space_size((Fspace*)nos);
+
+  assert(0);
+  return free_size; 
+ 
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_utils.cpp
------------------------------------------------------------------------------
    svn:eol-style = native
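
The dispatchers above select the space implementation from collect_algorithm. A minimal usage sketch (the combining helper below is hypothetical and not part of this commit; gc_get_nos/gc_get_mos are declared elsewhere in gc_gen):

    POINTER_SIZE_INT gc_gen_free_size(GC_Gen* gc)  /* hypothetical helper */
    {
      return nos_free_space_size(gc_get_nos(gc)) +
             mos_free_space_size(gc_get_mos(gc));
    }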

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.cpp Fri Jan 11 06:38:32 2008
@@ -61,7 +61,7 @@
   
   /* Else, for last bucket MAX_LIST_INDEX, we must traverse it */
   while(  area != (Free_Area*)list ){
-    if(area->size >= size)	return area;
+    if(area->size >= size)  return area;
     area = (Free_Area*)(area->next);
   }
   

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/free_area_pool.h Fri Jan 11 06:38:32 2008
@@ -38,7 +38,7 @@
   Bidir_List* next;
   Bidir_List* prev;
   /* END of Bidir_List --> */
-  SpinLock lock;	
+  SpinLock lock;  
 }Lockable_Bidir_List;
 
 typedef struct Free_Area{
@@ -120,7 +120,7 @@
   /* set bit flag of the list */
   Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]);
   if(list->next == list){
-  	pool_list_clear_flag(pool, index);		
+    pool_list_clear_flag(pool, index);    
   }
 }
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Fri Jan 11 06:38:32 2008
@@ -245,7 +245,7 @@
   void* dest_addr = lspace->heap_start;
   unsigned int iterate_index = 0;
   Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
-  
+  	
   assert(!collector->rem_set);
   collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
 #ifdef USE_32BITS_HASHCODE  
@@ -256,6 +256,7 @@
   GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
 #endif
   while( p_obj ){
+   
     assert( obj_is_marked_in_vt(p_obj));
     unsigned int obj_size = vm_object_size(p_obj);
 #ifdef GC_GEN_STATS
@@ -478,7 +479,6 @@
   TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
   return;
 }
-
 
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp Fri Jan 11 06:38:32 2008
@@ -259,4 +259,3 @@
 
 
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.cpp Fri Jan 11 06:38:32 2008
@@ -109,9 +109,8 @@
     block->src = NULL;
     block->next_src = NULL;
     assert(!block->dest_counter);
-
     if(i >= new_num_used){
-      block->status = BLOCK_FREE; 
+      block->status = BLOCK_FREE;
       block->free = GC_BLOCK_BODY(block);
     }
   }
@@ -172,6 +171,5 @@
 {
     return mspace->expected_threshold_ratio;
 }
-
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace.h Fri Jan 11 06:38:32 2008
@@ -74,6 +74,12 @@
 Mspace *mspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size);
 void mspace_destruct(Mspace* mspace);
 
+inline POINTER_SIZE_INT mspace_free_space_size(Mspace* mos)
+{ return blocked_space_free_mem_size((Blocked_Space*)mos);}
+
+inline POINTER_SIZE_INT mspace_used_space_size(Mspace* mos)
+{ return blocked_space_used_mem_size((Blocked_Space*)mos);}
+
 void* mspace_alloc(unsigned size, Allocator *allocator);
 void mspace_collection(Mspace* mspace);
 void mspace_reset_after_collection(Mspace* mspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_alloc.cpp Fri Jan 11 06:38:32 2008
@@ -74,4 +74,3 @@
 
 
 
-

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp Fri Jan 11 06:38:32 2008
@@ -36,12 +36,15 @@
 Space* gc_get_mos(GC_Gen* gc);
 Space* gc_get_los(GC_Gen* gc);
 
+extern Boolean verify_live_heap;
+volatile unsigned int debug_num_compact_blocks;
+
 static void mspace_move_objects(Collector* collector, Mspace* mspace) 
 {
   Block_Header* curr_block = collector->cur_compact_block;
   Block_Header* dest_block = collector->cur_target_block;
   Block_Header *local_last_dest = dest_block;
-  
+
   void* dest_sector_addr = dest_block->base;
   Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION);
   
@@ -55,7 +58,15 @@
   GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
 #endif
 
+  unsigned int debug_num_live_obj = 0;
+
   while( curr_block ){
+
+    if(verify_live_heap){ 
+      atomic_inc32(&debug_num_compact_blocks);
+      debug_num_live_obj = 0;
+    }
+    
     void* start_pos;
     Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);
 
@@ -63,6 +74,7 @@
  #ifdef USE_32BITS_HASHCODE      
       hashcode_buf_clear(curr_block->hashcode_buf);
  #endif
+      assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
       curr_block = mspace_get_next_compact_block(collector, mspace);
       continue;    
     }
@@ -71,9 +83,11 @@
     void* src_sector_addr = p_obj;
           
     while( p_obj ){
+
+      debug_num_live_obj++;
       assert( obj_is_marked_in_vt(p_obj));
       /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */
-      obj_unmark_in_oi(p_obj); 
+      obj_clear_dual_bits_in_oi(p_obj); 
 
 #ifdef GC_GEN_STATS
       gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
@@ -119,9 +133,11 @@
       /* current sector is done, let's move it. */
       POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
       assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
+      /* if sector_distance is zero, no move is needed. But since the block table is never cleared, we still have to store 0 into it. */
       curr_block->table[curr_sector] = sector_distance;
 
-      memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
+      if(sector_distance != 0) 
+        memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
 
 #ifdef USE_32BITS_HASHCODE
       hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance);
@@ -134,8 +150,10 @@
 #ifdef USE_32BITS_HASHCODE      
     hashcode_buf_clear(curr_block->hashcode_buf);
  #endif    
+    assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
     curr_block = mspace_get_next_compact_block(collector, mspace);
   }
+    
   dest_block->new_free = dest_sector_addr;
   collector->cur_target_block = local_last_dest;
  
@@ -172,8 +190,8 @@
 {
   GC* gc = collector->gc;
   Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
-  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
   Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
+  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   
   unsigned int num_active_collectors = gc->num_active_collectors;
   
@@ -205,7 +223,7 @@
     }
 #endif
     gc_identify_dead_weak_roots(gc);
-
+    debug_num_compact_blocks = 0;
     /* let other collectors go */
     num_marking_collectors++; 
   }
@@ -225,13 +243,20 @@
   old_num = atomic_inc32(&num_moving_collectors);
   if( ++old_num == num_active_collectors ){
     /* single thread world */
-    if(lspace->move_object) lspace_compute_object_target(collector, lspace);    
+    if(lspace->move_object) 
+      lspace_compute_object_target(collector, lspace);    
+    
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){
       num_moving_collectors++; 
       return;
     }
- 
+    
+    if(verify_live_heap){
+      assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks );	
+      debug_num_compact_blocks = 0;
+    }
+
     gc_reset_block_for_collectors(gc, mspace);
     blocked_space_block_iterator_init((Blocked_Space*)mspace);
     num_moving_collectors++; 
@@ -256,6 +281,7 @@
     lspace_fix_repointed_refs(collector, lspace);   
     gc_fix_rootset(collector, FALSE);
     if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    
+    
     num_fixing_collectors++; 
   }
   while(num_fixing_collectors != num_active_collectors + 1);
@@ -263,7 +289,8 @@
   TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass3");
 
   /* Pass 4: **************************************************
-     restore obj_info                                         */
+     restore obj_info. Actually only LOS needs it. Since the oi is recorded for the new address, the restoration
+     doesn't need to specify a space. */
 
   TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ...");
 
@@ -276,7 +303,7 @@
   while(num_restoring_collectors != num_active_collectors);
 
    /* Dealing with out of memory in mspace */  
-  if(mspace->free_block_idx > fspace->first_block_idx){    
+  if(mspace->free_block_idx > nos->first_block_idx){    
      atomic_cas32( &num_extending_collectors, 0, num_active_collectors);        
      mspace_extend_compact(collector);        
      atomic_inc32(&num_extending_collectors);    
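
For readers following the sector logic above: curr_block->table[curr_sector] records src_sector_addr - dest_sector_addr, so the post-compaction address of an object is its old address minus its sector's table entry. A minimal sketch of the consuming side (the helper name is hypothetical and the sector-index computation is omitted):

    Partial_Reveal_Object* obj_new_addr(Block_Header* block,
                                        Partial_Reveal_Object* p_obj,
                                        unsigned int sector)
    {
      POINTER_SIZE_INT distance = block->table[sector]; /* 0 means this sector didn't move */
      return (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - distance);
    }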

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Fri Jan 11 06:38:32 2008
@@ -100,15 +100,27 @@
   marker_execute_task((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace);
 }
 
+/*FIXME: move this function out of this file.*/
+void gc_check_mutator_allocation(GC* gc)
+{
+  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    wait_mutator_signal(mutator, MUTATOR_ENTER_ALLOCATION_MARK);
+    mutator = mutator->next;
+  }
 
+  unlock(gc->mutator_list_lock);
+}
 
 void wspace_sweep_concurrent(Collector* collector);
 void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors)
 {
   ops_color_flip();
-  //FIXME: Need barrier here.
-  //apr_memory_rw_barrier();
-  gc_disenable_alloc_obj_live();
+  mem_fence();
+  gc_disable_alloc_obj_live();
+  gc_check_mutator_allocation((GC*)gc);
   wspace_init_pfc_pool_iterator(gc->wspace);
   
   collector_execute_task_concurrent((GC*)gc, (TaskType)wspace_sweep_concurrent, (Space*)gc->wspace, num_collectors);
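
The new gc_check_mutator_allocation pairs with mutator-side signals added further down in this commit (wspace_alloc.cpp): each mutator brackets its slot allocation with enter/exit marks, so the wait above cannot complete while an allocation is mid-flight across the color flip. On the mutator side, schematically:

    mutator_post_signal((Mutator*)allocator, MUTATOR_ENTER_ALLOCATION_MARK);
    void* p_obj = alloc_in_chunk(chunks[index]);
    mutator_post_signal((Mutator*)allocator, MUTATOR_EXIT_ALLOCATION_MARK);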

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Fri Jan 11 06:38:32 2008
@@ -78,6 +78,7 @@
   SpinLock concurrent_mark_lock;
   SpinLock enumerate_rootset_lock;
   SpinLock concurrent_sweep_lock;
+  SpinLock collection_scheduler_lock;
   
   /* system info */
   unsigned int _system_alloc_unit;

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp Fri Jan 11 06:38:32 2008
@@ -132,21 +132,32 @@
   /* Find local chunk pointers' head and their number */
   for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
     if(size_segs[i]->local_alloc){
-      chunk_ptr_num += size_segs[i]->chunk_num;
+      chunk_ptr_num = size_segs[i]->chunk_num;
       assert(local_chunks[i]);
-      if(!chunk_ptrs)
+      if(!chunk_ptrs){
         chunk_ptrs = local_chunks[i];
-    }
-  }
+
   
-  /* Put local pfc to the according pools */
-  for(unsigned int i = 0; i < chunk_ptr_num; ++i){
-    if(chunk_ptrs[i])
-      wspace_put_pfc(wspace, chunk_ptrs[i]);
+        /* Put local pfc to the corresponding pools */
+        for(unsigned int i = 0; i < chunk_ptr_num; ++i){
+          if(chunk_ptrs[i]){
+            if(!USE_CONCURRENT_GC){
+              wspace_put_pfc(wspace, chunk_ptrs[i]);
+            }else{
+              Chunk_Header* chunk_to_rem = chunk_ptrs[i];
+              chunk_to_rem->status = CHUNK_USED | CHUNK_NORMAL;
+              wspace_register_used_chunk(wspace, chunk_to_rem);
+            }
+          }
+        }
+        
+        /* Free mem for local chunk pointers */
+        STD_FREE(chunk_ptrs);
+        chunk_ptrs = NULL;
+      }
+    }
   }
   
-  /* Free mem for local chunk pointers */
-  STD_FREE(chunk_ptrs);
   
   /* Free mem for size segments (Chunk_Header**) */
   STD_FREE(local_chunks);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp Fri Jan 11 06:38:32 2008
@@ -95,7 +95,10 @@
     
     mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
   }
+  
+  mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK);
   void *p_obj = alloc_in_chunk(chunks[index]);
+  mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK);
 
   if(chunk->slot_index == MAX_SLOT_INDEX){
     chunk->status = CHUNK_USED | CHUNK_NORMAL;
@@ -147,7 +150,9 @@
       mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
     }    
     
+    mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK);
     p_obj = alloc_in_chunk(chunks[index]);
+    mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK);
     
     if(chunk->slot_index == MAX_SLOT_INDEX){
       chunk->status = CHUNK_USED | CHUNK_NORMAL;
@@ -211,11 +216,13 @@
   
   if(!chunk) return NULL;
   abnormal_chunk_init(chunk, chunk_size, size);
+  
+  mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK);  
   if(is_obj_alloced_live()){
     chunk->table[0] |= cur_mark_black_color  ;
   } 
-  //FIXME: Need barrier here.   
-  //apr_memory_rw_barrier();
+  mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK);
+  mem_fence();
   
   chunk->table[0] |= cur_alloc_color;
   set_super_obj_mask(chunk->base);
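
The mem_fence() that replaces the old FIXME is an ordering constraint: the live (mark-black) bit must become visible before the alloc bit publishes the object, otherwise a concurrent sweeper could observe an allocated-but-unmarked slot and reclaim it. In outline:

    chunk->table[0] |= cur_mark_black_color;  /* (1) mark the new object live */
    mem_fence();                              /* (2) order (1) before (3)     */
    chunk->table[0] |= cur_alloc_color;       /* (3) publish the allocation   */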

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h Fri Jan 11 06:38:32 2008
@@ -136,7 +136,7 @@
 
 inline void alloc_slot_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
 {
-  assert(!slot_is_alloc_in_table(table, slot_index));
+  //assert(!slot_is_alloc_in_table(table, slot_index));
   
   unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
   unsigned int word_index = color_bits_index / BITS_PER_WORD;
@@ -156,6 +156,7 @@
       return; /*returning true does not mean it's marked by this thread. */
     }
     old_word = *p_color_word;
+    //FIXME: this assertion is too strong here because of concurrent sweeping.
     assert(!slot_is_alloc_in_table(table, slot_index));
     
     new_word = old_word | mark_alloc_color;
@@ -187,8 +188,7 @@
   if(p_obj && is_obj_alloced_live())
     obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
   
-  //FIXME: Need barrier here.
-  //apr_memory_rw_barrier();
+  mem_fence();
   
   alloc_slot_in_table(table, slot_index);
   if(chunk->status & CHUNK_NEED_ZEROING)
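
For reference, the color-table addressing used by alloc_slot_in_table works out as follows (the COLOR_BITS_PER_OBJ and BITS_PER_WORD values below are illustrative assumptions):

    /* e.g. slot_index 21, COLOR_BITS_PER_OBJ == 4, BITS_PER_WORD == 32:
       color_bits_index = 21 * 4  = 84
       word_index       = 84 / 32 = 2   -> the slot's color bits live in table[2]
       bit offset       = 84 % 32 = 20  -> at bits 20..23 of that word          */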

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp Fri Jan 11 06:38:32 2008
@@ -825,30 +825,30 @@
 
 void *malloc_wrapper(int size)
 {
-	massert(size > 0);
-	if(!cur_free_ptr) {
-		cur_free_bytes = INIT_ALLOC_SIZE;
-		cur_free_ptr = (char*) STD_MALLOC(cur_free_bytes);
-	}
-	
-	massert(cur_free_bytes >= size);
-	
-	total_malloc_bytes += size;
-	cur_free_bytes -= size;
-	
-	void * ret = cur_free_ptr;
-	cur_free_ptr += size;
-	return ret;
+  massert(size > 0);
+  if(!cur_free_ptr) {
+    cur_free_bytes = INIT_ALLOC_SIZE;
+    cur_free_ptr = (char*) STD_MALLOC(cur_free_bytes);
+  }
+  
+  massert(cur_free_bytes >= size);
+  
+  total_malloc_bytes += size;
+  cur_free_bytes -= size;
+  
+  void * ret = cur_free_ptr;
+  cur_free_ptr += size;
+  return ret;
 }
 
 void free_wrapper(int size)
 {
-	massert(size > 0);
-	massert(cur_free_ptr);
-	massert(total_malloc_bytes >= size);
-	cur_free_bytes += size;
-	total_malloc_bytes -= size;
-	cur_free_ptr -= size;
+  massert(size > 0);
+  massert(cur_free_ptr);
+  massert(total_malloc_bytes >= size);
+  cur_free_bytes += size;
+  total_malloc_bytes -= size;
+  cur_free_ptr -= size;
 }
 
 unsigned int *shift_table;
@@ -857,57 +857,58 @@
 static int already_inited = 0;
 void fastdiv_init()
 {
-	if(already_inited) return;
-	already_inited = 1;
-	
-	int i;
-	int shift_table_size = (MAX_SLOT_SIZE + 1) * sizeof shift_table[0];
-	shift_table = (unsigned int *)malloc_wrapper(shift_table_size);
-	memset(shift_table, 0x00, shift_table_size) ;
-	for(i = MAX_SLOT_SIZE + 1;i--;) {
-		shift_table[i] = 0;
-		int v = i;
-		while(v && !(v & 1)) {
-			v >>= 1;
-			shift_table[i]++;
-		}
-	}
+  if(already_inited) return;
+  already_inited = 1;
+  
+  int i;
+  int shift_table_size = (MAX_SLOT_SIZE + 1) * sizeof shift_table[0];
+  shift_table = (unsigned int *)malloc_wrapper(shift_table_size);
+  memset(shift_table, 0x00, shift_table_size) ;
+  for(i = MAX_SLOT_SIZE + 1;i--;) {
+    shift_table[i] = 0;
+    int v = i;
+    while(v && !(v & 1)) {
+      v >>= 1;
+      shift_table[i]++;
+    }
+  }
 
-	memset(compact_table, 0x00, sizeof compact_table);
-	memset(mask, 0x00, sizeof mask);
-	for(i = 1;i < 32;i += 2) {
-		int cur = 1;
-		unsigned short *p = NULL;
-		while(1) {
-			p = (unsigned short*)malloc_wrapper(cur * sizeof p[0]);
-			memset(p, 0xff, cur * sizeof p[0]);
-			int j;
-			for(j = 0; j <= MAX_ADDR_OFFSET;j += i) {
-				int pos = j & (cur - 1);
-				if(p[pos] == 0xffff) {
-					p[pos] = j / i;
-				}else {
-					break;
-				}
-			}
-			if(j <= MAX_ADDR_OFFSET) {
-				free_wrapper(cur * sizeof p[0]);
-				cur <<= 1;
-				p = NULL;
-			}else {
-				break;
-			}
-		}
-		massert(p);
-		mask[i] = cur - 1;
-		while(cur && p[cur - 1] == 0xffff) {
-			free_wrapper(sizeof p[0]);
-			cur--;
-		}
-		compact_table[i] = p;
-	}
+  memset(compact_table, 0x00, sizeof compact_table);
+  memset(mask, 0x00, sizeof mask);
+  for(i = 1;i < 32;i += 2) {
+    int cur = 1;
+    unsigned short *p = NULL;
+    while(1) {
+      p = (unsigned short*)malloc_wrapper(cur * sizeof p[0]);
+      memset(p, 0xff, cur * sizeof p[0]);
+      int j;
+      for(j = 0; j <= MAX_ADDR_OFFSET;j += i) {
+        int pos = j & (cur - 1);
+        if(p[pos] == 0xffff) {
+          p[pos] = j / i;
+        }else {
+          break;
+        }
+      }
+      if(j <= MAX_ADDR_OFFSET) {
+        free_wrapper(cur * sizeof p[0]);
+        cur <<= 1;
+        p = NULL;
+      }else {
+        break;
+      }
+    }
+    massert(p);
+    mask[i] = cur - 1;
+    while(cur && p[cur - 1] == 0xffff) {
+      free_wrapper(sizeof p[0]);
+      cur--;
+    }
+    compact_table[i] = p;
+  }
 }
 
 #endif
+
 
 

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h Fri Jan 11 06:38:32 2008
@@ -39,22 +39,22 @@
 
 inline int fastdiv_div(int x, int y)
 {
-	massert(x % y == 0);
-	massert(0 <= y && y <= 1024);
-	massert(y % 4 == 0);
-	massert(y <= 128 || y % 8 == 0);
-	massert(y <= 256 || y % 128 == 0);
-	massert(x <= (1 << 16));
+  massert(x % y == 0);
+  massert(0 <= y && y <= 1024);
+  massert(y % 4 == 0);
+  massert(y <= 128 || y % 8 == 0);
+  massert(y <= 256 || y % 128 == 0);
+  massert(x <= (1 << 16));
 
-	int s = shift_table[y];
-	massert(s >= 2);
-	x >>= s;
-	y >>= s;
+  int s = shift_table[y];
+  massert(s >= 2);
+  x >>= s;
+  y >>= s;
 
-	massert(x >= 0 && x <= 16 * (1 << 10));
-	massert(y <= 32 && y % 2);
+  massert(x >= 0 && x <= 16 * (1 << 10));
+  massert(y <= 32 && y % 2);
 
-	return (int)compact_table[y][x & mask[y]];
+  return (int)compact_table[y][x & mask[y]];
 }
 #else
 #define fastdiv_div(x,y) ((x) / (y))
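
A worked example of the table-driven path, using the tables built by fastdiv_init in wspace_chunk.cpp (values traced through that loop):

    /* fastdiv_div(96, 24):
       shift_table[24] == 3             (24 has three trailing zero bits)
       x >>= 3  ->  12,  y >>= 3  ->  3 (odd, <= 32)
       compact_table[3][12 & mask[3]] was filled with 12 / 3 == 4,
       and indeed 96 / 24 == 4.                                       */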

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h Fri Jan 11 06:38:32 2008
@@ -430,12 +430,11 @@
 {
   POINTER_SIZE_INT temp = cur_alloc_color;
   cur_alloc_color = cur_mark_black_color;
-  //FIXME: Need barrier here.
-  //apr_memory_rw_barrier();
+  mem_fence();
   cur_mark_black_color = temp;
   cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE;
   cur_mark_mask = (~cur_mark_mask) & FLIP_COLOR_MASK_IN_TABLE;
-  //printf("color flip\n");
+  TRACE2("gc.con","color bit flips");
 }
 
 extern void wspace_mark_scan(Collector *collector, Wspace *wspace);

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp Fri Jan 11 06:38:32 2008
@@ -339,7 +339,6 @@
       /*grab more pfc pools*/
       pfc_pool = wspace_grab_next_pfc_pool(wspace);
     }
-    gc_unset_sweeping_global_normal_chunk();
     
     /*4. Check the used list again.*/
     chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
@@ -350,6 +349,8 @@
 
     /*5. Switch the PFC backup list to PFC list.*/
     wspace_exchange_pfc_pool(wspace);
+    
+    gc_unset_sweeping_global_normal_chunk();
 
     /*6. Put back live abnormal chunk and normal unreusable chunk*/
     Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
@@ -379,4 +380,5 @@
   }
   while(num_sweeping_collectors != num_active_collectors + 1);
 }
+
 

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/move_compact/gc_mc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/move_compact/gc_mc.h?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/move_compact/gc_mc.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/move_compact/gc_mc.h Fri Jan 11 06:38:32 2008
@@ -0,0 +1,64 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/move_compact/gc_mc.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,409 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
+#include "sspace.h"
+
+POINTER_SIZE_INT TOSPACE_SIZE = 0;
+
+static unsigned int sspace_compute_num_tospace_blocks(Sspace* sspace)
+{
+  unsigned int num_tospace_blocks = 0;
+
+  if(! TOSPACE_SIZE) /* if no size is specified, use the default */
+    return sspace->num_managed_blocks >> 3;  
+
+
+  if( (TOSPACE_SIZE << 1) >= sspace->committed_heap_size ){
+
+    TOSPACE_SIZE = sspace->committed_heap_size >> 1;
+
+    /* tospace should always be smaller than fromspace, and at least 1 block. */
+    TOSPACE_SIZE = round_down_to_size(TOSPACE_SIZE, GC_BLOCK_SIZE_BYTES);
+    if( TOSPACE_SIZE != 0 ) TOSPACE_SIZE -= 1; 
+
+    if( TOSPACE_SIZE >= MB ){
+      WARN2("gc.init", "TOSPACE_SIZE is too big, set it to be " << TOSPACE_SIZE/MB << "MB");
+    }else{
+      WARN2("gc.init", "TOSPACE_SIZE is too big, set it to be " << TOSPACE_SIZE/KB << "KB");
+    }
+  }
+
+  num_tospace_blocks = (unsigned int)(TOSPACE_SIZE >> GC_BLOCK_SHIFT_COUNT);
+
+  return num_tospace_blocks;
+}
+
+static void sspace_config_after_clean(Sspace* sspace)
+{
+  
+  unsigned int num_tospace_blocks = sspace_compute_num_tospace_blocks(sspace);
+  unsigned int num_fromspace_blocks = sspace->num_managed_blocks - num_tospace_blocks;
+  unsigned int sspace_first_idx = sspace->first_block_idx;
+ 
+  /* prepare for from-space, first half */
+  Block_Header* fromspace_last_block = (Block_Header*)&(sspace->blocks[num_fromspace_blocks - 1]);
+  fromspace_last_block->next = NULL;
+
+  /* prepare for to-space */
+  sspace->tospace_first_idx = sspace_first_idx + num_fromspace_blocks;
+  sspace->ceiling_block_idx = sspace->tospace_first_idx +  num_tospace_blocks - 1;
+  assert( sspace->ceiling_block_idx == sspace_first_idx + sspace->num_managed_blocks - 1 );
+  sspace->free_block_idx = sspace->tospace_first_idx;
+          
+  /* no survivor area at the beginning */
+  sspace->survivor_area_start = (Block_Header*)&(sspace->blocks[num_fromspace_blocks]);
+  sspace->survivor_area_end = sspace->survivor_area_start;
+
+  sspace->cur_free_block = (Block_Header*)sspace->heap_start;
+  if(num_fromspace_blocks == 0) sspace->cur_free_block = NULL; 
+
+  sspace->num_used_blocks = 0;
+  
+}
+
+Sspace *sspace_initialize(GC* gc, void* start, POINTER_SIZE_INT sspace_size, POINTER_SIZE_INT commit_size) 
+{    
+  assert( (sspace_size%GC_BLOCK_SIZE_BYTES) == 0 );
+  Sspace* sspace = (Sspace *)STD_MALLOC(sizeof(Sspace));
+  assert(sspace);
+  memset(sspace, 0, sizeof(Sspace));
+    
+  sspace->reserved_heap_size = sspace_size;
+  sspace->num_total_blocks = (unsigned int)(sspace_size >> GC_BLOCK_SHIFT_COUNT);
+
+  void* reserved_base = start;
+  /* commit sspace mem */    
+  if(!large_page_hint)    
+    vm_commit_mem(reserved_base, commit_size);
+  memset(reserved_base, 0, commit_size);
+  
+  sspace->committed_heap_size = commit_size;
+  sspace->heap_start = reserved_base;
+  sspace->blocks = (Block*)reserved_base;
+
+#ifdef STATIC_NOS_MAPPING
+  sspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + sspace->reserved_heap_size);
+#else /* for dynamic mapping, nos->heap_end is gc->heap_end */
+  sspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + sspace->committed_heap_size);
+#endif
+
+  sspace->num_managed_blocks = (unsigned int)(commit_size >> GC_BLOCK_SHIFT_COUNT);
+ 
+  unsigned int sspace_first_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, sspace->heap_start);
+  sspace->first_block_idx = sspace_first_idx;
+
+  space_init_blocks((Blocked_Space*)sspace); /* it uses first_block_idx and num_managed_blocks */
+
+  sspace_config_after_clean(sspace);  /* it uses first_block_idx and num_managed_blocks */
+ 	  
+  sspace->move_object = TRUE;
+  sspace->num_collections = 0;
+  sspace->time_collections = 0;
+  sspace->survive_ratio = 0.2f;
+  sspace->last_alloced_size = 0;
+  sspace->accumu_alloced_size = 0;  
+  sspace->total_alloced_size = 0;
+  sspace->last_surviving_size = 0;
+  sspace->period_surviving_size = 0;
+  
+  sspace->gc = gc;
+
+  return sspace;
+}
+
+void sspace_destruct(Sspace *sspace) 
+{
+#ifdef USE_32BITS_HASHCODE
+  space_desturct_blocks((Blocked_Space*)sspace);
+#endif
+  STD_FREE(sspace);   
+}
+
+POINTER_SIZE_INT sspace_free_space_size(Sspace* nos)
+{
+  POINTER_SIZE_INT tospace_free_size = blocked_space_free_mem_size((Blocked_Space*)nos);
+  
+  POINTER_SIZE_INT fromspace_free_size = 0;
+  Block_Header* cur_free_block = nos->cur_free_block;
+  POINTER_SIZE_INT num_free_blocks = 0;
+  while(cur_free_block){
+    num_free_blocks++;
+    cur_free_block = cur_free_block->next;  
+  }
+  fromspace_free_size = num_free_blocks << GC_BLOCK_SHIFT_COUNT;
+  
+  return tospace_free_size + fromspace_free_size;
+}
+
+POINTER_SIZE_INT sspace_used_space_size(Sspace* nos)
+{
+  return nos->committed_heap_size - sspace_free_space_size(nos);
+}
+
+/* adjust each block's next pointer so that it points correctly to its adjacent block */
+void sspace_prepare_for_collection(Sspace* sspace)
+{  
+  unsigned int tospace_first_idx = sspace->tospace_first_idx;
+  unsigned int sspace_last_idx = sspace->first_block_idx + sspace->num_managed_blocks - 1;
+
+  /* if to-space is in the second half, we need two connections:
+     1. link the last from-space block to the first to-space block; 2. connect the two parts of from-space across the survivor area */
+  if(sspace->ceiling_block_idx == sspace_last_idx){
+    Block_Header* tospace_first_block = (Block_Header*)&sspace->blocks[tospace_first_idx - sspace->first_block_idx];
+    /* For 1. Fromspace has two parts only when tospace is not adjacent to survivor area. */
+    if(tospace_first_block != sspace->survivor_area_end){
+      Block_Header* block_before_tospace =  (Block_Header*)((Block*)tospace_first_block - 1);
+      assert( block_before_tospace->next == NULL);
+      block_before_tospace->next = tospace_first_block;
+    }
+    /* For 2. It doesn't matter if survivor_area size is 0. */
+    Block_Header* block_before_survivor_area = (Block_Header*)((Block*)(sspace->survivor_area_start) - 1);
+    block_before_survivor_area->next = (Block_Header*)(sspace->survivor_area_start);
+      
+  }else{  /* otherwise, tospace is in first half */
+    
+    /* Connect the last block of from-space to the first block of tospace */
+    Block_Header* block_before_tospace = (Block_Header*)&sspace->blocks[tospace_first_idx - sspace->first_block_idx - 1];
+    block_before_tospace->next = (Block_Header*)((Block*)block_before_tospace + 1);
+        
+  }
+  /* in any case, the sspace last block's next should point to NULL. This matters in two cases:
+     1. tospace is in the second half and has zero size;
+     2. tospace is in the first half, and both tospace and the survivor area have zero size.
+     In those cases the code above would set it to point to the first block of tospace, which is sspace->heap_end, a block that is not mapped.
+     */
+  ((Block_Header*)&(sspace->blocks[sspace->num_managed_blocks - 1]))->next = NULL;  
+
+  return;
+}
+
+void sspace_reset_after_collection(Sspace* sspace)
+{ 
+  Block* blocks = (Block*)sspace->blocks;
+ 
+   /* During LOS extension, the NOS last block points back to the MOS first block, so that
+     the first few blocks of MOS are compacted to the NOS end. We need to reset the NOS last block's next pointer.
+     This is needed for the next collection, and for from-space setup,
+     so that the last block of from-space points to NULL.
+   */
+  Block_Header *sspace_last_block = (Block_Header*)&blocks[sspace->num_managed_blocks - 1];
+  sspace_last_block->next = NULL;
+
+  unsigned int sspace_first_idx = sspace->first_block_idx;
+
+  Boolean is_major_collection = gc_match_kind(sspace->gc, MAJOR_COLLECTION);
+
+  if( is_major_collection ){ 
+    /* prepare for from-space, first half */
+    sspace_config_after_clean(sspace);
+
+    /* clean up the collected blocks */
+    for(unsigned int i = sspace_first_idx; i <= sspace->ceiling_block_idx; i++){
+      Block_Header* block = (Block_Header*)&(blocks[i - sspace_first_idx]);
+      block_reset(block);
+    }
+  
+    return;          
+  }
+
+  /* for minor collection */
+  unsigned int num_tospace_blocks = sspace_compute_num_tospace_blocks(sspace);
+  unsigned int num_fromspace_blocks = sspace->num_managed_blocks - num_tospace_blocks;
+  unsigned int sspace_last_idx = sspace_first_idx + sspace->num_managed_blocks - 1;
+
+    /* clean up the collected blocks */
+  unsigned int start_idx = 0;
+  unsigned int end_idx = sspace->tospace_first_idx - sspace_first_idx - 1;
+  for(unsigned int i = start_idx; i <= end_idx; i++){
+    Block_Header* block = (Block_Header*)&(blocks[i]);
+    block_reset(block);
+  }
+  
+  /* when tospace is in first half. clean the part of fromspace after tospace. */
+  if(sspace->ceiling_block_idx != sspace_last_idx){
+    start_idx = sspace->ceiling_block_idx - sspace_first_idx + 1;
+    end_idx = sspace_last_idx - sspace_first_idx;
+    for(unsigned int i = start_idx; i <= end_idx; i++){
+      Block_Header* block = (Block_Header*)&(blocks[i]);
+      block_reset(block);
+    }    
+  }
+
+  sspace->cur_free_block = (Block_Header*)sspace->heap_start;
+  
+  /* minor collection always has a survivor area.
+    (The survivor area size can be zero if the tospace size is zero, e.g., when the sspace size is too small.
+     This is not a correctness issue, because it means all sspace objects are forwarded to MOS.)
+  */
+  sspace->survivor_area_start = (void *)&blocks[sspace->tospace_first_idx - sspace_first_idx];
+  sspace->survivor_area_end = (void *)&blocks[sspace->free_block_idx - sspace_first_idx];
+  sspace->num_used_blocks = sspace->free_block_idx - sspace->tospace_first_idx;        
+
+  /* if the survivor_area of this collection is at first half of sspace */
+  if(  sspace->ceiling_block_idx != sspace_last_idx){      
+      
+    /* Check if the survivor area overlaps the new tospace. In that case, we should ensure fromspace covers the survivor area. */
+    if( sspace->survivor_area_end > &(sspace->blocks[num_fromspace_blocks]) ){
+     /* this case might happen after nos_boundary is adjusted. Since this function is only called before the nos_boundary adjustment,
+        execution reaching here means the boundary was adjusted by the last minor collection, and we don't change the sspace config in minor collections. */
+      num_fromspace_blocks = ((POINTER_SIZE_INT)sspace->survivor_area_end - (POINTER_SIZE_INT)sspace->heap_start) >> GC_BLOCK_SHIFT_COUNT;
+      num_tospace_blocks = sspace->num_managed_blocks - num_fromspace_blocks;
+    }
+    
+    /* prepare for to-space in second half */
+    sspace->tospace_first_idx = sspace_first_idx + num_fromspace_blocks;
+    sspace->ceiling_block_idx = sspace->tospace_first_idx +  num_tospace_blocks - 1;
+    sspace->free_block_idx = sspace->tospace_first_idx;
+    
+    /* prepare from-space */
+    /* connect the free areas cross survivor area */
+    Block_Header* block_before_survivor_area = (Block_Header*)((Block*)sspace->survivor_area_start - 1);
+    /* set the last from-space block's next to NULL if the survivor_area size is zero or it is adjacent to tospace,
+       ensuring we don't connect fromspace to tospace. */
+    /* we intentionally don't dereference survivor_area_end->block_idx, because survivor_area_end
+       might point at heap_end, which has no memory mapped. */
+    if( sspace->survivor_area_end == &(sspace->blocks[num_fromspace_blocks])) 
+      block_before_survivor_area->next = NULL;
+    else{
+      block_before_survivor_area->next = (Block_Header*)sspace->survivor_area_end;
+      ((Block_Header*)&(sspace->blocks[num_fromspace_blocks-1]))->next = NULL;
+    }
+    
+  }else{ /* after minor collection, if the survivor_area is at second half of sspace */
+
+    /* Check if sspace has too few blocks to hold tospace. */
+    if( sspace->tospace_first_idx - sspace->first_block_idx <= num_tospace_blocks ){
+      /* this case should never happen right after a minor collection, but might happen after nos_boundary is adjusted */
+      assert(0); 
+      num_tospace_blocks = (sspace->tospace_first_idx - sspace->first_block_idx) >> 3;
+      num_fromspace_blocks = sspace->num_managed_blocks - num_tospace_blocks;
+    }
+          
+    /* prepare for to-space */
+    sspace->ceiling_block_idx = sspace->tospace_first_idx - 1;
+    sspace->tospace_first_idx = sspace->tospace_first_idx - num_tospace_blocks;
+    sspace->free_block_idx = sspace->tospace_first_idx;
+
+    /* prepare for from-space */
+    /* connect the free areas cross tospace and survivor_area */
+    Block_Header* block_before_tospace = (Block_Header*)&blocks[sspace->tospace_first_idx - 1 - sspace_first_idx];
+    /* in case survivor_area_end is heap_end, the block after the survivor area should be NULL */
+    Block_Header* survivor_area_last_blk = (Block_Header*)((Block*)(sspace->survivor_area_end) - 1);
+    block_before_tospace->next = survivor_area_last_blk->next;
+    
+  }
+         
+  return;
+}
+
+#ifndef STATIC_NOS_MAPPING 
+void* sspace_heap_start_adjust(Sspace* sspace, void* new_space_start, POINTER_SIZE_INT new_space_size)
+{ 
+  GC* gc = sspace->gc;
+  
+  /* we could simply change certain fields of sspace and keep the original semispace config.
+     The issue is that the tospace was computed according to the original space size,
+     so we just do another round of configuration after a major collection.
+     It's troublesome to do the configuration again after a minor collection. */
+
+  /* major collection leaves no survivor area in nos */
+  if( gc_match_kind(gc, MAJOR_COLLECTION)){
+    /* restore the fromspace last block's next pointer. It was set a moment ago in sspace_reset_after_collection. */
+    Block_Header* block_before_survivor_area = (Block_Header*)((Block*)(sspace->survivor_area_start) - 1);
+    block_before_survivor_area->next = (Block_Header*)(sspace->survivor_area_start);
+
+    blocked_space_adjust((Blocked_Space*)sspace, new_space_start, new_space_size);
+    
+    sspace_config_after_clean(sspace);
+    
+    return new_space_start;   
+  }
+  /* for minor collection */
+  /* always leave at least one free block at the beginning of sspace for fromspace, 
+     so that sspace->cur_free_block always points to sspace start. */
+  void* old_space_start = new_space_start;
+  
+  if( new_space_start >= sspace->survivor_area_start)
+    new_space_start = (Block*)(sspace->survivor_area_start) - 1; 
+  
+  void* tospace_start = &(sspace->blocks[sspace->free_block_idx - sspace->first_block_idx]); 
+  if( new_space_start >= tospace_start)
+    new_space_start = (Block*)tospace_start - 1;
+  
+  new_space_size += (POINTER_SIZE_INT)old_space_start - (POINTER_SIZE_INT)new_space_start;
+
+  /* change the fields that are changed */  
+  sspace->heap_start = new_space_start;
+  sspace->blocks = (Block*)new_space_start;
+  sspace->committed_heap_size = new_space_size;
+  sspace->reserved_heap_size = new_space_size;
+  sspace->num_managed_blocks = (unsigned int)(new_space_size >> GC_BLOCK_SHIFT_COUNT);
+  sspace->num_total_blocks = sspace->num_managed_blocks;
+  sspace->first_block_idx = ((Block_Header*)new_space_start)->block_idx;
+  
+  sspace->cur_free_block = (Block_Header*)sspace->heap_start;
+
+  return new_space_start;
+}
+#endif /* #ifndef STATIC_NOS_MAPPING  */
+
+void collector_execute_task(GC* gc, TaskType task_func, Space* space);
+
+/* world is stopped when starting sspace_collection */      
+#include "../gen/gen.h"  /* for gc_is_gen_mode() */
+void sspace_collection(Sspace *sspace)
+{
+  sspace->num_collections++;  
+
+  if(gc_is_gen_mode()){
+    sspace->collect_algorithm = MINOR_GEN_SEMISPACE_POOL;
+  }else{
+    sspace->collect_algorithm = MINOR_NONGEN_SEMISPACE_POOL;
+  }
+
+  GC* gc = sspace->gc;
+  
+  /* we should not destruct rootset structure in case we need fall back */
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
+
+  switch(sspace->collect_algorithm){
+
+#ifdef MARK_BIT_FLIPPING
+
+    case MINOR_NONGEN_SEMISPACE_POOL:
+      TRACE2("gc.process", "GC: nongen_semispace_pool algo start ... \n");
+      collector_execute_task(gc, (TaskType)nongen_ss_pool, (Space*)sspace);
+      TRACE2("gc.process", "\nGC: end of nongen semispace pool algo ... \n");
+      break;
+
+#endif /*#ifdef MARK_BIT_FLIPPING */
+
+    case MINOR_GEN_SEMISPACE_POOL:
+      TRACE2("gc.process", "gen_semispace_pool algo start ... \n");
+      collector_execute_task(gc, (TaskType)gen_ss_pool, (Space*)sspace);
+      TRACE2("gc.process", "\nGC: end of gen semispace pool algo ... \n");
+      break;
+    
+    default:
+      DIE2("gc.collection","Specified minor collection algorithm doesn't exist!");
+      exit(0);
+      break;
+  }
+  
+  return; 
+}
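
To visualize what sspace_config_after_clean sets up, here is a sketch of the block layout right after configuration (proportions illustrative; by default tospace gets one eighth of the managed blocks):

    heap_start                                                   heap_end
    |<-------------- from-space -------------->|<--- to-space --->|
    ^ cur_free_block                           ^ tospace_first_idx (== free_block_idx)
                                               ^ survivor_area_start == survivor_area_end (empty)
                                                                  ^ ceiling_block_idx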

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace.h Fri Jan 11 06:38:32 2008
@@ -65,8 +65,8 @@
   
   Block_Header* cur_free_block;
   unsigned int tospace_first_idx;
-  void* survivor_area_top;
-  void* survivor_area_bottom;
+  void* survivor_area_end;
+  void* survivor_area_start;
 
 }Sspace;
 
@@ -85,6 +85,9 @@
 void nongen_ss_pool(Collector* collector);
 void gen_ss_pool(Collector* collector);
 
+POINTER_SIZE_INT sspace_free_space_size(Sspace* nos);
+POINTER_SIZE_INT sspace_used_space_size(Sspace* nos);
+
 FORCE_INLINE Boolean sspace_has_free_block(Sspace* sspace)
 {
   return (sspace->cur_free_block != NULL);
@@ -92,8 +95,14 @@
 
 FORCE_INLINE Boolean obj_belongs_to_survivor_area(Sspace* sspace, Partial_Reveal_Object* p_obj)
 {
-  return (p_obj >= sspace->survivor_area_bottom && 
-                          p_obj < sspace->survivor_area_top);
+  return (p_obj >= sspace->survivor_area_start && 
+                          p_obj < sspace->survivor_area_end);
+}
+
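+/* an object whose survivor bit is set (i.e., one sitting in the survivor area that has already
+   survived one minor collection) is forwarded to MOS the next time it is reached */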
+FORCE_INLINE Boolean obj_to_be_forwarded(Sspace* sspace, Partial_Reveal_Object* p_obj)
+{
+  assert( obj_belongs_to_survivor_area(sspace, p_obj)?obj_is_survivor(p_obj):!obj_is_survivor(p_obj));
+  return obj_is_survivor(p_obj);
 }
 
 /* treat semispace alloc as thread local alloc. If it fails or p_obj is old, forward it to MOS */
@@ -102,10 +111,28 @@
   void* p_targ_obj = NULL;
   Sspace* sspace = (Sspace*)allocator->alloc_space;
   
-  if( !obj_belongs_to_survivor_area(sspace, p_obj) )
+  if( obj_to_be_forwarded(sspace, p_obj) ) 
+    return NULL;
+    
+  p_targ_obj = thread_local_alloc(size, allocator);
+  if(!p_targ_obj)
     p_targ_obj = semispace_alloc(size, allocator);           
   
   return p_targ_obj;
+}
+
+#ifndef STATIC_NOS_MAPPING
+void* sspace_heap_start_adjust(Sspace* sspace, void* new_heap_start, POINTER_SIZE_INT new_heap_size);
+#endif /* #ifndef STATIC_NOS_MAPPING  */
+
+inline POINTER_SIZE_INT sspace_tospace_size(Sspace* space)
+{
+  return blocked_space_free_mem_size((Blocked_Space*)space);
+}
+
+inline POINTER_SIZE_INT sspace_survivor_area_size(Sspace* space)
+{
+  return (POINTER_SIZE_INT)space->survivor_area_end - (POINTER_SIZE_INT)space->survivor_area_start;
 }
 
 #endif // _FROM_SPACE_H_

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_alloc.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_alloc.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_alloc.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,81 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace.h"
+
+Boolean sspace_alloc_block(Sspace* sspace, Allocator* allocator)
+{    
+  alloc_context_reset(allocator);
+  
+  /* now try to get a new block */
+  Block_Header* old_free_blk = sspace->cur_free_block;
+    
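+  /* lock-free pop of the shared free-block list: CAS cur_free_block to its successor;
+     if another thread won the race, reread the head and retry */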
+  while(old_free_blk != NULL){   
+    Block_Header* new_free_blk = old_free_blk->next;
+    Block_Header* allocated_blk = (Block_Header*)atomic_casptr((volatile void**)&sspace->cur_free_block, new_free_blk, old_free_blk);
+    if(allocated_blk != old_free_blk){     /* if failed */  
+      old_free_blk = sspace->cur_free_block;
+      continue;
+    }
+    /* ok, got one */    
+    allocator_init_free_block(allocator, allocated_blk);
+
+    return TRUE;
+  }
+
+  return FALSE;
+  
+}
+
+void* sspace_alloc(unsigned size, Allocator *allocator) 
+{
+  void*  p_return = NULL;
+
+  /* First, try to allocate object from TLB (thread local block) */
+  p_return = thread_local_alloc(size, allocator);
+  if (p_return)  return p_return;
+
+  /* ran out of the local block, grab a new one */  
+  Sspace* sspace = (Sspace*)allocator->alloc_space;
+  int attempts = 0;
+  while( !sspace_alloc_block(sspace, allocator)){
+    vm_gc_lock_enum();
+    /* after acquiring the lock, check whether another thread has already collected */
+    if ( !sspace_has_free_block(sspace) ) {  
+        if(attempts < 2) {
+          gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); 
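+          /* the collection may have installed a fresh alloc block for this allocator; if so, stop retrying */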
+          if(allocator->alloc_block){
+            vm_gc_unlock_enum();  
+            break;
+          }
+          
+          attempts++;
+          
+        }else{  /* no free block after "attempts" collections */
+          vm_gc_unlock_enum();  
+          return NULL;
+        }
+    }
+    vm_gc_unlock_enum();  
+  }
+  
+  p_return = thread_local_alloc(size, allocator);
+  
+  return p_return;
+  
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_alloc.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_forward.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_forward.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_forward.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_forward.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,68 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace.h"
+
+static Boolean semispace_alloc_block(Sspace* sspace, Allocator* allocator)
+{
+  alloc_context_reset(allocator);
+
+  /* now try to get a new block */
+  unsigned int old_free_idx = sspace->free_block_idx;
+  unsigned int new_free_idx = old_free_idx+1;
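+  /* bump-index block allocation: atomically advance free_block_idx, bounded by ceiling_block_idx;
+     losers of the CAS race reread the index and retry */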
+  while( old_free_idx <= sspace->ceiling_block_idx ){   
+    unsigned int allocated_idx = atomic_cas32(&sspace->free_block_idx, new_free_idx, old_free_idx);
+    if(allocated_idx != old_free_idx){
+      old_free_idx = sspace->free_block_idx;
+      new_free_idx = old_free_idx+1;
+      continue;
+    }
+    /* ok, got one */
+    Block_Header* alloc_block = (Block_Header*)&(sspace->blocks[allocated_idx - sspace->first_block_idx]);
+
+    allocator_init_free_block(allocator, alloc_block);
+
+    return TRUE;
+  }
+
+  /* the semispace is exhausted; a fallback collection should be triggered */
+  return FALSE;
+  
+}
+
+void* semispace_alloc(unsigned int size, Allocator* allocator)
+{
+  void *p_return = NULL;
+   
+  /* All chunks of data requested need to be multiples of GC_OBJECT_ALIGNMENT */
+  assert((size % GC_OBJECT_ALIGNMENT) == 0);
+  assert( size <= GC_OBJ_SIZE_THRESHOLD );
+
+  /* check if the collector's local alloc block has room; if not, grab a new block */
+  p_return = thread_local_alloc(size, allocator);
+  if(p_return) return p_return;
+  
+  /* grab a new block */
+  Sspace* sspace = (Sspace*)allocator->alloc_space;
+  Boolean ok = semispace_alloc_block(sspace, allocator);
+  if(!ok) return NULL; 
+  
+  p_return = thread_local_alloc(size, allocator);
+  assert(p_return);
+    
+  return p_return;
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_forward.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,334 @@
+
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "sspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../common/compressed_ref.h"
+
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
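+/* tospace bounds cached for the duration of one collection; the world is stopped,
+   so unsynchronized statics are safe here */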
+static void* tospace_start;
+static void* tospace_end;
+
+static Boolean obj_belongs_to_tospace(Partial_Reveal_Object* p_obj)
+{
+  return ( p_obj >= tospace_start && p_obj < tospace_end );
+}
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref) 
+{
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
+    
+  /* the slot can be in tospace or fspace, we don't care. In gen mode,
+     we care only whether the reference in the slot points into nos */
+  if (obj_belongs_to_nos(p_obj))
+    collector_tracestack_push(collector, p_ref); 
+
+  return;
+}
+
+static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+  if (!object_has_ref_field(p_obj)) return;
+    
+  REF *p_ref;
+
+  /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Object* array = p_obj;
+    assert(!obj_is_primitive_array(array));
+
+    int32 array_length = vector_get_length((Vector_Handle) array);        
+    for (int i = 0; i < array_length; i++) {
+      p_ref= (REF *)vector_get_element_address_ref((Vector_Handle) array, i);
+      scan_slot(collector, p_ref);
+    }   
+    return;
+  }
+
+  /* scan non-array object */
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+            
+  for(unsigned int i=0; i<num_refs; i++){
+    REF* p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);        
+    scan_slot(collector, p_ref);
+  }
+
+#ifndef BUILD_IN_REFERENT
+  scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  
+  return;
+}
+
+/* NOTE:: At this point, p_ref can be anywhere, e.g., in the root set or in other spaces, but *p_ref must be in fspace,
+   since only slots pointing to objects in fspace are added to the trace stack.
+   The problem is that *p_ref may have been forwarded already, so when we get here we may find it pointing to tospace.
+   We simply return in that case. It might have been forwarded because of:
+    1. two different slots containing the same reference;
+    2. duplicate slots in the remset (we use an SSB for the remset, with no duplication filtering).
+   The same object can be traced by the thread itself or by another thread.
+*/
+
+static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
+{
+  Space* space = collector->collect_space; 
+  GC* gc = collector->gc;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+
+  if(!obj_belongs_to_nos(p_obj)) return; 
+
+  Partial_Reveal_Object* p_target_obj = NULL;
+  Boolean to_rem_slot = FALSE;
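+  /* a heap slot outside nos that ends up referencing an object still living in nos must be
+     remembered, so the next minor collection can find it */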
+
+  /* Fastpath: object has already been forwarded, update the ref slot */
+  if(obj_is_fw_in_oi(p_obj)){
+    p_target_obj = obj_get_fw_in_oi(p_obj);
+    write_slot(p_ref, p_target_obj);
+
+    /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
+    if(obj_is_survivor(p_target_obj))
+      to_rem_slot = TRUE;
+
+  }else if( obj_belongs_to_tospace(p_obj)){
+    /* if p_obj the new copy in tospace, rem p_ref if it's from MOS */   
+    to_rem_slot = TRUE; 
+  }  
+  
+  if( to_rem_slot == TRUE ){
+    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
+      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
+    
+    return;
+  }  
+  
+  /* following is the logic for forwarding */  
+  p_target_obj = collector_forward_object(collector, p_obj);
+  
+  /* if p_target_obj is NULL, the object has been forwarded by another thread.
+      Note: there is a race condition here; it might have been forwarded by another thread
+      that has not yet set the forwarding pointer, so we need to spin to get the pointer.
+      We could implement collector_forward_object() so that the forwarding pointer
+      is set in the atomic instruction, which requires rolling back the mos-allocated
+      space. That is easy with thread-local block allocation cancellation. */
+  if( p_target_obj == NULL ){
+    if(collector->result == FALSE ){
+      /* failed to forward, let's get back to controller. */
+      vector_stack_clear(collector->trace_stack);
+      return;
+    }
+    /* forwarded already*/
+    p_target_obj = obj_get_fw_in_oi(p_obj);
+  
+  }else{  /* otherwise, we successfully forwarded */
+
+#ifdef GC_GEN_STATS
+    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
+    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
+#endif
+
+    scan_object(collector, p_target_obj);
+  }
+  
+  assert(p_target_obj);
+  write_slot(p_ref, p_target_obj);
+  
+  /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
+  if(obj_is_survivor(p_target_obj)){
+    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
+      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
+  }
+   
+  return;
+}
+
+static void trace_object(Collector *collector, REF *p_ref)
+{ 
+  forward_object(collector, p_ref);
+  
+  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_ref = (REF *)vector_stack_pop(trace_stack); 
+    forward_object(collector, p_ref);
+    trace_stack = (Vector_Block*)collector->trace_stack;
+  }
+    
+  return; 
+}
+ 
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+  Space* space = collector->collect_space;
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to trace tasks. */ 
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
+  while(root_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      
+      if(!*p_ref) continue;  /* a root ref can't be NULL, but a remset entry can be */
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+
+#ifdef GC_GEN_STATS
+      gc_gen_collector_update_rootset_ref_num(stats);
+#endif
+
+      if(obj_belongs_to_nos(p_obj)){
+        collector_tracestack_push(collector, p_ref);
+      }
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the trace tasks and forward objects */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");
+
+retry:
+  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(trace_task){    
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
+    while(!vector_block_iterator_end(trace_task,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(trace_task,iter);
+      assert(*p_ref); /* a task can't be NULL; it was checked before being put into the task stack */
+      /* in the sequential version, we trace the same object only once, using a local hashset for that;
+         a local hashset cannot catch repetition across multiple collectors. This is subject to further study. */
+   
+      /* FIXME:: we should not let root_set become empty while working; others may want to steal from it.
+         Degenerate my stack into root_set, and grab another stack. */
+   
+      /* a task has to belong to the collected space; it was checked before being put into the stack */
+      trace_object(collector, p_ref);
+      if(collector->result == FALSE)  break; /* force return */
+    }
+    vector_stack_clear(trace_task);
+    pool_put_entry(metadata->free_task_pool, trace_task);
+    if(collector->result == FALSE){
+      gc_task_pool_clear(metadata->mark_task_pool);
+      break; /* force return */
+    }
+
+    trace_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
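+  /* termination detection: count myself finished, but re-arm and rejoin if new tasks appear in the pool */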
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( pool_is_empty(metadata->mark_task_pool)) continue;
+    /* we can't grab a task here because of a race condition: if we grab the last task
+       and the pool becomes empty, other threads may reach this barrier and pass it. */
+    atomic_dec32(&num_finished_collectors);
+    goto retry;      
+  }
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");
+
+  /* now we are done, but each collector has a private stack that is empty */  
+  trace_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(trace_task);
+  pool_put_entry(metadata->free_task_pool, trace_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void gen_ss_pool(Collector* collector) 
+{  
+  GC* gc = collector->gc;
+  
+  Sspace* sspace = (Sspace*)collector->collect_space;
+  unsigned int sspace_first_idx = sspace->first_block_idx;
+  tospace_start = (void*)&(sspace->blocks[sspace->tospace_first_idx - sspace_first_idx]);
+  tospace_end = (void*)&(sspace->blocks[sspace->ceiling_block_idx - sspace_first_idx]);
+
+  collector_trace_rootsets(collector);
+  
+  /* the remaining work is not worth parallelizing, so let only one thread proceed */
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) {
+    TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished");
+    return;
+  }
+
+  gc->collect_result = gc_collection_result(gc);
+  if(!gc->collect_result){
+#ifndef BUILD_IN_REFERENT
+    fallback_finref_cleanup(gc);
+#endif
+    return;
+  }
+
+  if(!IGNORE_FINREF ){
+    collector_identify_finref(collector);
+    if(!gc->collect_result) return;
+  }
+#ifndef BUILD_IN_REFERENT
+  else {
+    gc_set_weakref_sets(gc);
+    gc_update_weakref_ignore_finref(gc);
+  }
+#endif
+  gc_identify_dead_weak_roots(gc);
+  
+  gc_fix_rootset(collector, FALSE);
+  
+  TRACE2("gc.process", "GC: collector[0] finished");
+
+  return;
+  
+}
+
+void trace_obj_in_gen_ss(Collector *collector, void *p_ref)
+{
+  trace_object(collector, (REF *)p_ref);
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp?rev=611186&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp Fri Jan 11 06:38:32 2008
@@ -0,0 +1,298 @@
+
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace.h"
+#include "../thread/collector.h"
+#include "../thread/collector_alloc.h"
+#include "../common/gc_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
+
+#ifdef MARK_BIT_FLIPPING
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+  if(read_slot(p_ref) == NULL) return;
+  
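+  /* unlike the gen variant, every non-NULL slot is traced: without a remset,
+     the nongen collection must walk the reachable graph beyond nos */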
+  collector_tracestack_push(collector, p_ref); 
+  return;
+}
+
+static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) 
+{
+  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
+    
+  if (!object_has_ref_field_before_scan(p_obj)) return;
+    
+  REF *p_ref;
+
+  if (object_is_array(p_obj)) {   /* scan array object */
+  
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    unsigned int array_length = array->array_len; 
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+
+    for (unsigned int i = 0; i < array_length; i++) {
+      scan_slot(collector, p_ref+i);
+    }   
+
+  }else{ /* scan non-array object */
+    
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    int* ref_iterator = object_ref_iterator_init(p_obj);
+ 
+    for(unsigned int i=0; i<num_refs; i++){  
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
+      scan_slot(collector, p_ref);
+    }    
+
+#ifndef BUILD_IN_REFERENT
+    scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+  
+  }
+
+  return;
+}
+
+/* NOTE:: At this point, p_ref can be anywhere, e.g., in the root set or in other spaces, but *p_ref must be in fspace,
+   since only slots pointing to objects in fspace are added to the trace stack.
+   The problem is that *p_ref may have been forwarded already, so when we get here we may find it pointing to tospace.
+   We simply return in that case. It might have been forwarded because of:
+    1. two different slots containing the same reference;
+    2. duplicate slots in the remset (we use an SSB for the remset, with no duplication filtering).
+   The same object can be traced by the thread itself or by another thread.
+*/
+
+static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) 
+{
+  GC* gc = collector->gc;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+
+  if(!obj_belongs_to_nos(p_obj)){
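+    /* a non-nos object is not moved in a minor collection; mark it in place
+       (mark-bit flipping) and scan it exactly once */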
+    if(obj_mark_in_oi(p_obj)){
+#ifdef GC_GEN_STATS
+      if(gc_profile){
+        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
+      }
+#endif
+      scan_object(collector, p_obj);
+    }
+    return;
+  }
+
+  Partial_Reveal_Object* p_target_obj = NULL;
+  /* Fastpath: object has already been forwarded, update the ref slot */
+  if(obj_is_fw_in_oi(p_obj)) {
+    p_target_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_target_obj);
+    write_slot(p_ref, p_target_obj);
+    return;
+  }
+
+  /* following is the logic for forwarding */  
+  p_target_obj = collector_forward_object(collector, p_obj);
+  
+  /* if p_target_obj is NULL, the object has been forwarded by another thread.
+      We could implement collector_forward_object() so that the forwarding pointer
+      is set in the atomic instruction, which requires rolling back the mos-allocated
+      space. That is easy with thread-local block allocation cancellation. */
+  if( p_target_obj == NULL ){
+    if(collector->result == FALSE ){
+      /* failed to forward, let's get back to controller. */
+      vector_stack_clear(collector->trace_stack);
+      return;
+    }
+
+    p_target_obj = obj_get_fw_in_oi(p_obj);
+    assert(p_target_obj);
+    write_slot(p_ref, p_target_obj);
+    return;
+  }
+  /* otherwise, we successfully forwarded */
+
+#ifdef GC_GEN_STATS
+  if(gc_profile){
+    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
+    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
+  }
+#endif
+  write_slot(p_ref, p_target_obj);
+
+  scan_object(collector, p_target_obj); 
+  return;
+}
+
+static void trace_object(Collector *collector, REF *p_ref)
+{ 
+  forward_object(collector, p_ref);
+
+  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
+  while( !vector_stack_is_empty(trace_stack)){
+    p_ref = (REF *)vector_stack_pop(trace_stack); 
+    forward_object(collector, p_ref);
+    trace_stack = (Vector_Block*)collector->trace_stack;
+  }
+  return; 
+}
+ 
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+  GC* gc = collector->gc;
+  GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+  Space* space = collector->collect_space;
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
+  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+  /* first step: copy all root objects to trace tasks. */ 
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
+  while(root_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set, iter);
+
+      assert(*p_ref);  /* a root ref can't be NULL, but a remset entry can be */
+
+      collector_tracestack_push(collector, p_ref);
+
+#ifdef GC_GEN_STATS    
+      gc_gen_collector_update_rootset_ref_num(stats);
+#endif
+    } 
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */    
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+  
+  /* second step: iterate over the trace tasks and forward objects */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");
+
+retry:
+  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(trace_task){    
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
+    while(!vector_block_iterator_end(trace_task,iter)){
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(trace_task, iter);
+      trace_object(collector, p_ref);
+      
+      if(collector->result == FALSE)  break; /* force return */
+ 
+    }
+    vector_stack_clear(trace_task);
+    pool_put_entry(metadata->free_task_pool, trace_task);
+
+    if(collector->result == FALSE){
+      gc_task_pool_clear(metadata->mark_task_pool);
+      break; /* force return */
+    }
+    
+    trace_task = pool_get_entry(metadata->mark_task_pool);
+  }
+  
+  /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure 
+     all the tasks are finished.*/
+     
+  atomic_inc32(&num_finished_collectors);
+  while(num_finished_collectors != num_active_collectors){
+    if( pool_is_empty(metadata->mark_task_pool)) continue;
+    /* we can't grab a task here because of a race condition: if we grab the last task
+       and the pool becomes empty, other threads may reach this barrier and pass it. */
+    atomic_dec32(&num_finished_collectors);
+    goto retry; 
+  }
+
+  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");
+
+  /* now we are done, but each collector has a private stack that is empty */  
+  trace_task = (Vector_Block*)collector->trace_stack;
+  vector_stack_clear(trace_task);
+  pool_put_entry(metadata->free_task_pool, trace_task);   
+  collector->trace_stack = NULL;
+  
+  return;
+}
+
+void nongen_ss_pool(Collector* collector) 
+{  
+  GC* gc = collector->gc;
+  
+  collector_trace_rootsets(collector);  
+  /* the remaining work is not worth parallelizing, so let only one thread proceed */
+  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) {
+    TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished");
+    return;
+  }
+  gc->collect_result = gc_collection_result(gc);
+  if(!gc->collect_result){
+#ifndef BUILD_IN_REFERENT
+    fallback_finref_cleanup(gc);
+#endif
+    return;
+  }
+
+  if(!IGNORE_FINREF ){
+    collector_identify_finref(collector);
+    if(!gc->collect_result) return;
+  }
+#ifndef BUILD_IN_REFERENT
+  else {
+    gc_set_weakref_sets(gc);
+    gc_update_weakref_ignore_finref(gc);
+  }
+#endif
+  gc_identify_dead_weak_roots(gc);
+  
+  gc_fix_rootset(collector, FALSE);
+  
+  TRACE2("gc.process", "GC: collector[0] finished");
+
+  return;
+  
+}
+
+void trace_obj_in_nongen_ss(Collector *collector, void *p_ref)
+{
+  trace_object(collector, (REF*)p_ref);
+}
+
+#endif /* MARK_BIT_FLIPPING */

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?rev=611186&r1=611185&r2=611186&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Fri Jan 11 06:38:32 2008
@@ -85,9 +85,11 @@
     
   GC_Metadata* metadata = collector->gc->metadata;
   
-  if(gc_is_gen_mode() && gc_match_kind(collector->gc, MINOR_COLLECTION) && NOS_PARTIAL_FORWARD){
-    assert(collector->rem_set==NULL);
-    collector->rem_set = free_set_pool_get_entry(metadata);
+  if(gc_is_gen_mode() && gc_match_kind(collector->gc, MINOR_COLLECTION)){
+    if( NOS_PARTIAL_FORWARD || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL ){
+      assert(collector->rem_set==NULL);
+      collector->rem_set = free_set_pool_get_entry(metadata);
+    }
   }
   
 #ifndef BUILD_IN_REFERENT
@@ -309,6 +311,8 @@
   return;
 }
 
+/* FIXME:: unimplemented. The design intention of this API is to launch a specified number of collectors.
+   There might already be some collectors running; the specified number would be in addition to those. */
 void collector_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_collectors)
 {
   assign_collector_with_task(gc, task_func, space);
@@ -345,4 +349,5 @@
   return TRUE;
 
 }
+