You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@harmony.apache.org by xl...@apache.org on 2008/10/29 04:01:03 UTC
svn commit: r708756 [2/3] - in /harmony/enhanced/drlvm/trunk/vm:
gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/
gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/
gc_gen/src/trace_forward/ gc_gen/src/verify/ inclu...
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.cpp Tue Oct 28 20:01:01 2008
@@ -26,6 +26,7 @@
#include "compressed_ref.h"
#include "../utils/sync_stack.h"
#include "../gen/gen.h"
+#include "../verify/verify_live_heap.h"
#define GC_METADATA_SIZE_BYTES (1*MB)
#define GC_METADATA_EXTEND_SIZE_BYTES (1*MB)
@@ -74,6 +75,7 @@
}
gc_metadata.gc_rootset_pool = sync_pool_create();
+ //gc_metadata.gc_verifier_rootset_pool = sync_pool_create();
gc_metadata.gc_uncompressed_rootset_pool = sync_pool_create();
gc_metadata.mutator_remset_pool = sync_pool_create();
gc_metadata.collector_remset_pool = sync_pool_create();
@@ -552,6 +554,7 @@
Boolean obj_is_mark_black_in_table(Partial_Reveal_Object* p_obj);
#endif
+void analyze_bad_obj(Partial_Reveal_Object *p_obj);
void gc_reset_dirty_set(GC* gc)
{
GC_Metadata* metadata = gc->metadata;
@@ -559,18 +562,14 @@
Mutator *mutator = gc->mutator_list;
while (mutator) {
Vector_Block* local_dirty_set = mutator->dirty_set;
- assert(local_dirty_set);
- if(!vector_block_is_empty(local_dirty_set)){
-#ifdef _DEBUG
+ if(!vector_block_is_empty(local_dirty_set)) {
POINTER_SIZE_INT* iter = vector_block_iterator_init(local_dirty_set);
while(!vector_block_iterator_end(local_dirty_set,iter)){
Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) *iter;
iter = vector_block_iterator_advance(local_dirty_set, iter);
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
- assert(obj_is_mark_black_in_table(p_obj));
-#endif
+ analyze_bad_obj(p_obj);
}
-#endif
+ RAISE_ERROR;
vector_block_clear(mutator->dirty_set);
}
mutator = mutator->next;
@@ -581,26 +580,27 @@
if(!pool_is_empty(global_dirty_set_pool)){
Vector_Block* dirty_set = pool_get_entry(global_dirty_set_pool);
- while(dirty_set != NULL){
+ while(dirty_set != NULL) {
if(!vector_block_is_empty(dirty_set)){
-#ifdef _DEBUG
- POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
- while(!vector_block_iterator_end(dirty_set,iter)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) *iter;
- iter = vector_block_iterator_advance(dirty_set, iter);
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
- assert(obj_is_mark_black_in_table(p_obj));
-#endif
- }
-#endif
+ /*
+ POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
+ while(!vector_block_iterator_end(dirty_set,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) *iter;
+ iter = vector_block_iterator_advance(dirty_set, iter);
+ analyze_bad_obj(p_obj);
+ }*/
+ RAISE_ERROR;
+ vector_block_clear(dirty_set);
+ pool_put_entry(metadata->free_set_pool,dirty_set);
+ } else {
+ pool_put_entry(metadata->free_set_pool,dirty_set);
}
- vector_block_clear(dirty_set);
- pool_put_entry(metadata->free_set_pool,dirty_set);
- dirty_set = pool_get_entry(global_dirty_set_pool);
- }
+ dirty_set = pool_get_entry(global_dirty_set_pool);
}
+ }
}
+
void gc_prepare_dirty_set(GC* gc)
{
GC_Metadata* metadata = gc->metadata;
@@ -610,14 +610,59 @@
Mutator *mutator = gc->mutator_list;
while (mutator) {
+ if( vector_block_is_empty(mutator->dirty_set) ) {
+ mutator = mutator->next;
+ continue;
+ }
//FIXME: temproray solution for mostly concurrent.
- lock(mutator->dirty_set_lock);
+ //lock(mutator->dirty_set_lock);
pool_put_entry(gc_dirty_set_pool, mutator->dirty_set);
mutator->dirty_set = free_set_pool_get_entry(metadata);
+ //unlock(mutator->dirty_set_lock);
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+}
+void gc_copy_local_dirty_set_to_global(GC *gc)
+{
+
+ GC_Metadata* metadata = gc->metadata;
+ if(!pool_is_empty(metadata->gc_dirty_set_pool)) //only when the global dirty is empty
+ return;
+
+ Pool* gc_dirty_set_pool = metadata->gc_dirty_set_pool;
+ Vector_Block* dirty_copy = free_set_pool_get_entry(metadata);
+ unsigned int i = 0;
+ Vector_Block* local_dirty_set = NULL;
+
+ lock(gc->mutator_list_lock);
+ Mutator *mutator = gc->mutator_list;
+
+ while (mutator) {
+ lock(mutator->dirty_set_lock);
+ local_dirty_set = mutator->dirty_set;
+ if( vector_block_is_empty(local_dirty_set) ) {
+ unlock(mutator->dirty_set_lock);
+ mutator = mutator->next;
+ continue;
+ }
+ unsigned int dirty_set_size = vector_block_entry_count(local_dirty_set);
+ for(i=0; i<dirty_set_size; i++) {
+ POINTER_SIZE_INT p_obj = vector_block_get_entry(local_dirty_set);
+ vector_block_add_entry(dirty_copy, p_obj);
+ if(vector_block_is_full(dirty_copy)) {
+ pool_put_entry(gc_dirty_set_pool, dirty_copy);
+ dirty_copy = free_set_pool_get_entry(metadata);
+ }
+ }
unlock(mutator->dirty_set_lock);
mutator = mutator->next;
}
unlock(gc->mutator_list_lock);
+ if( !vector_block_is_empty(dirty_copy) )
+ pool_put_entry(gc_dirty_set_pool, dirty_copy);
+ else
+ free_set_pool_put_entry(dirty_copy, metadata);
}
void gc_clear_dirty_set(GC* gc)
@@ -636,7 +681,11 @@
}
void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata)
-{ pool_put_entry(metadata->free_set_pool, block); }
+{
+ if(!vector_block_is_empty(block))
+ RAISE_ERROR;
+ pool_put_entry(metadata->free_set_pool, block);
+}
void gc_reset_collectors_rem_set(GC *gc)
@@ -655,4 +704,3 @@
}
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_metadata.h Tue Oct 28 20:01:01 2008
@@ -67,6 +67,7 @@
void gc_clear_remset(GC* gc);
void gc_prepare_dirty_set(GC* gc);
+void gc_copy_local_dirty_set_to_global(GC *gc);
void gc_reset_dirty_set(GC* gc);
void gc_clear_dirty_set(GC* gc);
@@ -118,14 +119,14 @@
while(!block)
block = gc_metadata_extend(metadata->free_task_pool);
- assert(vector_stack_is_empty(block));
+ assert(vector_block_is_empty(block));
return block;
}
inline void mutator_remset_add_entry(Mutator* mutator, REF* p_ref)
{
assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
-
+
Vector_Block* root_set = mutator->rem_set;
vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref);
@@ -139,17 +140,22 @@
inline void mutator_dirtyset_add_entry(Mutator* mutator, Partial_Reveal_Object* p_obj)
{
Vector_Block* dirty_set = mutator->dirty_set;
+ mutator->dirty_obj_slot_num++;
vector_block_add_entry(dirty_set, (POINTER_SIZE_INT)p_obj);
- if( !vector_block_is_full(dirty_set) ) return;
-
- vector_block_set_full(dirty_set);
-
- if(vector_block_set_exclusive(dirty_set)){
- pool_put_entry(gc_metadata.gc_dirty_set_pool, dirty_set);
+ if( !vector_block_is_full(dirty_set) ) {
+ return;
+ }
+
+ lock(mutator->dirty_set_lock);
+ if( vector_block_is_empty(dirty_set) ) {
+ vector_block_clear(dirty_set);
+ unlock(mutator->dirty_set_lock);
+ return;
}
-
+ pool_put_entry(gc_metadata.gc_dirty_set_pool, dirty_set);
mutator->dirty_set = free_set_pool_get_entry(&gc_metadata);
+ unlock(mutator->dirty_set_lock);
}
inline void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_options.cpp Tue Oct 28 20:01:01 2008
@@ -37,7 +37,10 @@
extern Boolean FORCE_FULL_COMPACT;
-extern unsigned int NUM_MARKERS;
+extern unsigned int NUM_CONCLCTORS;
+extern unsigned int NUM_CON_MARKERS;
+extern unsigned int NUM_CON_SWEEPERS;
+
extern unsigned int NUM_COLLECTORS;
extern unsigned int MINOR_COLLECTORS;
extern unsigned int MAJOR_COLLECTORS;
@@ -59,12 +62,12 @@
GC* gc_mc_create();
GC* gc_ms_create();
-static GC* gc_decide_collection_algo(char* unique_algo, Boolean has_los)
+static GC* gc_unique_decide_collection_algo(char* unique_algo, Boolean has_los)
{
/* if unique_algo is not set, gc_gen_decide_collection_algo is called. */
assert(unique_algo);
- GC_PROP = ALGO_POOL_SHARE | ALGO_DEPTH_FIRST;
+ GC_PROP = ALGO_POOL_SHARE | ALGO_DEPTH_FIRST | ALGO_IS_UNIQUE;
assert(!has_los); /* currently unique GCs don't use LOS */
if(has_los)
@@ -84,7 +87,7 @@
GC_PROP |= ALGO_MS_NORMAL;
gc = gc_ms_create();
}else{
- LWARN(48, "GC algorithm setting incorrect. Will use default value.");
+ LWARN(48, "\nGC algorithm setting incorrect. Will use default value.\n");
GC_PROP |= ALGO_COMPACT_MOVE;
gc = gc_mc_create();
}
@@ -159,7 +162,7 @@
if(minor_algo || major_algo){
LWARN(60, "Generational options cannot be set with unique_algo, ignored.");
}
- gc = gc_decide_collection_algo(unique_algo, has_los);
+ gc = gc_unique_decide_collection_algo(unique_algo, has_los);
vm_properties_destroy_value(unique_algo);
}else{ /* default */
gc = gc_gen_decide_collection_algo(minor_algo, major_algo, has_los);
@@ -225,7 +228,7 @@
if (min_heap_size > max_heap_size){
max_heap_size = min_heap_size;
- LWARN(61, "Max heap size you set is too small, reset to {0}MB" << max_heap_size/MB);
+ LWARN(61, "Max heap size is too small, reset to {0}MB" << max_heap_size/MB);
}
min_heap_size_bytes = min_heap_size;
@@ -248,11 +251,25 @@
NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
}
- if (vm_property_is_set("gc.num_markers", VM_PROPERTIES) == 1) {
- unsigned int num = vm_property_get_integer("gc.num_markers");
- NUM_MARKERS = (num==0)? NUM_MARKERS:num;
+ if (vm_property_is_set("gc.num_conclctors", VM_PROPERTIES) == 1) {
+ unsigned int num = vm_property_get_integer("gc.num_conclctors");
+ NUM_CONCLCTORS = (num==0)? NUM_CONCLCTORS:num;
+ }
+
+ // for concurrent GC debug
+ if (vm_property_is_set("gc.num_con_markers", VM_PROPERTIES) == 1) {
+ unsigned int num = vm_property_get_integer("gc.num_con_markers");
+ NUM_CON_MARKERS = (num==0)? NUM_CON_MARKERS:num;
+ }
+
+ if (vm_property_is_set("gc.num_con_sweepers", VM_PROPERTIES) == 1) {
+ unsigned int num = vm_property_get_integer("gc.num_con_sweepers");
+ NUM_CON_SWEEPERS = (num==0)? NUM_CON_SWEEPERS:num;
}
+
+
+
if (vm_property_is_set("gc.tospace_size", VM_PROPERTIES) == 1) {
TOSPACE_SIZE = vm_property_get_size("gc.tospace_size");
}
@@ -412,4 +429,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_platform.h Tue Oct 28 20:01:01 2008
@@ -66,7 +66,7 @@
#define prefetchnta(pref_addr) _mm_prefetch((char*)(pref_addr), _MM_HINT_NTA )
#endif /*ALLOC_PREFETCH*/
-#elif defined (__linux__) || defined(FREEBSD)
+#elif defined (__linux__)
#define FORCE_INLINE inline __attribute__((always_inline))
#ifdef PREFETCH_SUPPORTED
@@ -154,6 +154,13 @@
return disable_count;
}
+inline int vm_suspend_all_threads( hythread_iterator_t *thread_iterator )
+{
+ int disable_count = hythread_reset_suspend_disable();
+ hythread_suspend_all(thread_iterator, NULL);
+ hythread_suspend_disable();
+ return disable_count;
+}
inline void vm_resume_all_threads(int disable_count)
{
hythread_suspend_enable();
@@ -323,6 +330,7 @@
#ifdef PLATFORM_POSIX
#define max(x, y) (((x)>(y))?(x):(y))
+#define min(x, y) (((x)<(y))?(x):(y))
#endif
typedef volatile unsigned int SpinLock;
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/common/gc_properties.h Tue Oct 28 20:01:01 2008
@@ -54,7 +54,8 @@
enum GC_Property{
ALGO_HAS_NOS = 0x1,
ALGO_HAS_LOS = 0x2,
- ALGO_IS_GEN = 0x4,
+ ALGO_IS_UNIQUE = 0x4,
+ ALGO_IS_GEN = 0x8,
ALGO_COPY_FORWARD = 0x10,
ALGO_COPY_SEMISPACE = 0x20,
@@ -107,6 +108,21 @@
GC_PROP &= ~ALGO_IS_GEN;
}
+FORCE_INLINE Boolean gc_is_unique_space()
+{
+ return gc_is_kind(ALGO_IS_UNIQUE);
+}
+
+FORCE_INLINE Boolean gc_is_unique_move_compact()
+{
+ return gc_is_kind(ALGO_IS_UNIQUE) && gc_is_kind(ALGO_COMPACT_MOVE);
+}
+
+FORCE_INLINE Boolean gc_is_unique_mark_sweep()
+{
+ return gc_is_kind(ALGO_IS_UNIQUE) && gc_is_kind(ALGO_MS_NORMAL);
+}
+
FORCE_INLINE Boolean gc_is_gen_mode()
{
return gc_is_kind(ALGO_IS_GEN);
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp Tue Oct 28 20:01:01 2008
@@ -178,7 +178,7 @@
} else {
assert(major_is_marksweep());
p_ref_or_obj = p_obj;
- if(!gc_mark_is_concurrent())
+ if( gc->gc_concurrent_status == GC_CON_NIL )
trace_object = trace_obj_in_ms_marking;
else
trace_object = trace_obj_in_ms_concurrent_mark;
@@ -856,4 +856,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp Tue Oct 28 20:01:01 2008
@@ -22,6 +22,7 @@
#include "finalizer_weakref_metadata.h"
#include "../thread/mutator.h"
#include "../thread/collector.h"
+#include "../thread/conclctor.h"
#define FINREF_METADATA_SEG_SIZE_BIT_SHIFT 20
#define FINREF_METADATA_SEG_SIZE_BYTES (1 << FINREF_METADATA_SEG_SIZE_BIT_SHIFT)
@@ -235,20 +236,6 @@
collector->weakref_set= NULL;
collector->phanref_set= NULL;
}
-
- if(gc_mark_is_concurrent() && !gc_sweep_is_concurrent()){
- unsigned int num_active_markers = gc->num_active_markers;
- for(unsigned int i = 0; i < num_active_markers; i++)
- {
- Collector *marker = (Collector*)gc->markers[i];
- pool_put_entry(metadata->softref_pool, marker->softref_set);
- pool_put_entry(metadata->weakref_pool, marker->weakref_set);
- pool_put_entry(metadata->phanref_pool, marker->phanref_set);
- marker->softref_set = NULL;
- marker->weakref_set= NULL;
- marker->phanref_set= NULL;
- }
- }
return;
}
@@ -448,3 +435,4 @@
+
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen.h Tue Oct 28 20:01:01 2008
@@ -76,9 +76,12 @@
unsigned int num_collectors;
unsigned int num_active_collectors; /* not all collectors are working */
- Marker** markers;
- unsigned int num_markers;
+ /*concurrent markers and collectors*/
+ Conclctor** conclctors;
+ unsigned int num_conclctors;
+ //unsigned int num_active_conclctors;
unsigned int num_active_markers;
+ unsigned int num_active_sweepers;
/* metadata is the pool for rootset, markstack, etc. */
GC_Metadata* metadata;
@@ -99,7 +102,7 @@
//For_LOS_extend
Space_Tuner* tuner;
- unsigned int gc_concurrent_status;
+ volatile unsigned int gc_concurrent_status;
Collection_Scheduler* collection_scheduler;
SpinLock lock_con_mark;
@@ -219,4 +222,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/los/lspace_alloc_collect.cpp Tue Oct 28 20:01:01 2008
@@ -544,4 +544,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.cpp Tue Oct 28 20:01:01 2008
@@ -18,10 +18,11 @@
#include "../common/gc_common.h"
#include "gc_ms.h"
+#include "../common/gc_concurrent.h"
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#include "../common/compressed_ref.h"
-#include "../thread/marker.h"
+#include "../thread/conclctor.h"
#include "../verify/verify_live_heap.h"
#ifdef USE_32BITS_HASHCODE
#include "../common/hashcode.h"
@@ -69,7 +70,7 @@
void gc_ms_reclaim_heap(GC_MS *gc)
{
- if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
+ //if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
Wspace *wspace = gc_ms_get_wspace(gc);
@@ -77,32 +78,48 @@
wspace_reset_after_collection(wspace);
- if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
+ //if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
}
-void wspace_mark_scan_concurrent(Marker* marker);
+void wspace_mark_scan_concurrent(Conclctor* marker);
+void wspace_last_mc_marker_work(Conclctor *last_marker);
+void wspace_last_otf_marker_work(Conclctor *last_marker);
+
void gc_ms_start_con_mark(GC_MS* gc, unsigned int num_markers)
{
if(gc->num_active_markers == 0)
pool_iterator_init(gc->metadata->gc_rootset_pool);
-
- marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace, num_markers);
+
+ set_marker_final_func( (TaskType)wspace_last_otf_marker_work );
+ conclctor_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER);
}
-void wspace_mark_scan_mostly_concurrent(Marker* marker);
+void wspace_mark_scan_mostly_concurrent(Conclctor* marker);
+void wspace_last_mc_marker_work(Conclctor* marker);
+
void gc_ms_start_mostly_con_mark(GC_MS* gc, unsigned int num_markers)
{
if(gc->num_active_markers == 0)
pool_iterator_init(gc->metadata->gc_rootset_pool);
- marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers);
+ set_marker_final_func( (TaskType)wspace_last_mc_marker_work );
+ conclctor_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER);
}
+
+void wspace_final_mark_scan_mostly_concurrent( Conclctor *marker );
+void conclctor_execute_task_synchronized(GC* gc, TaskType task_func, Space* space, unsigned int num_markers, unsigned int role);
+
void gc_ms_start_mostly_con_final_mark(GC_MS* gc, unsigned int num_markers)
{
pool_iterator_init(gc->metadata->gc_rootset_pool);
- marker_execute_task((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace);
+ conclctor_execute_task_synchronized( (GC*)gc,(TaskType)wspace_final_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER );
+
+ /*
+ collector_execute_task( (GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace );
+ collector_set_weakref_sets( (GC*)gc );
+ */
}
/*FIXME: move this function out of this file.*/
@@ -119,32 +136,54 @@
unlock(gc->mutator_list_lock);
}
-void wspace_sweep_concurrent(Collector* collector);
-void gc_ms_start_con_sweep(GC_MS* gc, unsigned int num_collectors)
+
+void wspace_sweep_concurrent(Conclctor* collector);
+void wspace_last_sweeper_work(Conclctor *last_sweeper);
+//void gc_con_print_stat_heap_utilization_rate(GC *gc);
+ void gc_ms_get_current_heap_usage(GC_MS *gc);
+
+void gc_ms_start_con_sweep(GC_MS* gc, unsigned int num_conclctors)
{
ops_color_flip();
mem_fence();
gc_check_mutator_allocation((GC*)gc);
- gc_disable_alloc_obj_live();
- wspace_init_pfc_pool_iterator(gc->wspace);
+ gc_disable_alloc_obj_live((GC*)gc);
+ //just debugging
+ //gc_con_print_stat_heap_utilization_rate((GC*)gc);
+ //INFO2("gc.scheduler", "=== Start Con Sweeping ===");
+ Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
+ con_collection_stat->sweeping_time = time_now();
- collector_execute_task_concurrent((GC*)gc, (TaskType)wspace_sweep_concurrent, (Space*)gc->wspace, num_collectors);
+ gc_ms_get_current_heap_usage(gc);
+ gc_clear_conclctor_role((GC*)gc);
+ wspace_init_pfc_pool_iterator(gc->wspace);
+ set_sweeper_final_func( (TaskType)wspace_last_sweeper_work );
+ conclctor_execute_task_concurrent((GC*)gc, (TaskType)wspace_sweep_concurrent, (Space*)gc->wspace, num_conclctors, CONCLCTOR_ROLE_SWEEPER);
- collector_release_weakref_sets((GC*)gc, num_collectors);
+ //conclctor_release_weakref_sets((GC*)gc);
}
-void gc_ms_start_con_mark(GC_MS* gc)
+unsigned int gc_ms_get_live_object_size(GC_MS* gc)
{
- pool_iterator_init(gc->metadata->gc_rootset_pool);
+ POINTER_SIZE_INT num_live_obj = 0;
+ POINTER_SIZE_INT size_live_obj = 0;
- marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace);
+ unsigned int num_collectors = gc->num_active_collectors;
+ Collector** collectors = gc->collectors;
+ unsigned int i;
+ for(i = 0; i < num_collectors; i++){
+ Collector* collector = collectors[i];
+ num_live_obj += collector->live_obj_num;
+ size_live_obj += collector->live_obj_size;
+ }
+ return size_live_obj;
}
+
void gc_ms_update_space_stat(GC_MS* gc)
{
POINTER_SIZE_INT num_live_obj = 0;
- POINTER_SIZE_INT size_live_obj = 0;
- POINTER_SIZE_INT new_obj_size = 0;
+ POINTER_SIZE_INT size_live_obj = 0;
Space_Statistics* wspace_stat = gc->wspace->space_statistic;
@@ -157,22 +196,20 @@
size_live_obj += collector->live_obj_size;
}
- new_obj_size = gc_get_new_object_size((GC*)gc, TRUE);
-
- wspace_stat->size_new_obj = new_obj_size;
-
+ wspace_stat->size_new_obj = gc_get_mutator_new_obj_size( (GC*)gc );
wspace_stat->num_live_obj = num_live_obj;
wspace_stat->size_live_obj = size_live_obj;
wspace_stat->last_size_free_space = wspace_stat->size_free_space;
wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/
wspace_stat->space_utilization_ratio = (float)wspace_stat->size_new_obj / wspace_stat->last_size_free_space;
-
+
INFO2("gc.space.stat","[GC][Space Stat] num_live_obj : "<<wspace_stat->num_live_obj<<" ");
INFO2("gc.space.stat","[GC][Space Stat] size_live_obj : "<<wspace_stat->size_live_obj<<" ");
INFO2("gc.space.stat","[GC][Space Stat] size_free_space : "<<wspace_stat->size_free_space<<" ");
INFO2("gc.space.stat","[GC][Space Stat] last_size_free_space: "<<wspace_stat->last_size_free_space<<" ");
INFO2("gc.space.stat","[GC][Space Stat] size_new_obj : "<<wspace_stat->size_new_obj<<" ");
INFO2("gc.space.stat","[GC][Space Stat] utilization_ratio : "<<wspace_stat->space_utilization_ratio<<" ");
+
}
void gc_ms_reset_space_stat(GC_MS* gc)
@@ -180,10 +217,12 @@
Space_Statistics* wspace_stat = gc->wspace->space_statistic;
wspace_stat->size_new_obj = 0;
wspace_stat->num_live_obj = 0;
- wspace_stat->size_live_obj = 0;
+ wspace_stat->size_live_obj = 0;
wspace_stat->space_utilization_ratio = 0;
}
void gc_ms_iterate_heap(GC_MS *gc)
{
}
+
+
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/gc_ms.h Tue Oct 28 20:01:01 2008
@@ -47,10 +47,12 @@
unsigned int num_collectors;
unsigned int num_active_collectors; /* not all collectors are working */
- Marker** markers;
- unsigned int num_markers;
+ /*concurrent markers and collectors*/
+ Conclctor** conclctors;
+ unsigned int num_conclctors;
unsigned int num_active_markers;
-
+ unsigned int num_active_sweepers;
+
/* metadata is the pool for rootset, markstack, etc. */
GC_Metadata *metadata;
Finref_Metadata *finref_metadata;
@@ -70,7 +72,7 @@
//For_LOS_extend
Space_Tuner *tuner;
- unsigned int gc_concurrent_status;
+ volatile unsigned int gc_concurrent_status;
Collection_Scheduler* collection_scheduler;
SpinLock lock_con_mark;
@@ -91,10 +93,22 @@
//////////////////////////////////////////////////////////////////////////////////////////
inline void *gc_ms_fast_alloc(unsigned size, Allocator *allocator)
-{ return wspace_thread_local_alloc(size, allocator); }
+{
+ void *p_obj = wspace_thread_local_alloc(size, allocator);
+ if(p_obj) {
+ ((Mutator*)allocator)->new_obj_size += size;
+ ((Mutator*)allocator)->new_obj_num++;
+ }
+ return p_obj;
+ }
inline void *gc_ms_alloc(unsigned size, Allocator *allocator)
-{ return wspace_alloc(size, allocator); }
+{
+ void * p_obj = wspace_alloc(size, allocator);
+ if(p_obj)
+ ((Mutator*)allocator)->new_obj_num++;
+ return p_obj;
+}
inline Wspace *gc_ms_get_wspace(GC_MS *gc)
{ return gc->wspace; }
@@ -108,6 +122,8 @@
inline POINTER_SIZE_INT gc_ms_total_memory_size(GC_MS *gc)
{ return space_committed_size((Space*)gc_ms_get_wspace(gc)); }
+void gc_ms_print_detail_stat(GC_MS *gc);
+
/////////////////////////////////////////////////////////////////////////////////////////
void gc_ms_initialize(GC_MS *gc, POINTER_SIZE_INT initial_heap_size, POINTER_SIZE_INT final_heap_size);
@@ -123,4 +139,11 @@
void gc_ms_start_mostly_con_final_mark(GC_MS* gc, unsigned int num_markers);
void gc_ms_reset_space_stat(GC_MS* gc);
+unsigned int gc_ms_get_live_object_size(GC_MS* gc);
+
+FORCE_INLINE Con_Collection_Statistics *gc_ms_get_con_collection_stat(GC_MS* gc)
+{
+ return gc->wspace->con_collection_statistics;
+}
+
#endif // _GC_MS_H_
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp Tue Oct 28 20:01:01 2008
@@ -63,6 +63,10 @@
memset(wspace->space_statistic, 0, sizeof(Space_Statistics));
wspace->space_statistic->size_free_space = commit_size;
+ wspace->con_collection_statistics = (Con_Collection_Statistics*)STD_MALLOC(sizeof(Con_Collection_Statistics));
+ memset(wspace->con_collection_statistics, 0, sizeof(Con_Collection_Statistics));
+ wspace->con_collection_statistics->heap_utilization_rate = DEFAULT_HEAP_UTILIZATION_RATE;
+
#ifdef USE_UNIQUE_MARK_SWEEP_GC
gc_ms_set_wspace((GC_MS*)gc, wspace);
#else
@@ -207,17 +211,6 @@
}
}
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-void wspace_set_space_statistic(Wspace *wspace)
-{
- GC_MS *gc = (GC_MS*)wspace->gc;
-
- for(unsigned int i = 0; i < gc->num_collectors; ++i){
- wspace->surviving_obj_num += gc->collectors[i]->live_obj_num;
- wspace->surviving_obj_size += gc->collectors[i]->live_obj_size;
- }
-}
-#endif
extern void wspace_decide_compaction_need(Wspace *wspace);
extern void mark_sweep_wspace(Collector *collector);
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h Tue Oct 28 20:01:01 2008
@@ -24,12 +24,14 @@
#include "../common/gc_common.h"
#include "../common/gc_concurrent.h"
+#define DEFAULT_HEAP_UTILIZATION_RATE 0.92f
/*
* The sweep space accomodates objects collected by mark-sweep
*/
struct Size_Segment;
struct Free_Chunk_List;
+struct Con_Collection_Statistics;
typedef struct Wspace {
/* <-- first couple of fields are overloadded as Space */
@@ -71,14 +73,11 @@
Free_Chunk_List *aligned_free_chunk_lists;
Free_Chunk_List *unaligned_free_chunk_lists;
Free_Chunk_List *hyper_free_chunk_list;
- POINTER_SIZE_INT surviving_obj_num;
- POINTER_SIZE_INT surviving_obj_size;
+
+ Con_Collection_Statistics *con_collection_statistics;
+
} Wspace;
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
-void wspace_set_space_statistic(Wspace *wspace);
-#endif
-
Wspace *wspace_initialize(GC *gc, void *start, POINTER_SIZE_INT wspace_size, POINTER_SIZE_INT commit_size);
void wspace_destruct(Wspace *wspace);
void wspace_reset_after_collection(Wspace *wspace);
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp Tue Oct 28 20:01:01 2008
@@ -120,8 +120,9 @@
#ifdef SSPACE_VERIFY
wspace_verify_alloc(p_obj, size);
#endif
-
- if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
+ if(p_obj) {
+ ((Mutator*)allocator)->new_obj_occupied_size+=size;
+ }
return p_obj;
}
@@ -149,6 +150,7 @@
//if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
if(!chunk){
mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
+ //INFO2("gc.wspace", "[Local Alloc Failed] alloc obj with size" << size << " bytes" );
return NULL;
}
chunk->status |= CHUNK_IN_USE;
@@ -184,6 +186,7 @@
//if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
if(!chunk) {
mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
+ //INFO2("gc.wspace", "[Non-Local Alloc Failed] alloc obj with size" << size << " bytes" );
return NULL;
}
p_obj = alloc_in_chunk(chunk);
@@ -201,7 +204,9 @@
mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
}
-
+ if(p_obj) {
+ ((Mutator*)allocator)->new_obj_occupied_size+=size;
+ }
return p_obj;
}
@@ -224,7 +229,7 @@
mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_ENTER_ALLOC_MARK);
if(is_obj_alloced_live()){
- chunk->table[0] |= cur_mark_black_color ;
+ chunk->table[0] |= cur_mark_black_color; // just for debugging, mark new object
}
mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
@@ -234,6 +239,8 @@
chunk->status = CHUNK_ABNORMAL| CHUNK_USED;
wspace_reg_used_chunk(wspace, chunk);
assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
+
+ ((Mutator*)allocator)->new_obj_occupied_size+=chunk_size;
return chunk->base;
}
@@ -257,14 +264,21 @@
#ifdef WSPACE_CONCURRENT_GC_STATS
if(p_obj && gc_con_is_in_marking()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK;
#endif
-
+
+
return p_obj;
}
+Free_Chunk_List *get_hyper_free_chunk_list();
+
/* FIXME:: the collection should be seperated from the alloation */
void *wspace_alloc(unsigned size, Allocator *allocator)
{
void *p_obj = NULL;
+ /*
+ if( get_hyper_free_chunk_list()->head == NULL )
+ INFO2("gc.wspace", "[BEFORE ALLOC]hyper free chunk is EMPTY!!");
+ */
if(gc_is_specify_con_gc())
gc_sched_collection(allocator->gc, GC_CAUSE_CONCURRENT_GC);
@@ -273,6 +287,10 @@
p_obj = wspace_try_alloc(size, allocator);
if(p_obj){
((Mutator*)allocator)->new_obj_size += size;
+ /*
+ if( get_hyper_free_chunk_list()->head == NULL )
+ INFO2("gc.wspace", "[AFTER FIRST ALLOC]hyper free chunk is EMPTY!!");
+ */
return p_obj;
}
@@ -284,9 +302,21 @@
if(p_obj){
vm_gc_unlock_enum();
((Mutator*)allocator)->new_obj_size += size;
+ /*
+ if( get_hyper_free_chunk_list()->head == NULL )
+ INFO2("gc.wspace", "[AFTER SECOND ALLOC]hyper free chunk is EMPTY!!");
+ */
return p_obj;
}
- gc_reclaim_heap(allocator->gc, GC_CAUSE_MOS_IS_FULL);
+
+ INFO2("gc.con.info", "[Exhausted Cause] Allocation size is :" << size << " bytes");
+ GC *gc = allocator->gc;
+ /*
+ gc->cause = GC_CAUSE_MOS_IS_FULL;
+ if(gc_is_specify_con_gc())
+ gc_relaim_heap_con_mode(gc);
+ else*/
+ gc_reclaim_heap(gc, GC_CAUSE_MOS_IS_FULL);
vm_gc_unlock_enum();
#ifdef SSPACE_CHUNK_INFO
@@ -294,7 +324,10 @@
#endif
p_obj = wspace_try_alloc(size, allocator);
-
+ /*
+ if( get_hyper_free_chunk_list()->head == NULL )
+ INFO2("gc.wspace", "[AFTER COLLECTION ALLOC]hyper free chunk is EMPTY!!");
+ */
if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
return p_obj;
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h Tue Oct 28 20:01:01 2008
@@ -184,8 +184,9 @@
void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
/*mark black is placed here because of race condition between ops color flip. */
- if(p_obj && is_obj_alloced_live())
+ if(p_obj && is_obj_alloced_live()) {
obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
+ }
alloc_slot_in_table(table, slot_index);
if(chunk->status & CHUNK_NEED_ZEROING)
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp Tue Oct 28 20:01:01 2008
@@ -29,6 +29,9 @@
static Free_Chunk_List unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET];
static Free_Chunk_List hyper_free_chunk_list;
+Free_Chunk_List *get_hyper_free_chunk_list() {
+ return &hyper_free_chunk_list;
+}
static void init_size_segment(Size_Segment *seg, unsigned int size_min, unsigned int size_max, unsigned int gran_shift_bits, Boolean local_alloc)
{
@@ -313,15 +316,20 @@
{
POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
assert(!(chunk_size % CHUNK_GRANULARITY));
-
+
+ Free_Chunk_List *free_list = NULL;
if(chunk_size > HYPER_OBJ_THRESHOLD){
- lock(wspace->hyper_free_chunk_list->lock);
+ free_list = wspace->hyper_free_chunk_list;
+ lock(free_list->lock);
list_put_hyper_free_chunk_to_tail(wspace->hyper_free_chunk_list, chunk);
- unlock(wspace->hyper_free_chunk_list->lock);
- }else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
- list_put_free_chunk_to_tail(&wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
- else
- list_put_free_chunk_to_tail(&wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+ unlock(free_list->lock);
+ }else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK)) {
+ free_list = &wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
+ list_put_free_chunk_to_tail(free_list, chunk);
+ } else {
+ free_list = &wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
+ list_put_free_chunk_to_tail(free_list, chunk);
+ }
}
@@ -410,9 +418,11 @@
assert(!chunk);
/* search in the hyper free chunk list */
+
chunk = wspace_get_hyper_free_chunk(wspace, NORMAL_CHUNK_SIZE_BYTES, TRUE);
assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
-
+ /*if(chunk == NULL )
+ INFO2("gc.wspace", "return from hyper free chunk list");*/
return chunk;
}
@@ -476,7 +486,12 @@
Free_Chunk *prev_chunk = NULL;
Free_Chunk *chunk = list->head;
+ /*
+ if( chunk == NULL )
+ INFO2("gc.wspace", "NO free hyper chunk now!!!" );
+ */
while(chunk){
+
if(CHUNK_SIZE(chunk) >= chunk_size){
Free_Chunk *next_chunk = chunk->next;
if(prev_chunk)
@@ -488,6 +503,8 @@
else
list->tail = prev_chunk;
break;
+ } else {
+ //INFO2("gc.wspace", "check chunk with SIZE "<<CHUNK_SIZE(chunk) << " ,not enough" );
}
prev_chunk = chunk;
chunk = chunk->next;
@@ -955,4 +972,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.h Tue Oct 28 20:01:01 2008
@@ -204,6 +204,35 @@
assert(list->lock == FREE_LOCK);
}
+inline void free_chunk_list_add_tail(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+ chunk->next = NULL;
+ if(list->head) {
+ list->tail->next = chunk;
+ chunk->prev = list->tail;
+ list->tail = chunk;
+ } else {
+ chunk->prev = NULL;
+ list->head = list->tail = chunk;
+ }
+ list->chunk_num++;
+}
+
+inline void free_chunk_list_add_head(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+ chunk->prev = NULL;
+ if(list->head) {
+ list->head->prev = chunk;
+ chunk->next = list->head;
+ list->head = chunk;
+ } else {
+ chunk->next = NULL;
+ list->head = list->tail = chunk;
+ }
+ list->chunk_num++;
+}
+
+
inline void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
{
if(chunk->prev)
@@ -218,6 +247,18 @@
--list->chunk_num;
}
+inline Boolean chunk_is_in_list(Free_Chunk_List *from_list, Free_Chunk *chunk)
+{
+ Free_Chunk *pro_chunk = from_list->head;
+ while(pro_chunk) {
+ if(pro_chunk == chunk)
+ return TRUE;
+ pro_chunk = pro_chunk->next;
+ }
+ return FALSE;
+}
+
+
inline void move_free_chunks_between_lists(Free_Chunk_List *to_list, Free_Chunk_List *from_list)
{
if(to_list->tail){
@@ -229,8 +270,10 @@
from_list->tail->next = to_list->head;
to_list->head = from_list->head;
}
+ //to_list->chunk_num += from_list->chunk_num;
from_list->head = NULL;
from_list->tail = NULL;
+ from_list->chunk_num = 0;
}
/* Padding the last index word in table to facilitate allocation */
@@ -402,7 +445,7 @@
Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
/*2. If in concurrent sweeping phase, search PFC backup pool*/
- if(!chunk && gc_con_is_in_sweeping()){
+ if(!chunk && in_con_sweeping_phase(wspace->gc)){
pfc_pool = wspace->pfc_pools_backup[seg_index][index];
chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
}
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_compact.cpp Tue Oct 28 20:01:01 2008
@@ -33,7 +33,7 @@
float free_mem_ratio = (float)free_mem_size / wspace->committed_heap_size;
#ifdef USE_UNIQUE_MARK_SWEEP_GC
- if(!gc_mark_is_concurrent() && (free_mem_ratio > WSPACE_COMPACT_RATIO) && (wspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+ if( gc_con_is_in_STW(wspace->gc) && (free_mem_ratio > WSPACE_COMPACT_RATIO) && (wspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
#else
if(collect_is_major()){
#endif
@@ -280,4 +280,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp Tue Oct 28 20:01:01 2008
@@ -17,9 +17,12 @@
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
-#include "../thread/marker.h"
+#include "../thread/conclctor.h"
+#include "gc_ms.h"
volatile Boolean need_terminate_mostly_con_mark;
+extern unsigned int mostly_con_final_marker_num;
+extern unsigned int mostly_con_long_marker_num;
Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
@@ -35,7 +38,7 @@
}
}
-static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
+static FORCE_INLINE void scan_object(Conclctor* marker, Partial_Reveal_Object *p_obj)
{
assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
if(obj_is_dirty_in_table(p_obj)){
@@ -67,21 +70,22 @@
}
#ifndef BUILD_IN_REFERENT
- scan_weak_reference((Collector*)marker, p_obj, scan_slot);
+ //scan_weak_reference((Collector*)marker, p_obj, scan_slot);
+ scan_weak_reference_direct((Collector*)marker, p_obj, scan_slot);
#endif
}
-static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
+static void trace_object(Conclctor* marker, Partial_Reveal_Object *p_obj)
{
scan_object(marker, p_obj);
- obj_mark_black_in_table(p_obj);
+ obj_mark_black_in_table(p_obj, marker);
Vector_Block *trace_stack = marker->trace_stack;
while(!vector_stack_is_empty(trace_stack)){
p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
scan_object(marker, p_obj);
- obj_mark_black_in_table(p_obj);
+ obj_mark_black_in_table(p_obj, marker);
trace_stack = marker->trace_stack;
}
}
@@ -91,34 +95,31 @@
{ need_terminate_mostly_con_mark = FALSE; }
void terminate_mostly_con_mark()
-{ need_terminate_mostly_con_mark = TRUE; }
+{ need_terminate_mostly_con_mark = TRUE; }
-static Boolean concurrent_mark_need_terminating(GC* gc)
+static Boolean concurrent_mark_need_terminating_mc(GC* gc)
{
- if(need_terminate_mostly_con_mark) return TRUE;
-
+ return need_terminate_mostly_con_mark;
+ /*
GC_Metadata *metadata = gc->metadata;
return pool_is_empty(metadata->gc_dirty_set_pool);
+ */
}
static volatile unsigned int num_active_markers = 0;
-
-void wspace_mark_scan_mostly_concurrent(Marker* marker)
+static SpinLock info_lock;
+void wspace_mark_scan_mostly_concurrent(Conclctor* marker)
{
- int64 time_mark_start = time_now();
GC *gc = marker->gc;
GC_Metadata *metadata = gc->metadata;
- /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
- unsigned int current_thread_id = atomic_inc32(&num_active_markers);
+ unsigned int num_dirtyset_slot = 0;
- unsigned int num_dirtyset_slot = 0;
-
marker->trace_stack = free_task_pool_get_entry(metadata);
- Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
/* first step: copy all root objects to mark tasks.*/
+ Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
while(root_set){
POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
while(!vector_block_iterator_end(root_set,iter)){
@@ -132,15 +133,28 @@
}
root_set = pool_iterator_next(metadata->gc_rootset_pool);
}
+
/* put back the last trace_stack task */
pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
-
marker->trace_stack = free_task_pool_get_entry(metadata);
+ /* the following code has these concerns:
+ 1, current_thread_id should be unique
+ 2, mostly concurrent marking does not need to add new markers dynamically
+ 3, when the heap is exhausted, final marking will enumerate the rootset; it should happen after the above actions
+ */
+ unsigned int current_thread_id = atomic_inc32(&num_active_markers);
+
+ if((current_thread_id+1) == gc->num_active_markers )
+ state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
+
+ while( gc->gc_concurrent_status == GC_CON_START_MARKERS );
+
retry:
+
+
/*second step: mark dirty pool*/
-
Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
while(dirty_set){
@@ -152,7 +166,7 @@
assert(p_obj!=NULL); //FIXME: restrict condition?
obj_clear_dirty_in_table(p_obj);
- obj_clear_mark_in_table(p_obj);
+ obj_clear_mark_in_table(p_obj, marker);
if(obj_mark_gray_in_table(p_obj))
collector_tracestack_push((Collector*)marker, p_obj);
@@ -186,41 +200,187 @@
mark_task = pool_get_entry(metadata->mark_task_pool);
}
+ /*
if(current_thread_id == 0){
gc_prepare_dirty_set(marker->gc);
- }
+ }*/
+ gc_copy_local_dirty_set_to_global(gc);
+
/* conditions to terminate mark:
1.All thread finished current job.
2.Flag is set to terminate concurrent mark.
*/
atomic_dec32(&num_active_markers);
- while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
+ while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc) ) {
+ if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)) {
+ atomic_inc32(&num_active_markers);
+ goto retry;
+ } else if( current_thread_id >= mostly_con_long_marker_num ) {
+ break;
+ }
+ apr_sleep(15000);
+ }
+
+ /*
+ while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc)){
if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
atomic_inc32(&num_active_markers);
goto retry;
}
+ }*/
+
+ /* put back the last mark stack to the free pool */
+ mark_task = (Vector_Block*)marker->trace_stack;
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ marker->trace_stack = NULL;
+ marker->num_dirty_slots_traced = num_dirtyset_slot;
+
+ /*
+ if(num_dirtyset_slot!=0) {
+ lock(info_lock);
+ INFO2("gc.marker", "marker ["<< current_thread_id <<"] processed dirty slot="<<num_dirtyset_slot);
+ unlock(info_lock);
+ }*/
+ return;
+}
+
+
+void wspace_final_mark_scan_mostly_concurrent(Conclctor* marker)
+{
+
+ GC *gc = marker->gc;
+ GC_Metadata *metadata = gc->metadata;
+
+ unsigned int num_dirtyset_slot = 0;
+
+ marker->trace_stack = free_task_pool_get_entry(metadata);
+ Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ /* first step: copy all root objects to mark tasks.*/
+ while(root_set){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ assert(p_obj!=NULL);
+ assert(address_belongs_to_gc_heap(p_obj, gc));
+ if(obj_mark_gray_in_table(p_obj))
+ collector_tracestack_push((Collector*)marker, p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
}
+ /* put back the last trace_stack task */
+ pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+ marker->trace_stack = free_task_pool_get_entry(metadata);
+
+
+ /*second step: mark dirty pool*/
+ Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+
+ while(dirty_set){
+ POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
+ while(!vector_block_iterator_end(dirty_set,iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ iter = vector_block_iterator_advance(dirty_set,iter);
+
+ assert(p_obj!=NULL); //FIXME: restrict condition?
+
+ obj_clear_dirty_in_table(p_obj);
+ obj_clear_mark_in_table(p_obj, marker);
+
+ if(obj_mark_gray_in_table(p_obj))
+ collector_tracestack_push((Collector*)marker, p_obj);
+
+ num_dirtyset_slot ++;
+ }
+ vector_block_clear(dirty_set);
+ pool_put_entry(metadata->free_set_pool, dirty_set);
+ dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
+
+ /* third step: iterate over the mark tasks and scan objects */
+ marker->trace_stack = free_task_pool_get_entry(metadata);
+
+ Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+ while(mark_task){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task,iter);
+ trace_object(marker, p_obj);
+ }
+ /* run out one task, put back to the pool and grab another task */
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
/* put back the last mark stack to the free pool */
mark_task = (Vector_Block*)marker->trace_stack;
vector_stack_clear(mark_task);
pool_put_entry(metadata->free_task_pool, mark_task);
marker->trace_stack = NULL;
+
+ //marker->time_mark += time_mark;
+ marker->num_dirty_slots_traced = num_dirtyset_slot;
+ //INFO2("gc.marker", "[final marker] processed dirty slot="<<num_dirtyset_slot);
- int64 time_mark = time_now() - time_mark_start;
- marker->time_mark += time_mark;
- marker->num_dirty_slots_traced += num_dirtyset_slot;
return;
}
+
+
+
+void wspace_last_mc_marker_work( Conclctor *last_marker ) {
+
+ GC *gc = last_marker->gc;
+ if( gc->gc_concurrent_status != GC_CON_TRACING )
+ return;
+
+ gc_con_update_stat_after_marking(gc); //calculate marked size
+ //just debugging
+ Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
+ con_collection_stat->marking_end_time = time_now();
+ int64 con_marking_time = con_collection_stat->marking_end_time - con_collection_stat->marking_start_time;
+ INFO2("gc.scheduler", "[MOSTLY_CON] con marking time=" << con_marking_time << " us");
+
+ state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
+ //INFO2("gc.con.info", "<new state 3> first marking thread finished its job, GC is waiting for all the marking threads finish, current marker num is [" << gc->num_active_markers << "]" );
+}
+
+void gc_mostly_con_update_stat_after_final_marking(GC *gc);
+void wspace_mostly_con_final_mark( GC *gc ) {
+
+ /*init the root set pool*/
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
+ /*prepare dirty object*/
+ gc_prepare_dirty_set(gc);
+ /*a newly assigned thread may reuse the one just finished in the same phase*/
+ conclctor_set_weakref_sets(gc);
+
+ /*start final mostly concurrent mark */
+ gc_ms_start_mostly_con_final_mark((GC_MS*)gc, mostly_con_final_marker_num);
+
+ mostly_con_mark_terminate_reset();
+ gc_mostly_con_update_stat_after_final_marking(gc);
+
+ gc_reset_dirty_set(gc);
+ gc_clear_rootset(gc);
+ gc_prepare_sweeping(gc);
+ state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
+}
+
void trace_obj_in_ms_mostly_concurrent_mark(Collector *collector, void *p_obj)
{
obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
- trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
+ trace_object((Conclctor*)collector, (Partial_Reveal_Object *)p_obj);
}
-
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp Tue Oct 28 20:01:01 2008
@@ -16,7 +16,11 @@
*/
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
-#include "../thread/marker.h"
+#include "../thread/conclctor.h"
+#include "gc_ms.h"
+struct GC_MS;
+struct Wspace;
+struct Space_Statistics;
Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
@@ -32,7 +36,7 @@
}
}
-static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
+static FORCE_INLINE void scan_object(Conclctor* marker, Partial_Reveal_Object *p_obj)
{
assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
@@ -71,42 +75,59 @@
}
-static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
+static void trace_object(Conclctor* marker, Partial_Reveal_Object *p_obj)
{
scan_object(marker, p_obj);
- obj_mark_black_in_table(p_obj);
+ //obj_mark_black_in_table(p_obj);
+ obj_mark_black_in_table(p_obj, marker);
Vector_Block *trace_stack = marker->trace_stack;
while(!vector_stack_is_empty(trace_stack)){
p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
scan_object(marker, p_obj);
- obj_mark_black_in_table(p_obj);
+ //obj_mark_black_in_table(p_obj);
+ obj_mark_black_in_table(p_obj, marker);
trace_stack = marker->trace_stack;
}
}
-static Boolean concurrent_mark_need_terminating(GC* gc)
+static Boolean dirty_set_is_empty(GC *gc)
{
+ lock(gc->mutator_list_lock);
+ Mutator *mutator = gc->mutator_list;
+ while (mutator) {
+ Vector_Block* local_dirty_set = mutator->dirty_set;
+ if(!vector_block_is_empty(local_dirty_set)){
+ unlock(gc->mutator_list_lock);
+ return FALSE;
+ }
+ mutator = mutator->next;
+ }
GC_Metadata *metadata = gc->metadata;
- return gc_local_dirtyset_is_empty(gc) && pool_is_empty(metadata->gc_dirty_set_pool);
+ Boolean is_empty = pool_is_empty(metadata->gc_dirty_set_pool);
+ unlock(gc->mutator_list_lock); //unlock put here to prevent creating new mutators before checking global dirty set
+ return is_empty;
+}
+static Boolean concurrent_mark_need_terminating_otf(GC* gc)
+{
+ return dirty_set_is_empty(gc);
}
/* for marking phase termination detection */
static volatile unsigned int num_active_markers = 0;
+//static volatile unsigned int root_set_obj_size = 0;
-void wspace_mark_scan_concurrent(Marker* marker)
+void wspace_mark_scan_concurrent(Conclctor* marker)
{
- marker->time_measurement_start = time_now();
+ //marker->time_measurement_start = time_now();
GC *gc = marker->gc;
GC_Metadata *metadata = gc->metadata;
/* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
- atomic_inc32(&num_active_markers);
-
+ unsigned int current_thread_id = atomic_inc32(&num_active_markers);
marker->trace_stack = free_task_pool_get_entry(metadata);
-
Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
+
/* first step: copy all root objects to mark tasks.*/
while(root_set){
POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
@@ -116,6 +137,7 @@
assert(p_obj!=NULL);
assert(address_belongs_to_gc_heap(p_obj, gc));
+ //if(obj_mark_gray_in_table(p_obj, &root_set_obj_size))
if(obj_mark_gray_in_table(p_obj))
collector_tracestack_push((Collector*)marker, p_obj);
}
@@ -126,8 +148,10 @@
marker->trace_stack = free_task_pool_get_entry(metadata);
+ state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
retry:
+ gc_copy_local_dirty_set_to_global(marker->gc);
/*second step: mark dirty object snapshot pool*/
Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
@@ -137,7 +161,10 @@
Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
iter = vector_block_iterator_advance(dirty_set,iter);
- assert(p_obj!=NULL); //FIXME: restrict?
+ if(p_obj==NULL) { //FIXME: restrict?
+ RAISE_ERROR;
+ }
+ marker->num_dirty_slots_traced++;
if(obj_mark_gray_in_table(p_obj))
collector_tracestack_push((Collector*)marker, p_obj);
}
@@ -175,39 +202,15 @@
3.global snapshot pool is empty.
*/
atomic_dec32(&num_active_markers);
- while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
- if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
- atomic_inc32(&num_active_markers);
- goto retry;
- }else{
- /*grab a block from mutator and begin tracing*/
- POINTER_SIZE_INT thread_num = (POINTER_SIZE_INT)marker->thread_handle;
- Vector_Block* local_dirty_set = gc_get_local_dirty_set(gc, (unsigned int)(thread_num + 1));
- /*1. If local_dirty_set has been set full bit, the block is full and will no longer be put into global snapshot pool;
- so it should be checked again to see if there're remaining entries unscanned in it. In this case, the
- share bit in local_dirty_set should not be cleared, beacause of rescanning exclusively.
- 2. If local_dirty_set has not been set full bit, the block is used by mutator and has the chance to be put into
- global snapshot pool. In this case, we simply clear the share bit in local_dirty_set.
- */
- if(local_dirty_set != NULL){
- atomic_inc32(&num_active_markers);
- do{
- while(!vector_block_is_empty(local_dirty_set)){ //|| !vector_block_not_full_set_unshared(local_dirty_set)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_dirty_set);
- if(!obj_belongs_to_gc_heap(p_obj)) {
- assert(0);
- }
-
- if(obj_mark_gray_in_table(p_obj)){
- collector_tracestack_push((Collector*)marker, p_obj);
- }
- }
- }while(!vector_block_not_full_set_unshared(local_dirty_set) && !vector_block_is_empty(local_dirty_set));
- goto retry;
- }
+ while(num_active_markers != 0 || !concurrent_mark_need_terminating_otf(gc)){
+ if(!pool_is_empty(metadata->mark_task_pool) || !concurrent_mark_need_terminating_otf(gc)){
+ atomic_inc32(&num_active_markers);
+ goto retry;
}
+ apr_sleep(15000);
}
-
+
+ state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
/* put back the last mark stack to the free pool */
mark_task = (Vector_Block*)marker->trace_stack;
vector_stack_clear(mark_task);
@@ -215,16 +218,31 @@
marker->trace_stack = NULL;
assert(pool_is_empty(metadata->gc_dirty_set_pool));
- marker->time_measurement_end = time_now();
- marker->time_mark = marker->time_measurement_end - marker->time_measurement_start;
-
+ //INFO2("gc.con.info", "<stage 5>first marker finishes its job");
+
return;
}
+void wspace_last_otf_marker_work( Conclctor *last_marker ) {
+ GC *gc = last_marker->gc;
+
+ gc_reset_dirty_set(gc);
+ gc_set_barrier_function(WB_REM_NIL);
+
+ //INFO2("gc.con.info", "<stage 6>all markers finish ");
+ gc_con_update_stat_after_marking(gc); //calculate marked size
+
+ gc_clear_rootset(gc);
+
+ gc_prepare_sweeping(gc);
+ state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
+}
+
+
void trace_obj_in_ms_concurrent_mark(Collector *collector, void *p_obj)
{
obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
- trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
+ trace_object((Conclctor*)collector, (Partial_Reveal_Object *)p_obj);
}
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp Tue Oct 28 20:01:01 2008
@@ -21,6 +21,7 @@
#include "gc_ms.h"
#include "../gen/gen.h"
#include "../thread/collector.h"
+#include "../thread/conclctor.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#include "../common/fix_repointed_refs.h"
#include "../common/gc_concurrent.h"
@@ -269,13 +270,14 @@
Mark all live objects in heap ****************************/
atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
- if(!gc_mark_is_concurrent()){
+ //if mark has been done in a concurrent manner, skip this mark
+ if( gc_con_is_in_STW(gc) ) {
if(collect_is_fallback())
wspace_fallback_mark_scan(collector, wspace);
else
wspace_mark_scan(collector, wspace);
}
-
+
unsigned int old_num = atomic_inc32(&num_marking_collectors);
if( ++old_num == num_active_collectors ){
/* last collector's world here */
@@ -292,8 +294,9 @@
#endif
gc_identify_dead_weak_roots(gc);
gc_init_chunk_for_sweep(gc, wspace);
- /* let other collectors go */
- num_marking_collectors++;
+
+ /* let other collectors go */
+ num_marking_collectors++;
}
while(num_marking_collectors != num_active_collectors + 1);
@@ -302,8 +305,8 @@
atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors+1);
wspace_sweep(collector, wspace);
-
old_num = atomic_inc32(&num_sweeping_collectors);
+ //INFO2("gc.con.scheduler", "[SWEEPER NUM] num_sweeping_collectors = " << num_sweeping_collectors);
if( ++old_num == num_active_collectors ){
#ifdef SSPACE_TIME
wspace_sweep_time(FALSE, wspace->need_compact);
@@ -392,10 +395,6 @@
if(!collect_is_major())
wspace_merge_free_chunks(gc, wspace);
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
- wspace_set_space_statistic(wspace);
-#endif
-
#ifdef SSPACE_VERIFY
wspace_verify_after_collection(gc);
#endif
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h Tue Oct 28 20:01:01 2008
@@ -20,6 +20,7 @@
#include "wspace_chunk.h"
#include "wspace_verify.h"
+#include "../thread/conclctor.h"
#define PFC_REUSABLE_RATIO 0.1
#define WSPACE_COMPACT_RATIO 0.06
@@ -27,6 +28,8 @@
inline Boolean chunk_is_reusable(Chunk_Header *chunk)
{ return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }
+struct Conclctor;
+
#define OBJ_ALLOC_BIT_IN_TABLE 0x01
#define OBJ_BLACK_BIT_IN_TABLE 0x02
#define OBJ_GRAY_BIT_IN_TABLE 0x04
@@ -172,6 +175,52 @@
}
+//just debugging for root set size
+FORCE_INLINE Boolean obj_mark_gray_in_table(Partial_Reveal_Object *obj, volatile unsigned int *slot_size)
+{
+ volatile POINTER_SIZE_INT *p_color_word;
+ Chunk_Header *chunk;
+ unsigned int slot_index;
+
+ if(is_super_obj(obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(obj);
+ slot_index = 0;
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(obj);
+ slot_index = slot_addr_to_index(chunk, obj);
+ }
+
+ unsigned int word_index = slot_index >> SLOT_NUM_PER_WORD_SHIT;
+ unsigned int index_in_word = COLOR_BITS_PER_OBJ * (slot_index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1))));
+ p_color_word = &chunk->table[word_index];
+
+ assert(p_color_word);
+
+ //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+ POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word;
+
+ POINTER_SIZE_INT old_word = *p_color_word;
+ if(old_word & mark_color) return FALSE; /*already marked gray*/
+
+ apr_atomic_add32(slot_size, chunk->slot_size);
+
+ //POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
+ POINTER_SIZE_INT new_word = old_word | mark_color;
+ while(new_word != old_word) {
+ POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+ if(temp == old_word){
+ return TRUE; /*returning true does not mean it's marked by this thread. */
+ }
+ old_word = *p_color_word;
+ if(old_word & mark_color) return FALSE; /*already marked gray*/
+
+ //new_word = (old_word & color_bits_mask) | mark_color;
+ new_word = old_word | mark_color;
+ }
+
+ return FALSE;
+}
+
FORCE_INLINE Boolean obj_mark_gray_in_table(Partial_Reveal_Object *obj)
{
@@ -184,7 +233,7 @@
POINTER_SIZE_INT mark_color = cur_mark_gray_color << index_in_word;
POINTER_SIZE_INT old_word = *p_color_word;
- if(old_word & mark_color) return FALSE; /*already marked gray or black.*/
+ if(old_word & mark_color) return FALSE; /*already marked gray*/
//POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
POINTER_SIZE_INT new_word = old_word | mark_color;
@@ -194,7 +243,7 @@
return TRUE; /*returning true does not mean it's marked by this thread. */
}
old_word = *p_color_word;
- if(old_word & mark_color) return FALSE; /*already marked gray or black.*/
+ if(old_word & mark_color) return FALSE; /*already marked gray*/
//new_word = (old_word & color_bits_mask) | mark_color;
new_word = old_word | mark_color;
@@ -203,6 +252,37 @@
return FALSE;
}
+FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj)
+{
+ //assert(obj_is_mark_in_table(obj));
+ volatile POINTER_SIZE_INT *p_color_word;
+ unsigned int index_in_word;
+ p_color_word = get_color_word_in_table(obj, index_in_word);
+ assert(p_color_word);
+
+ //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+ POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+
+ POINTER_SIZE_INT old_word = *p_color_word;
+ if(old_word & mark_black_color) return FALSE; /*already marked black*/
+
+ POINTER_SIZE_INT new_word = old_word | mark_black_color;
+ while(new_word != old_word) {
+ POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+ if(temp == old_word){
+ return TRUE; /*returning true does not mean it's marked by this thread. */
+ }
+ old_word = *p_color_word;
+ if(old_word & mark_black_color) return FALSE; /*already marked black*/
+
+ new_word = old_word | mark_black_color;
+ }
+
+ return FALSE;
+
+}
+
+
FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size)
{
//assert(obj_is_mark_in_table(obj));
@@ -212,7 +292,7 @@
assert(p_color_word);
//POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
- POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+ POINTER_SIZE_INT mark_black_color = (OBJ_DIRTY_BIT_IN_TABLE|cur_mark_black_color) << index_in_word; // debugging aid: also set the dirty bit so newly allocated objects can be distinguished
POINTER_SIZE_INT old_word = *p_color_word;
if(old_word & mark_black_color) return FALSE; /*already marked black*/
@@ -233,12 +313,28 @@
}
-FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj)
+FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj, Conclctor* marker)
{
// assert(obj_is_mark_in_table(obj));
volatile POINTER_SIZE_INT *p_color_word;
- unsigned int index_in_word;
- p_color_word = get_color_word_in_table(obj, index_in_word);
+ Chunk_Header *chunk;
+ unsigned int slot_index;
+ unsigned int obj_ocuppied_size = 0;
+
+ if(is_super_obj(obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(obj);
+ slot_index = 0;
+ obj_ocuppied_size = CHUNK_SIZE(chunk);
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(obj);
+ slot_index = slot_addr_to_index(chunk, obj);
+ obj_ocuppied_size = chunk->slot_size;
+ }
+
+ unsigned int word_index = slot_index >> SLOT_NUM_PER_WORD_SHIT;
+ unsigned int index_in_word = COLOR_BITS_PER_OBJ * (slot_index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1))));
+ p_color_word = &chunk->table[word_index];
+
assert(p_color_word);
//POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
@@ -246,6 +342,58 @@
POINTER_SIZE_INT old_word = *p_color_word;
if(obj_is_mark_black_in_table(obj)) return FALSE; /*already marked black*/
+
+ marker->live_obj_num++;
+ marker->live_obj_size+=obj_ocuppied_size;
+
+ POINTER_SIZE_INT new_word = old_word | mark_black_color;
+ while(new_word != old_word) {
+ POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+ if(temp == old_word){
+ return TRUE; /*returning true does not mean it's marked by this thread. */
+ }
+ old_word = *p_color_word;
+ if(obj_is_mark_black_in_table(obj)) return FALSE; /*already marked black*/
+
+ new_word = old_word | mark_black_color;
+ }
+
+ return FALSE;
+}
+
+static volatile unsigned int mutator_marked = 0;
+
+FORCE_INLINE Boolean obj_mark_black_in_table(Partial_Reveal_Object *obj, Mutator *mutator)
+{
+ // assert(obj_is_mark_in_table(obj));
+ volatile POINTER_SIZE_INT *p_color_word;
+ Chunk_Header *chunk;
+ unsigned int slot_index;
+ unsigned int obj_size = 0;
+
+ if(is_super_obj(obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(obj);
+ slot_index = 0;
+ obj_size = CHUNK_SIZE(chunk);
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(obj);
+ slot_index = slot_addr_to_index(chunk, obj);
+ obj_size = chunk->slot_size;
+ }
+
+ unsigned int word_index = slot_index >> SLOT_NUM_PER_WORD_SHIT;
+ unsigned int index_in_word = COLOR_BITS_PER_OBJ * (slot_index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1))));
+ p_color_word = &chunk->table[word_index];
+ assert(p_color_word);
+
+ //POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
+ POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word;
+
+ POINTER_SIZE_INT old_word = *p_color_word;
+ if(obj_is_mark_black_in_table(obj)) return FALSE; /*already marked black*/
+
+ //mutator->new_obj_size += vm_object_size(obj);
+ mutator->write_barrier_marked_size += obj_size;
POINTER_SIZE_INT new_word = old_word | mark_black_color;
while(new_word != old_word) {
@@ -304,12 +452,29 @@
return FALSE;
}
-FORCE_INLINE Boolean obj_clear_mark_in_table(Partial_Reveal_Object *obj)
+FORCE_INLINE Boolean obj_clear_mark_in_table(Partial_Reveal_Object *obj, Conclctor *marker)
{
- volatile POINTER_SIZE_INT *p_color_word;
- unsigned int index_in_word;
- p_color_word = get_color_word_in_table(obj, index_in_word);
+ volatile POINTER_SIZE_INT *p_color_word;
+ Chunk_Header *chunk;
+ unsigned int slot_index;
+
+ if(is_super_obj(obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(obj);
+ slot_index = 0;
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(obj);
+ slot_index = slot_addr_to_index(chunk, obj);
+ }
+
+ unsigned int word_index = slot_index >> SLOT_NUM_PER_WORD_SHIT;
+ unsigned int index_in_word = COLOR_BITS_PER_OBJ * (slot_index & (((unsigned int)(SLOT_NUM_PER_WORD_IN_TABLE-1))));
+ p_color_word = &chunk->table[word_index];
assert(p_color_word);
+
+ if(obj_is_mark_black_in_table(obj)) {
+ marker->live_obj_num--;
+ marker->live_obj_size-=chunk->slot_size;
+ }
//POINTER_SIZE_INT color_bits_mask = ~(OBJ_COLOR_MASK << index_in_word);
POINTER_SIZE_INT mark_color = (cur_mark_black_color|cur_mark_gray_color) << index_in_word;
@@ -417,7 +582,7 @@
inline void ops_color_flip(void)
{
POINTER_SIZE_INT temp = cur_alloc_color;
- cur_alloc_color = cur_mark_black_color;
+ cur_alloc_color = cur_mark_black_color; // cannot simply assign mark = alloc: objects allocated while the colors are being swapped could otherwise be lost
cur_mark_black_color = temp;
cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE;
cur_mark_mask = (~cur_mark_mask) & FLIP_COLOR_MASK_IN_TABLE;
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep.cpp Tue Oct 28 20:01:01 2008
@@ -231,6 +231,7 @@
void wspace_merge_free_chunks(GC *gc, Wspace *wspace)
{
+
Free_Chunk_List free_chunk_list;
free_chunk_list.head = NULL;
free_chunk_list.tail = NULL;