You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@harmony.apache.org by xl...@apache.org on 2008/10/29 04:01:03 UTC
svn commit: r708756 [3/3] - in /harmony/enhanced/drlvm/trunk/vm:
gc_gen/build/ gc_gen/src/common/ gc_gen/src/finalizer_weakref/
gc_gen/src/gen/ gc_gen/src/los/ gc_gen/src/mark_sweep/ gc_gen/src/thread/
gc_gen/src/trace_forward/ gc_gen/src/verify/ inclu...
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp Tue Oct 28 20:01:01 2008
@@ -2,9 +2,60 @@
#include "wspace_chunk.h"
#include "wspace_mark_sweep.h"
#include "gc_ms.h"
+#include "../thread/conclctor.h"
#include "../gen/gen.h"
-static void collector_sweep_normal_chunk_con(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+
+static void wspace_check_free_list_chunks(Free_Chunk_List* free_list)
+{
+ Free_Chunk* chunk = free_list->head;
+ while(chunk ){
+ assert(!(chunk->status & (CHUNK_TO_MERGE |CHUNK_MERGED) ));
+ chunk = chunk->next;
+ }
+}
+
+static void wspace_check_free_chunks_status(Wspace* wspace)
+{
+ unsigned int i;
+
+ for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+ wspace_check_free_list_chunks(&wspace->aligned_free_chunk_lists[i]);
+
+ for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+ wspace_check_free_list_chunks(&wspace->unaligned_free_chunk_lists[i]);
+
+ wspace_check_free_list_chunks(wspace->hyper_free_chunk_list);
+
+}
+
+inline static void check_list(Free_Chunk_List *chunk_list)
+{
+ Free_Chunk *chunk = chunk_list->head;
+ unsigned int count = 0;
+ while(chunk) {
+ count++;
+ chunk = chunk->next;
+ }
+ assert( count == chunk_list->chunk_num );
+}
+
+inline static void collector_add_free_chunk(Conclctor *sweeper, Free_Chunk *chunk)
+{
+ Free_Chunk_List *list = sweeper->free_chunk_list;
+
+ chunk->status = CHUNK_FREE | CHUNK_TO_MERGE;
+ chunk->next = list->head;
+ chunk->prev = NULL;
+ if(list->head)
+ list->head->prev = chunk;
+ else
+ list->tail = chunk;
+ list->head = chunk;
+ list->chunk_num++;
+}
+
+static void collector_sweep_normal_chunk_con(Conclctor *sweeper, Wspace *wspace, Chunk_Header *chunk)
{
unsigned int slot_num = chunk->slot_num;
unsigned int live_num = 0;
@@ -13,20 +64,24 @@
unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
for(unsigned int i=0; i<index_word_num; ++i){
+
table[i] &= cur_alloc_mask;
unsigned int live_num_in_word = (table[i] == cur_alloc_mask) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
live_num += live_num_in_word;
+
+ /* for concurrent sweeping, sweeping and allocation are performed concurrently. so we can not just count the current live obj*/
+
if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
first_free_word_index = i;
pfc_set_slot_index((Chunk_Header*)chunk, first_free_word_index, cur_alloc_color);
}
}
assert(live_num <= slot_num);
- collector->live_obj_size += live_num * chunk->slot_size;
- collector->live_obj_num += live_num;
+ sweeper->live_obj_size += live_num * chunk->slot_size;
+ sweeper->live_obj_num += live_num;
if(!live_num){ /* all objects in this chunk are dead */
- collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+ collector_add_free_chunk(sweeper, (Free_Chunk*)chunk);
} else {
chunk->alloc_num = live_num;
if(!chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list*/
@@ -37,172 +92,173 @@
}
}
-static inline void collector_sweep_abnormal_chunk_con(Collector *collector, Wspace *wspace, Chunk_Header *chunk)
+static inline void collector_sweep_abnormal_chunk_con(Conclctor *sweeper, Wspace *wspace, Chunk_Header *chunk)
{
assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));
POINTER_SIZE_INT *table = chunk->table;
table[0] &= cur_alloc_mask;
if(!table[0]){
- collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+ collector_add_free_chunk(sweeper, (Free_Chunk*)chunk);
}
else {
wspace_reg_live_abnormal_chunk(wspace, chunk);
- collector->live_obj_size += CHUNK_SIZE(chunk);
- collector->live_obj_num++;
+ sweeper->live_obj_size += CHUNK_SIZE(chunk);
+ sweeper->live_obj_num++;
}
}
-static void wspace_sweep_chunk_con(Wspace* wspace, Collector* collector, Chunk_Header_Basic* chunk)
+static void wspace_sweep_chunk_con(Wspace* wspace, Conclctor* sweeper, Chunk_Header_Basic* chunk)
{
if(chunk->status & CHUNK_NORMAL){ /* chunk is used as a normal sized obj chunk */
assert(chunk->status == (CHUNK_NORMAL | CHUNK_USED));
- collector_sweep_normal_chunk_con(collector, wspace, (Chunk_Header*)chunk);
+ collector_sweep_normal_chunk_con(sweeper, wspace, (Chunk_Header*)chunk);
} else { /* chunk is used as a super obj chunk */
assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));
- collector_sweep_abnormal_chunk_con(collector, wspace, (Chunk_Header*)chunk);
+ collector_sweep_abnormal_chunk_con(sweeper, wspace, (Chunk_Header*)chunk);
}
}
-static Free_Chunk_List* wspace_get_free_chunk_list(Wspace* wspace)
+//used in last sweeper and final stw reset
+Free_Chunk_List merged_free_chunk_list;
+Free_Chunk_List free_chunk_list_from_sweepers;
+Free_Chunk_List global_free_chunk_list;
+
+static Free_Chunk_List* wspace_collect_free_chunks_from_sweepers(GC *gc)
{
- GC* gc = wspace->gc;
- Free_Chunk_List* free_chunk_list = (Free_Chunk_List*) STD_MALLOC(sizeof(Free_Chunk_List));
+ Free_Chunk_List* free_chunk_list = &free_chunk_list_from_sweepers;
assert(free_chunk_list);
- memset(free_chunk_list, 0, sizeof(Free_Chunk_List));
+ free_chunk_list_init(free_chunk_list);
- /* Collect free chunks from collectors to one list */
- for(unsigned int i=0; i<gc->num_collectors; ++i){
- Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+ for( unsigned int i=0; i<gc->num_conclctors; i++ ) {
+ Conclctor *conclctor = gc->conclctors[i];
+ if( conclctor->role != CONCLCTOR_ROLE_SWEEPER )
+ continue;
+ Free_Chunk_List *list = conclctor->free_chunk_list;
move_free_chunks_between_lists(free_chunk_list, list);
}
-
return free_chunk_list;
}
-Boolean wspace_get_free_chunk_concurrent(Wspace *wspace, Free_Chunk* chunk)
-{
- POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
- assert(!(chunk_size % CHUNK_GRANULARITY));
-
- Free_Chunk_List* free_list = NULL;
-
- /*Find list*/
- if(chunk_size > HYPER_OBJ_THRESHOLD)
- free_list = wspace->hyper_free_chunk_list;
- else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
- free_list = &wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
- else
- free_list = &wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)];
- /*Lock this free list*/
- lock(free_list->lock);
+static void wspace_reset_free_list_chunks(Free_Chunk_List* free_list)
+{
+ Free_Chunk* chunk = free_list->head;
+ while(chunk ){
+ assert(chunk->status & CHUNK_FREE);
+ chunk->status = CHUNK_FREE;
+ chunk = chunk->next;
+ }
+}
- /*Search free list for chunk*/
- Free_Chunk* chunk_iter = free_list->head;
- while((POINTER_SIZE_INT)chunk_iter){
- if((POINTER_SIZE_INT)chunk_iter == (POINTER_SIZE_INT)chunk){
- /*Find chunk and delete from list.*/
- free_list_detach_chunk(free_list, chunk);
- unlock(free_list->lock);
- return TRUE;
- }
- chunk_iter = chunk_iter->next;
+static void wspace_reset_free_list_chunks(Free_Chunk_List* free_list, Chunk_Status_t status)
+{
+ Free_Chunk* chunk = free_list->head;
+ while(chunk ){
+ assert(chunk->status & CHUNK_FREE);
+ chunk->status = status;
+ chunk = chunk->next;
}
-
- unlock(free_list->lock);
-
- return FALSE;
}
-void wspace_merge_adj_free_chunks(Wspace* wspace,Free_Chunk* chunk)
+static unsigned int get_to_merge_length(Free_Chunk_List *free_list)
{
+ Free_Chunk* chunk = free_list->head;
+ unsigned int counter = 0;
+ while(chunk) {
+ if(chunk->status&CHUNK_MERGED) {
+ return counter;
+ }
+ counter++;
+ chunk = chunk->next;
+ }
+ return counter;
+}
+
+static unsigned int get_length(Free_Chunk_List *free_list)
+{
+ Free_Chunk* chunk = free_list->head;
+ unsigned int counter = 0;
+ while(chunk) {
+ counter++;
+ chunk = chunk->next;
+ }
+ return counter;
+}
+
+static void wspace_merge_free_list(Wspace* wspace, Free_Chunk_List *free_list)
+{
+ int64 merge_start = time_now();
Free_Chunk *wspace_ceiling = (Free_Chunk*)space_heap_end((Space*)wspace);
+ Free_Chunk *chunk = free_list->head;
+ while(chunk && !(chunk->status &CHUNK_MERGED)) {
- /* Check if the back adjcent chunks are free */
- Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
- while(back_chunk < wspace_ceiling && (back_chunk->status & CHUNK_FREE)){
- assert(chunk < back_chunk);
- /* Remove back_chunk from list */
- if(wspace_get_free_chunk_concurrent(wspace,back_chunk)){
+ free_list->head = chunk->next;
+ free_list->chunk_num--;
+ if(free_list->head)
+ free_list->head->prev = NULL;
+ /* Check if the back adjacent chunks are free */
+ Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+ while(back_chunk < wspace_ceiling && (back_chunk->status & (CHUNK_TO_MERGE|CHUNK_MERGED))) {
+ assert(chunk < back_chunk);
+ /* Remove back_chunk from list */
+ free_list_detach_chunk(free_list, back_chunk);
back_chunk = (Free_Chunk*)back_chunk->adj_next;
chunk->adj_next = (Chunk_Header_Basic*)back_chunk;
- }else{
- break;
}
- }
-
- chunk->status = CHUNK_FREE | CHUNK_MERGED;
- /* put the free chunk to the according free chunk list */
- wspace_put_free_chunk_to_tail(wspace, chunk);
-
-}
+ if(back_chunk < wspace_ceiling)
+ back_chunk->adj_prev = (Chunk_Header_Basic*)chunk;
-static void wspace_merge_list_concurrent(Wspace* wspace, Free_Chunk_List* free_list)
-{
- lock(free_list->lock);
- Free_Chunk* chunk = free_list->head;
-
- while(chunk && !is_free_chunk_merged(chunk)){
- free_list_detach_chunk(free_list, chunk);
- unlock(free_list->lock);
-
- wspace_merge_adj_free_chunks(wspace, chunk);
-
- lock(free_list->lock);
+ //INFO2("gc.con.info", "the iteration merges [" << counter << "] chunks, to merge length=" << get_to_merge_length(free_list));
+ chunk->status = CHUNK_FREE | CHUNK_MERGED;
+ free_chunk_list_add_tail(free_list, chunk);
chunk = free_list->head;
}
-
- unlock(free_list->lock);
+ //INFO2("gc.con.info", "after "<< counter <<" mergings, chunks num [" << get_length(free_list) << "], time=" << (time_now()-merge_start) << " us");
}
+
-static void wspace_merge_free_chunks_concurrent(Wspace* wspace, Free_Chunk_List* free_list)
+static inline Free_Chunk_List * gc_collect_global_free_chunk_list(Wspace *wspace, GC *gc)
{
- Free_Chunk *chunk = free_list->head;
-
- /*merge free list*/
- wspace_merge_list_concurrent(wspace, free_list);
- /*check free pool*/
+ free_chunk_list_init(&global_free_chunk_list);
+ Free_Chunk_List *global_free_list = &global_free_chunk_list;
unsigned int i;
-
for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
- wspace_merge_list_concurrent(wspace, &wspace->aligned_free_chunk_lists[i]);
-
+ move_free_chunks_between_lists(global_free_list, &wspace->aligned_free_chunk_lists[i]);
for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
- wspace_merge_list_concurrent(wspace, &wspace->unaligned_free_chunk_lists[i]);
+ move_free_chunks_between_lists(global_free_list, &wspace->unaligned_free_chunk_lists[i]);
- wspace_merge_list_concurrent(wspace, wspace->hyper_free_chunk_list);
+ move_free_chunks_between_lists(global_free_list, wspace->hyper_free_chunk_list);
+ move_free_chunks_between_lists(global_free_list, &free_chunk_list_from_sweepers);
+
+ wspace_reset_free_list_chunks(global_free_list, CHUNK_FREE|CHUNK_TO_MERGE);
+
+ return global_free_list;
}
-static void wspace_reset_free_list_chunks(Wspace* wspace, Free_Chunk_List* free_list)
-{
- lock(free_list->lock);
- Free_Chunk* chunk = free_list->head;
+//final remerge in a STW manner, this can reduce the lock of merging global free list
+void gc_merge_free_list_global(GC *gc) {
+ Wspace *wspace = gc_get_wspace(gc);
+ int64 start_merge = time_now();
- while(chunk ){
- assert(chunk->status & CHUNK_FREE);
- chunk->status = CHUNK_FREE;
- chunk = chunk->next;
+ Free_Chunk_List *global_free_list = gc_collect_global_free_chunk_list(wspace, gc);
+ wspace_merge_free_list(wspace, global_free_list);
+ wspace_reset_free_list_chunks(global_free_list);
+
+ //put to global list
+ Free_Chunk *chunk = global_free_list->head;
+ while(chunk) {
+ global_free_list->head = chunk->next;
+ if(global_free_list->head)
+ global_free_list->head->prev = NULL;
+ wspace_put_free_chunk(wspace, chunk);
+ chunk = global_free_list->head;
}
+ //INFO2("gc.merge", "[merge global] time=" << (time_now()-start_merge) << " us" );
- unlock(free_list->lock);
}
-static void wspace_reset_free_chunks_status(Wspace* wspace)
-{
- unsigned int i;
-
- for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
- wspace_reset_free_list_chunks(wspace, &wspace->aligned_free_chunk_lists[i]);
-
- for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
- wspace_reset_free_list_chunks(wspace, &wspace->unaligned_free_chunk_lists[i]);
-
- wspace_reset_free_list_chunks(wspace, wspace->hyper_free_chunk_list);
-
-}
-
static void allocator_sweep_local_chunks(Allocator *allocator)
{
Wspace *wspace = gc_get_wspace(allocator->gc);
@@ -243,9 +299,7 @@
static void gc_sweep_mutator_local_chunks(GC *gc)
{
-#ifdef USE_UNIQUE_MARK_SWEEP_GC
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
-
/* release local chunks of each mutator in unique mark-sweep GC */
Mutator *mutator = gc->mutator_list;
while(mutator){
@@ -253,9 +307,7 @@
allocator_sweep_local_chunks((Allocator*)mutator);
mutator = mutator->next;
}
-
unlock(gc->mutator_list_lock);
-#endif
}
static void gc_wait_mutator_signal(GC *gc, unsigned int handshake_signal)
@@ -279,18 +331,14 @@
The mark bit and alloc bit is exchanged before entering this function.
This function is to clear the mark bit and merge the free chunks concurrently.
*/
-void wspace_sweep_concurrent(Collector* collector)
+void wspace_sweep_concurrent(Conclctor* sweeper)
{
- collector->time_measurement_start = time_now();
- GC *gc = collector->gc;
+ GC *gc = sweeper->gc;
+
Wspace *wspace = gc_get_wspace(gc);
- collector->live_obj_size = 0;
- collector->live_obj_num = 0;
-
- unsigned int num_active_collectors = gc->num_active_collectors;
-
- atomic_cas32(&num_sweeping_collectors, 0, num_active_collectors+1);
+ sweeper->live_obj_size = 0;
+ sweeper->live_obj_num = 0;
Pool* used_chunk_pool = wspace->used_chunk_pool;
@@ -299,7 +347,7 @@
/*1. Grab chunks from used list, sweep the chunk and push back to PFC backup list & free list.*/
chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
while(chunk_to_sweep != NULL){
- wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep);
+ wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
}
@@ -312,7 +360,7 @@
while(chunk_to_sweep != NULL){
assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
- wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep);
+ wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
}
}
@@ -320,12 +368,23 @@
pfc_pool = wspace_grab_next_pfc_pool(wspace);
}
- unsigned int old_num = atomic_inc32(&num_sweeping_collectors);
- if( ++old_num == num_active_collectors ){
-
- /*3. Check the local chunk of mutator*/
- gc_sweep_mutator_local_chunks(wspace->gc);
+}
+
+
+//final work should be done by the last sweeper
+void wspace_last_sweeper_work( Conclctor *last_sweeper ) {
+ GC *gc = last_sweeper->gc;
+ Wspace *wspace = gc_get_wspace(gc);
+ Chunk_Header_Basic* chunk_to_sweep;
+ Pool* used_chunk_pool = wspace->used_chunk_pool;
+
+ /* all but one sweeper finishes its job*/
+ state_transformation( gc, GC_CON_SWEEPING, GC_CON_SWEEP_DONE );
+
+ /*3. Check the local chunk of mutator*/
+ gc_sweep_mutator_local_chunks(wspace->gc);
+
/*4. Sweep gloabl alloc normal chunks again*/
gc_set_sweep_global_normal_chunk();
gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE);
@@ -337,27 +396,27 @@
while(chunk_to_sweep != NULL){
assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
- wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep);
+ wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
}
}
/*grab more pfc pools*/
pfc_pool = wspace_grab_next_pfc_pool(wspace);
}
-
- /*4. Check the used list again.*/
+
+ /*5. Check the used list again.*/
chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
while(chunk_to_sweep != NULL){
- wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep);
+ wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
}
- /*5. Switch the PFC backup list to PFC list.*/
+ /*6. Switch the PFC backup list to PFC list.*/
wspace_exchange_pfc_pool(wspace);
gc_unset_sweep_global_normal_chunk();
- /*6. Put back live abnormal chunk and normal unreusable chunk*/
+ /*7. Put back live abnormal chunk and normal unreusable chunk*/
Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
while(used_abnormal_chunk){
used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
@@ -373,21 +432,17 @@
unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
}
pool_empty(wspace->unreusable_normal_chunk_pool);
-
-
- /*7. Merge free chunks*/
- Free_Chunk_List* free_chunk_list = wspace_get_free_chunk_list(wspace);
- wspace_merge_free_chunks_concurrent(wspace, free_chunk_list);
- wspace_reset_free_chunks_status(wspace);
-
- /* let other collectors go */
- num_sweeping_collectors++;
- }
- while(num_sweeping_collectors != num_active_collectors + 1);
- collector->time_measurement_end = time_now();
+
+ /*8. Merge free chunks from sweepers*/
+ Free_Chunk_List *free_list_from_sweeper = wspace_collect_free_chunks_from_sweepers(gc);
+ wspace_merge_free_list(wspace, free_list_from_sweeper);
+
+ /* last sweeper will transform the state to before_finish */
+ state_transformation( gc, GC_CON_SWEEP_DONE, GC_CON_BEFORE_FINISH );
}
+
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/collector.cpp Tue Oct 28 20:01:01 2008
@@ -367,4 +367,3 @@
-
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp?rev=708756&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp Tue Oct 28 20:01:01 2008
@@ -0,0 +1,411 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "conclctor.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
+
+TaskType marker_final_func;
+TaskType sweeper_final_func;
+
+SpinLock print_lock;
+
+static volatile unsigned int live_conclctor_num = 0;
+
+static inline void notify_conclctor_to_work(Conclctor* conclctor)
+{
+ vm_set_event(conclctor->task_assigned_event);
+}
+
+static inline void conclctor_wait_for_task(Conclctor* conclctor)
+{
+ vm_wait_event(conclctor->task_assigned_event);
+}
+
+static inline void conclctor_reset_thread(Conclctor *conclctor)
+{
+ conclctor->task_func = NULL;
+#ifndef BUILD_IN_REFERENT
+ if(conclctor->role == CONCLCTOR_ROLE_MARKER) //only marker use weakref sets
+ conclctor_reset_weakref_sets(conclctor);
+#endif
+ return;
+}
+
+
+
+static inline void conclctor_finish(Conclctor *conclctor)
+{
+ GC *gc = conclctor->gc;
+ switch( conclctor->role ) {
+ case CONCLCTOR_ROLE_MARKER:
+ if(apr_atomic_dec32(&gc->num_active_markers) == 0 ) {
+ if(marker_final_func!=NULL)
+ marker_final_func( conclctor );
+ }
+ break;
+ case CONCLCTOR_ROLE_SWEEPER:
+ if(apr_atomic_dec32(&gc->num_active_sweepers) == 0) {
+ if(sweeper_final_func!=NULL)
+ sweeper_final_func( conclctor );
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static inline int round_conclctor_num(GC* gc, unsigned int req_num)
+{
+ unsigned int free_num = gc->num_conclctors - gc->num_active_markers - gc->num_active_sweepers;
+ assert(free_num>=0);
+ if( free_num > req_num )
+ return req_num;
+ return free_num;
+}
+
+/*just for debugging*/
+inline static void assign_event_info( unsigned int role, unsigned int index ) {
+ switch( role ) {
+ case CONCLCTOR_ROLE_MARKER:
+ INFO2("gc.con.info", "Activate a MARKER at index ["<<index<<"]");
+ break;
+ case CONCLCTOR_ROLE_SWEEPER:
+ INFO2("gc.con.info", "Activate a SWEEPER at index ["<<index<<"]");
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void assign_conclctor_with_task(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role )
+{
+
+ unsigned int num_assign = round_conclctor_num(gc, num_conclctors);
+ if( num_assign < num_conclctors ) {
+ INFO2( "gc.con.info", "<Oops> There is no free conclctors" );
+ assert(0);
+ return;
+ }
+ //INFO2("gc.con.info", "request number = " << num_conclctors << ", actual num = " << num_assign );
+ switch( role ) {
+ case CONCLCTOR_ROLE_MARKER:
+ apr_atomic_add32(&gc->num_active_markers, num_assign);
+ break;
+ case CONCLCTOR_ROLE_SWEEPER:
+ apr_atomic_add32(&gc->num_active_sweepers, num_assign);
+ break;
+ default:
+ assert(0);
+ }
+ //INFO2("gc.con.info", "active markers=" <<gc->num_active_markers);
+ unsigned int j = 0;
+ for(unsigned int i=0; i<gc->num_conclctors; i++)
+ {
+ Conclctor* conclctor = gc->conclctors[i];
+ if( conclctor->status != CONCLCTOR_NIL )
+ continue;
+ conclctor_reset_thread(conclctor);
+ conclctor->task_func = task_func;
+ conclctor->con_space = space;
+ conclctor->role = role;
+ conclctor->status = CONCLCTOR_ACTIVE;
+ //assign_event_info( role, i );
+ notify_conclctor_to_work(conclctor);
+ if( ++j >= num_assign) break;
+ }
+ return;
+}
+
+
+
+static int conclctor_thread_func(void *arg)
+{
+ Conclctor *conclctor = (Conclctor *)arg;
+ assert(conclctor);
+
+ while(true){
+ /* Waiting for newly assigned task */
+ conclctor_wait_for_task(conclctor);
+ //conclctor->status = CONCLCTOR_ACTIVE;
+ /* waken up and check for new task */
+ TaskType task_func = conclctor->task_func;
+ if(task_func == NULL) {
+ atomic_dec32(&live_conclctor_num);
+ conclctor->status = CONCLCTOR_DEAD;
+ //INFO2( "gc.con.info", "CONCLCTOR DEAD");
+ return 1;
+ }
+ conclctor->time_measurement_start = time_now();
+ task_func(conclctor);
+
+
+ /*
+ if( conclctor->role == CONCLCTOR_ROLE_MARKER ) {
+ int64 marking_time = conclctor->time_measurement_end - conclctor->time_measurement_start;
+ double marking_rate = conclctor->num_dirty_slots_traced;
+ if( marking_time != 0 )
+ marking_rate = (double)conclctor->num_dirty_slots_traced/(marking_time>>10);
+ lock( print_lock );
+ INFO2( "gc.con.info", "[MR] Marking Time=" << (unsigned int)marking_time << ", Dirty Slots Traced=" << conclctor->num_dirty_slots_traced << ", Trace Rate=" << marking_rate << "/ms" );
+ unlock( print_lock );
+ }*/
+
+ conclctor_finish(conclctor);
+ conclctor->time_measurement_end = time_now();
+ conclctor->status = CONCLCTOR_NIL;
+ }
+
+ return 0;
+}
+
+
+static void conclctor_init_thread(Conclctor *conclctor)
+{
+ conclctor->rem_set = NULL;
+ conclctor->rep_set = NULL;
+
+ int status = vm_create_event(&conclctor->task_assigned_event);
+ assert(status == THREAD_OK);
+ /* for concurrent collector, we do not need finished event */
+ status = (unsigned int)vm_create_thread(conclctor_thread_func, (void*)conclctor);
+ assert(status == THREAD_OK);
+
+ return;
+}
+
+
+
+static void conclctor_terminate_thread(Conclctor* conclctor)
+{
+ assert(live_conclctor_num);
+ unsigned int old_live_conclctor_num = live_conclctor_num;
+ while (conclctor->status == CONCLCTOR_ACTIVE) { //wait conclctor to finish
+ vm_thread_yield();
+ }
+ conclctor->task_func = NULL; /* NULL to notify thread exit */
+ notify_conclctor_to_work(conclctor);
+ while(conclctor->status != CONCLCTOR_DEAD)
+ vm_thread_yield(); /* give conclctor time to die */
+ return;
+}
+
+ void terminate_mostly_con_mark();
+
+void conclctor_destruct(GC* gc)
+{
+ TRACE2("gc.process", "GC: GC conclctors destruct ...");
+
+ set_marker_final_func(NULL);
+ set_sweeper_final_func(NULL);
+
+ terminate_mostly_con_mark(); // mostly concurrent marker may be still running and never stops because heap will not be exhausted
+
+ for(unsigned int i=0; i<gc->num_conclctors; i++)
+ {
+ Conclctor* conclctor = gc->conclctors[i];
+ conclctor_terminate_thread(conclctor);
+ STD_FREE(conclctor);
+ }
+ assert(live_conclctor_num == 0);
+ STD_FREE(gc->conclctors);
+ return;
+}
+
+void conclctor_init_free_chunk_list(Conclctor *conclctor)
+{
+ Free_Chunk_List *list = (Free_Chunk_List*)STD_MALLOC(sizeof(Free_Chunk_List));
+ free_chunk_list_init(list);
+ conclctor->free_chunk_list = list;
+}
+
+unsigned int NUM_CONCLCTORS = 0;
+unsigned int NUM_CON_MARKERS = 0;
+unsigned int NUM_CON_SWEEPERS = 0;
+
+void conclctor_initialize(GC* gc)
+{
+ TRACE2("gc.process", "GC: GC conclctors init ... \n");
+ //FIXME::
+ unsigned int num_processors = gc_get_processor_num(gc);
+
+ unsigned int nthreads = max(NUM_CONCLCTORS, num_processors);
+
+ unsigned int size = sizeof(Conclctor *) * nthreads;
+ gc->conclctors = (Conclctor **) STD_MALLOC(size);
+ memset(gc->conclctors, 0, size);
+
+ size = sizeof(Conclctor);
+ for (unsigned int i = 0; i < nthreads; i++) {
+ Conclctor* conclctor = (Conclctor *)STD_MALLOC(size);
+ memset(conclctor, 0, size);
+ /* FIXME:: thread_handle is for temporary control */
+ conclctor->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i;
+ conclctor->gc = gc;
+ conclctor->status = CONCLCTOR_NIL;
+ conclctor->role = CONCLCTOR_ROLE_NIL;
+ conclctor_init_free_chunk_list(conclctor);
+ //init thread scheduling related stuff, creating conclctor thread
+ conclctor_init_thread(conclctor);
+ //#ifdef GC_GEN_STATS
+ //collector_init_stats((Collector *)conclctor);
+ //#endif
+ gc->conclctors[i] = conclctor;
+ }
+ gc->num_conclctors = NUM_CONCLCTORS? NUM_CONCLCTORS:num_processors;
+ live_conclctor_num = gc->num_conclctors;
+ return;
+}
+
+
+void conclctor_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role)
+{
+ assign_conclctor_with_task(gc, task_func, space, num_conclctors, role);
+ return;
+}
+
+/* this method is an exception for concurrent gc, it will wait until the task is finished and then return,
+basically, it is for mostly concurrent's final marking phase. of course, it can be used for other purposes.
+in most cases, this method is used in a short STW phase, so the spin lock here will not affect the whole performance badly
+*/
+void conclctor_execute_task_synchronized(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role)
+{
+ assign_conclctor_with_task(gc, task_func, space, num_conclctors, role);
+ switch( role ) {
+ case CONCLCTOR_ROLE_MARKER:
+ while( gc->num_active_markers != 0 ) {
+ vm_thread_yield();
+ }
+ break;
+ case CONCLCTOR_ROLE_SWEEPER: /* now, this case will never be reached*/
+ while( gc->num_active_sweepers != 0 ) {
+ vm_thread_yield();
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+}
+
+unsigned int gc_get_conclcor_num(GC* gc, unsigned int req_role) {
+ assert( req_role != CONCLCTOR_ROLE_NIL );
+ unsigned int i = 0;
+ unsigned int num = 0;
+ for(; i<gc->num_conclctors; i++){
+ Conclctor* conclctor = gc->conclctors[i];
+ if( conclctor->role != req_role )
+ continue;
+ num++;
+ }
+ return num;
+}
+
+int64 gc_get_conclctor_time(GC* gc, unsigned int req_role)
+{
+ assert( req_role != CONCLCTOR_ROLE_NIL );
+ int64 time_conclctor = 0;
+ unsigned int i = 0;
+ for(; i<gc->num_conclctors; i++){
+ Conclctor* conclctor = gc->conclctors[i];
+ if( conclctor->role != req_role )
+ continue;
+ int64 time_measured = conclctor->time_measurement_end - conclctor->time_measurement_start;
+ if(time_measured > time_conclctor)
+ time_conclctor = time_measured;
+ }
+ return time_conclctor;
+}
+
+void gc_clear_conclctor_role(GC *gc) {
+ unsigned int i = 0;
+ for(; i<gc->num_conclctors; i++){
+ Conclctor* conclctor = gc->conclctors[i];
+ conclctor->live_obj_num = 0;
+ conclctor->live_obj_size = 0;
+ conclctor->time_measurement_start = 0;
+ conclctor->time_measurement_end = 0;
+ conclctor->role = CONCLCTOR_ROLE_NIL;
+ }
+ gc->num_active_sweepers = 0;
+ gc->num_active_markers = 0;
+}
+
+void conclctor_set_weakref_sets(GC* gc)// now only marker uses this
+{
+ unsigned int req_role = CONCLCTOR_ROLE_MARKER;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ unsigned int num_conclctors = gc->num_conclctors;
+ unsigned int i = 0;
+ for(; i<num_conclctors; i++){
+ Conclctor* conclctor = gc->conclctors[i];
+ if( conclctor->role != req_role )
+ continue;
+ //check_ref_pool(conclctor);
+ /* for mostly concurrent, some conclctors' weak sets have already been reclaimed, so the NOT NULL check is needed here */
+ if( conclctor->softref_set != NULL ) {
+ pool_put_entry(metadata->softref_pool, conclctor->softref_set);
+ conclctor->softref_set = NULL;
+ }
+
+ if( conclctor->weakref_set != NULL ) {
+ pool_put_entry(metadata->weakref_pool, conclctor->weakref_set);
+ conclctor->weakref_set = NULL;
+ }
+
+ if( conclctor->phanref_set != NULL ) {
+ pool_put_entry(metadata->phanref_pool, conclctor->phanref_set);
+ conclctor->phanref_set = NULL;
+ }
+
+ }
+}
+
+
+void conclctor_release_weakref_sets(GC* gc) // now only sweeper use this
+{
+ unsigned int req_role = CONCLCTOR_ROLE_SWEEPER;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ unsigned int num_conclctors = gc->num_conclctors;
+ unsigned int i = 0;
+ for(; i<num_conclctors; i++){
+ Conclctor* conclctor = gc->conclctors[i];
+ if( conclctor->role != req_role )
+ continue;
+
+ pool_put_entry(metadata->free_pool, conclctor->softref_set);
+ pool_put_entry(metadata->free_pool, conclctor->weakref_set);
+ pool_put_entry(metadata->free_pool, conclctor->phanref_set);
+ conclctor->softref_set = NULL;
+ conclctor->weakref_set = NULL;
+ conclctor->phanref_set = NULL;
+ }
+}
+
+
+/* reset weak references vector block of each conclctor */
+void conclctor_reset_weakref_sets(Conclctor *conclctor)
+{
+ GC *gc = conclctor->gc;
+ assert(conclctor->softref_set == NULL);
+ assert(conclctor->weakref_set == NULL);
+ assert(conclctor->phanref_set == NULL);
+ conclctor->softref_set = finref_get_free_block(gc);
+ conclctor->weakref_set = finref_get_free_block(gc);
+ conclctor->phanref_set= finref_get_free_block(gc);
+}
Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.cpp
------------------------------------------------------------------------------
svn:eol-style = native
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h?rev=708756&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h Tue Oct 28 20:01:01 2008
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef _CONCLCTOR_H_
+#define _CONCLCTOR_H_
+
+#include "../common/gc_space.h"
+#include "../mark_sweep/wspace_chunk.h"
+
+extern SpinLock print_lock; //just for debug, print information
+
+enum CONCLCTOR_STATUS {
+ CONCLCTOR_NIL = 0x00,
+ CONCLCTOR_ACTIVE = 0x01,
+ CONCLCTOR_DEAD = 0x02,
+};
+
+enum CONCLCTOR_ROLE {
+ CONCLCTOR_ROLE_NIL = 0x0,
+ CONCLCTOR_ROLE_MARKER = 0x1,
+ CONCLCTOR_ROLE_SWEEPER = 0x2,
+};
+
+/* Per-thread state of a concurrent collector ("conclctor") thread.
+ * The leading fields mirror the allocator layout (free/ceiling/end/
+ * alloc_block ...); the role field selects marker vs. sweeper behavior.
+ * NOTE(review): field semantics below are taken from the existing inline
+ * comments; unclear fields are left undocumented rather than guessed. */
+typedef struct Conclctor {
+ void *free;
+ void *ceiling;
+ void *end;
+ void *alloc_block;
+ Chunk_Header ***local_chunks;
+ Space* alloc_space;
+ GC* gc;
+ VmThreadHandle thread_handle; /* This thread; */
+ unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
+ unsigned int num_alloc_blocks; /* the number of allocated blocks in this collection. */
+ int64 time_measurement_start;
+ int64 time_measurement_end;
+ /* End of Allocator --> */
+
+ /* FIXME:: for testing */
+ Space* con_space;
+
+ /* backup allocator in case there are two target copy spaces, such as semispace GC */
+ Allocator* backup_allocator;
+
+ Vector_Block *trace_stack;
+
+ Vector_Block* rep_set; /* repointed set */
+ Vector_Block* rem_set;
+#ifdef USE_32BITS_HASHCODE
+ Vector_Block* hashcode_set;
+#endif
+
+ /* weak reference sets, managed by conclctor_reset/release_weakref_sets */
+ Vector_Block *softref_set;
+ Vector_Block *weakref_set;
+ Vector_Block *phanref_set;
+
+ VmEventHandle task_assigned_event;
+ VmEventHandle task_finished_event;
+
+ Block_Header* cur_compact_block;
+ Block_Header* cur_target_block;
+
+ Free_Chunk_List *free_chunk_list;
+
+ POINTER_SIZE_INT live_obj_size;
+ POINTER_SIZE_INT live_obj_num;
+
+ void(*task_func)(void*) ; /* current task */
+
+ POINTER_SIZE_INT non_los_live_obj_size;
+ POINTER_SIZE_INT los_live_obj_size;
+ POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM];
+
+ unsigned int result;
+
+ /* idle, active or dead */
+ unsigned int status;
+ /* null, marker or sweeper */
+ unsigned int role;
+ //VmEventHandle markroot_finished_event;
+ int64 time_conclctor;
+
+ unsigned int num_dirty_slots_traced;
+
+ Conclctor* next;
+
+} Conclctor;
+
+//#define MAX_NUM_CONCLCTORS 0xff
+//#define MIN_NUM_CONCLCTORS 0x01
+#define MAX_NUM_MARKERS 0xff
+#define MIN_NUM_MARKERS 0x01
+
+typedef Conclctor* Conclctor_List;
+
+void conclctor_destruct(GC* gc);
+void conclctor_initialize(GC* gc);
+void conclctor_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role);
+int64 gc_get_conclctor_time(GC* gc, unsigned int req_role);
+void gc_clear_conclctor_role(GC *gc);
+void conclctor_set_weakref_sets(GC* gc);
+void conclctor_release_weakref_sets(GC* gc);
+void conclctor_reset_weakref_sets(Conclctor *conclctor);
+
+//void conclctor_release_weakref_sets(GC* gc, unsigned int num_conclctor);
+//void conclctor_restore_obj_info(Collector* collector);
+
+extern TaskType marker_final_func;
+extern TaskType sweeper_final_func;
+
+/* Install the task run by markers at the end of concurrent marking. */
+inline void set_marker_final_func( TaskType func ) {
+ marker_final_func = func;
+}
+
+/* Install the task run by sweepers at the end of concurrent sweeping. */
+inline void set_sweeper_final_func( TaskType func ) {
+ sweeper_final_func = func;
+}
+
+#endif //#ifndef _CONCLCTOR_H_
+
Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/conclctor.h
------------------------------------------------------------------------------
svn:eol-style = native
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.cpp Tue Oct 28 20:01:01 2008
@@ -208,29 +208,86 @@
return time_mutator;
}
-static POINTER_SIZE_INT size_new_obj_desturcted_mutator_alloced;
+static POINTER_SIZE_INT desturcted_mutator_alloced_size;
+static POINTER_SIZE_INT desturcted_mutator_alloced_num;
+static POINTER_SIZE_INT desturcted_mutator_alloced_occupied_size;
+static POINTER_SIZE_INT desturcted_mutator_write_barrier_marked_size;
void mutator_register_new_obj_size(Mutator * mutator)
{
- size_new_obj_desturcted_mutator_alloced += mutator->new_obj_size;
+ desturcted_mutator_alloced_size += mutator->new_obj_size;
+ desturcted_mutator_alloced_num += mutator->new_obj_num;
+ desturcted_mutator_alloced_occupied_size += mutator->new_obj_occupied_size;
+ desturcted_mutator_write_barrier_marked_size += mutator->write_barrier_marked_size;
}
-POINTER_SIZE_INT gc_get_new_object_size(GC* gc, Boolean need_reset)
+
+/* Sum write_barrier_marked_size over all live mutators, under the mutator
+ * list lock. NOTE(review): the sum is accumulated in POINTER_SIZE_INT but
+ * returned as unsigned int -- may truncate on 64-bit platforms. */
+unsigned int gc_get_mutator_write_barrier_marked_size(GC* gc)
+{
+ POINTER_SIZE_INT write_barrier_marked_size = 0;
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+ write_barrier_marked_size += mutator->write_barrier_marked_size;
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+ return write_barrier_marked_size;
+}
+/* Sum dirty_obj_num over all live mutators, under the mutator list lock.
+ * NOTE(review): accumulated as POINTER_SIZE_INT, returned as unsigned int
+ * -- may truncate on 64-bit platforms. */
+unsigned int gc_get_mutator_dirty_obj_num(GC *gc)
{
- POINTER_SIZE_INT new_obj_size = 0;
+ POINTER_SIZE_INT dirty_obj_num = 0;
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+ dirty_obj_num += mutator->dirty_obj_num;
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+ return dirty_obj_num;
+}
+/* Total occupied size of objects allocated since the last reset: the sum
+ * over live mutators plus the accumulator for already-destructed mutators.
+ * NOTE(review): desturcted_mutator_alloced_occupied_size is read after the
+ * lock-protected loop; it is updated by mutator_register_new_obj_size --
+ * confirm that update happens under the same mutator_list_lock. */
+unsigned int gc_get_mutator_new_obj_size(GC* gc)
+{
+ POINTER_SIZE_INT new_obj_occupied_size = 0;
 lock(gc->mutator_list_lock);
 Mutator* mutator = gc->mutator_list;
 while (mutator) {
- new_obj_size += mutator->new_obj_size;
- if(need_reset) mutator->new_obj_size = 0;
+ new_obj_occupied_size += mutator->new_obj_occupied_size;
 mutator = mutator->next;
 }
 unlock(gc->mutator_list_lock);
- new_obj_size += size_new_obj_desturcted_mutator_alloced;
- if(need_reset) size_new_obj_desturcted_mutator_alloced = 0;
- return new_obj_size;
+ return new_obj_occupied_size + desturcted_mutator_alloced_occupied_size;
+
+}
+
+/* Snapshot the total occupied size of newly allocated objects, then reset
+ * all per-mutator allocation statistics and the accumulators kept for
+ * already-destructed mutators. Runs under the mutator list lock.
+ * NOTE(review): identifier "desturcted" (sic) matches the file-scope
+ * statics above; returned POINTER_SIZE_INT may truncate to unsigned int
+ * on 64-bit platforms. */
+unsigned int gc_reset_mutator_new_obj_size(GC * gc)
+{
+ POINTER_SIZE_INT new_obj_occupied_size = 0;
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+ new_obj_occupied_size += mutator->new_obj_occupied_size;
+ mutator->new_obj_size = 0;
+ mutator->new_obj_num = 0;
+ mutator->new_obj_occupied_size = 0;
+ mutator->write_barrier_marked_size = 0;
+ mutator->dirty_obj_num = 0;
+ mutator = mutator->next;
+ }
+ new_obj_occupied_size += desturcted_mutator_alloced_occupied_size;
+ desturcted_mutator_alloced_size = 0;
+ desturcted_mutator_alloced_num = 0;
+ desturcted_mutator_alloced_occupied_size = 0;
+ /* also reset the write-barrier accumulator for destructed mutators,
+ consistent with zeroing write_barrier_marked_size per mutator above */
+ desturcted_mutator_write_barrier_marked_size = 0;
+ unlock(gc->mutator_list_lock);
+
+ return new_obj_occupied_size;
+}
+
+/* Number of currently registered mutator threads (read without taking
+ * the mutator list lock; value may be stale). */
+unsigned int gc_get_mutator_number(GC *gc)
+{
+ return gc->num_mutators;
+}
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator.h Tue Oct 28 20:01:01 2008
@@ -50,7 +50,14 @@
SpinLock dirty_set_lock;
unsigned int dirty_obj_slot_num; //only ON_THE_FLY
unsigned int dirty_obj_num; //concurrent mark
+
+ /* obj alloc information */
POINTER_SIZE_INT new_obj_size;
+ /* accurate object number and total size*/
+ POINTER_SIZE_INT new_obj_num;
+ POINTER_SIZE_INT new_obj_occupied_size;
+ POINTER_SIZE_INT write_barrier_marked_size;
+
} Mutator;
void mutator_initialize(GC* gc, void* tls_gc_info);
@@ -64,7 +71,11 @@
Vector_Block* gc_get_local_dirty_set(GC* gc, unsigned int shared_id);
void gc_start_mutator_time_measure(GC* gc);
int64 gc_get_mutator_time(GC* gc);
-POINTER_SIZE_INT gc_get_new_object_size(GC* gc, Boolean need_reset);
+
+unsigned int gc_get_mutator_write_barrier_marked_size( GC *gc );
+unsigned int gc_get_mutator_dirty_obj_num(GC *gc);
+unsigned int gc_get_mutator_new_obj_size( GC* gc );
+unsigned int gc_reset_mutator_new_obj_size( GC* gc );
inline void mutator_post_signal(Mutator* mutator, unsigned int handshake_signal)
{
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/thread/mutator_alloc.cpp Tue Oct 28 20:01:01 2008
@@ -31,8 +31,6 @@
//#define GC_OBJ_SIZE_STATISTIC
-volatile Boolean obj_alloced_live = FALSE;
-
#ifdef GC_OBJ_SIZE_STATISTIC
#define GC_OBJ_SIZE_STA_MAX 256*KB
unsigned int obj_size_distribution_map[GC_OBJ_SIZE_STA_MAX>>10];
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/trace_forward/fspace_alloc.cpp Tue Oct 28 20:01:01 2008
@@ -97,4 +97,3 @@
}
-
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.cpp Tue Oct 28 20:01:01 2008
@@ -209,7 +209,7 @@
}
}
-void verifier_log_start(const char* message)
+void verifier_log_start(char* message)
{
printf("------------------------------%-16s------------------------------\n", message);
}
@@ -217,7 +217,7 @@
void verifier_collect_kind_log(Heap_Verifier* heap_verifier)
{
GC* gc = heap_verifier->gc;
- const char* gc_kind;
+ char* gc_kind;
if(collect_is_minor()){
gc_kind = " minor collection.";
}else if(collect_is_fallback()){
@@ -270,3 +270,4 @@
+
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_common.h Tue Oct 28 20:01:01 2008
@@ -77,7 +77,7 @@
Boolean verifier_parse_options(Heap_Verifier* heap_verifier, char* options);
void verifier_log_before_gc(Heap_Verifier* heap_verifier);
void verifier_log_after_gc(Heap_Verifier* heap_verifier);
-void verifier_log_start(const char* message);
+void verifier_log_start(char* message);
Boolean verify_rootset_slot(REF* p_ref, Heap_Verifier* heap_verifier);
@@ -128,7 +128,7 @@
if(!obj_is_alloc_in_color_table(p_obj))
printf("\nERROR: obj after GC should be set its alloc color!\n");
}else{
- if(gc_mark_is_concurrent())
+ if( !in_con_idle(heap_verifier->gc) )
assert(obj_is_mark_black_in_table(p_obj));
}
#endif
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verifier_scanner.cpp Tue Oct 28 20:01:01 2008
@@ -445,4 +445,3 @@
-
Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp?rev=708756&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp Tue Oct 28 20:01:01 2008
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_live_heap.h"
+#include "verifier_common.h"
+#include "verify_gc_effect.h"
+#include "verify_mutator_effect.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+#include "../mark_sweep/wspace_mark_sweep.h"
+/* Diagnostic dump for an object that failed verification: logs its chunk,
+ * size, slot index (normal objects only), mark-table color and class name
+ * to the gc.verifier channel. Does not modify the object. */
+void analyze_bad_obj(Partial_Reveal_Object *p_obj)
+{
+ Chunk_Header *chunk;
+ unsigned int slot_index;
+ unsigned int obj_size = 0;
+
+ if(is_super_obj(p_obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(p_obj);
+ slot_index = 0;
+ obj_size = CHUNK_SIZE(chunk);
+ INFO2("gc.verifier", "[super bad obj]=" << p_obj << " size=" << obj_size << ", chunk" << chunk);
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(p_obj);
+ slot_index = slot_addr_to_index(chunk, p_obj);
+ obj_size = chunk->slot_size;
+ INFO2("gc.verifier", "[normal bad obj]=" << p_obj << ", size=" << obj_size << ", chunk[" << chunk <<"] slot index[" <<slot_index <<"]" );
+ }
+
+ if(obj_is_mark_gray_in_table(p_obj))
+ INFO2("gc.verifier", "Bad Gray object!!!");
+
+ if(obj_is_mark_black_in_table(p_obj))
+ INFO2("gc.verifier", "It is not a Bad object!!!");
+
+ Partial_Reveal_VTable *vt = decode_vt(obj_get_vt(p_obj));
+ INFO2( "gc.verifier", "bad object is class " << vtable_get_gcvt(vt)->gc_class_name << " jlC=" << vt->jlC);
+ INFO2( "gc.verifier", "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^");
+
+}
+
+
+ /*<--------live objects scanner begin-------->*/
+/* Push the object referenced by *p_ref onto the verifier trace stack.
+ * NULL slots are ignored; non-NULL referents must lie inside the GC heap. */
+static FORCE_INLINE void scan_slot(Heap_Verifier* heap_verifier, REF*p_ref)
+{
+ GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+ Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ if(p_obj == NULL) return;
+ assert(address_belongs_to_gc_heap(p_obj, heap_verifier->gc));
+ verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
+ return;
+}
+
+/* Visit one object during verifier tracing: mark it in the vtable (skip if
+ * already visited), check it is black in the mark table, record verify
+ * info, and push all of its reference slots (array elements or ref fields,
+ * plus the referent field of weak references) via scan_slot.
+ * Fix(review): run the analyze_bad_obj diagnostic BEFORE the assert --
+ * previously the assert aborted debug builds first, so the diagnostic
+ * never printed. Also dropped the unused local gc_verifier. */
+static void scan_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object *p_obj)
+{
+ if(!obj_mark_in_vt(p_obj)) return;
+ /* log diagnostics for a non-black object before asserting on it */
+ if(!obj_is_mark_black_in_table(p_obj)) {
+ analyze_bad_obj(p_obj);
+ }
+ assert(obj_is_mark_black_in_table(p_obj));
+ verify_object_header(p_obj, heap_verifier);
+ verifier_update_verify_info(p_obj, heap_verifier);
+
+ if (!object_has_ref_field(p_obj)) return;
+ REF* p_ref;
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+ unsigned int array_length = array->array_len;
+ //INFO2("gc.verifier","\tscan array "<< p_obj <<"(" << array_length << ")");
+ p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+ for (unsigned int i = 0; i < array_length; i++) {
+ scan_slot(heap_verifier, p_ref+i);
+ }
+ }else{
+ unsigned int num_refs = object_ref_field_num(p_obj);
+ int* ref_iterator = object_ref_iterator_init(p_obj);
+
+ //INFO2("gc.verifier","\tscan object "<< p_obj <<"(" << num_refs << ")");
+ for(unsigned int i=0; i<num_refs; i++){
+ p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+ scan_slot(heap_verifier, p_ref);
+ }
+
+ #ifndef BUILD_IN_REFERENT
+ WeakReferenceType type = special_reference_type(p_obj);
+ if(type != NOT_REFERENCE) {
+ REF *p_referent_field = obj_get_referent_field(p_obj);
+ scan_slot(heap_verifier, p_referent_field);
+ }
+ #endif
+ }
+ return;
+}
+
+
+/* Depth-first trace from one root object: scan it, then drain the verifier
+ * trace stack, scanning each popped object. The stack pointer is re-read
+ * from gc_verifier after every scan_object call -- presumably because
+ * verifier_tracestack_push can swap in a new vector block; confirm. */
+static void trace_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object* p_obj)
+{
+ //INFO2("gc.verifier","trace root ["<< p_obj <<"] => ");
+ scan_object(heap_verifier, p_obj);
+ GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+ Vector_Block* trace_stack = (Vector_Block*)gc_verifier->trace_stack;
+ Partial_Reveal_Object *sub_obj = NULL;
+ while( !vector_stack_is_empty(trace_stack)){
+ sub_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack);
+ scan_object(heap_verifier, sub_obj);
+ trace_stack = (Vector_Block*)gc_verifier->trace_stack;
+ }
+ return;
+}
+
+/* Two-phase live-object trace used to verify concurrent marking:
+ * phase 1 copies every root slot's referent into trace tasks; phase 2
+ * drains the task pool, tracing transitively via trace_object. Collected
+ * object info ends up in objects_pool_before_gc.
+ * NOTE(review): unlike scan_slot, the root loop pushes p_obj without a
+ * NULL check -- confirm root set slots are never NULL. */
+void con_verifier_trace_from_rootsets(Heap_Verifier* heap_verifier, Pool* root_set_pool)
+{
+ Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+ GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+ gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
+ gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
+ gc_verifier->hashcode_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
+ pool_iterator_init(root_set_pool);
+ Vector_Block* root_set = pool_iterator_next(root_set_pool);
+
+ /* first step: copy all root objects to trace tasks. */
+ while(root_set){
+ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ REF* p_ref = (REF* )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+ Partial_Reveal_Object* p_obj = read_slot(p_ref);
+ verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
+ }
+ root_set = pool_iterator_next(root_set_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
+
+ /* second step: iterate over the trace tasks and forward objects */
+ gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
+
+ Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
+
+ while(trace_task){
+ POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
+ while(!vector_block_iterator_end(trace_task,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
+ iter = vector_block_iterator_advance(trace_task,iter);
+ trace_object(heap_verifier, p_obj);
+ }
+ vector_stack_clear(trace_task);
+ pool_put_entry(verifier_metadata->free_task_pool, trace_task);
+ trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
+ }
+
+ /* publish collected object info and recycle the (now empty) trace stack */
+ pool_put_entry(verifier_metadata->objects_pool_before_gc, gc_verifier->objects_set);
+
+ vector_stack_clear(gc_verifier->trace_stack);
+ pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
+ gc_verifier->trace_stack = NULL;
+
+}
+
+/* Undo the vtable mark bit set by scan_object on every object recorded in
+ * objects_pool_before_gc, so the verifier leaves the heap unchanged.
+ * Returns the number of objects unmarked. */
+unsigned int clear_objs_mark_bit(Heap_Verifier* heap_verifier)
+{
+ Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+ Pool* marked_objs_pool = verifier_metadata->objects_pool_before_gc;
+
+ pool_iterator_init(marked_objs_pool);
+ Vector_Block* objs_set = pool_iterator_next(marked_objs_pool);
+ unsigned int clear_counter = 0;
+ while(objs_set){
+ POINTER_SIZE_INT* iter = vector_block_iterator_init(objs_set);
+ while(!vector_block_iterator_end(objs_set,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(objs_set,iter);
+ assert(p_obj != NULL);
+ assert(obj_is_marked_in_vt(p_obj));
+ clear_counter++;
+ obj_unmark_in_vt(p_obj);
+ }
+ objs_set = pool_iterator_next(marked_objs_pool);
+ }
+ return clear_counter;
+}
+
+/* Re-trace all live objects from the recorded root sets after a concurrent
+ * cycle, then clear the temporary vtable mark bits the trace left behind. */
+void verifier_rescan_after_con(Heap_Verifier* heap_verifier)
+{
+ INFO2("gc.con.verify", "start scan live object %%%%%%%%%%%%%%%%%%%%%%");
+ Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+ con_verifier_trace_from_rootsets(heap_verifier, verifier_metadata->root_set_pool);
+ clear_objs_mark_bit(heap_verifier);
+ INFO2("gc.con.verify", "end of scan live object %%%%%%%%%%%%%%%%%%%%%%");
+}
+
+void verify_gc_reset(Heap_Verifier* heap_verifier);
+/* Entry point: verify the heap after a concurrent GC cycle -- copy the
+ * root sets, re-trace live objects, report the marked count and elapsed
+ * time, and reset verifier state.
+ * Fix(review): added the missing ';' after the final INFO2 statement,
+ * consistent with the INFO2 call four lines above. */
+void verify_heap_after_con_gc(GC *gc)
+{
+ Heap_Verifier *heap_verifier = get_heap_verifier();
+ int64 verify_start_time = time_now();
+ verifier_copy_rootsets(gc, heap_verifier);
+ verifier_rescan_after_con(heap_verifier);
+ INFO2("gc.verifier", "[Verifier] verifier marked num=" << heap_verifier->gc_verifier->num_live_objects_before_gc );
+ verify_gc_reset(heap_verifier);
+ unsigned int verify_time = trans_time_unit(time_now() - verify_start_time);
+ INFO2("gc.verifier", "[Verifier] verify time = [" << verify_time << "] ms");
+}
+
Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_concurrent_mark.cpp
------------------------------------------------------------------------------
svn:eol-style = native
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_gc_effect.cpp Tue Oct 28 20:01:01 2008
@@ -328,7 +328,7 @@
if(!obj_is_alloc_in_color_table(p_obj))
printf("\nERROR: obj after GC should be set its alloc color!\n");
}else{
- if(gc_mark_is_concurrent())
+ if( !in_con_idle(heap_verifier->gc) )
assert(obj_is_mark_black_in_table(p_obj));
}
#endif
@@ -563,3 +563,4 @@
+
Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/verify/verify_live_heap.cpp Tue Oct 28 20:01:01 2008
@@ -157,4 +157,3 @@
-
Modified: harmony/enhanced/drlvm/trunk/vm/include/open/gc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/include/open/gc.h?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/include/open/gc.h (original)
+++ harmony/enhanced/drlvm/trunk/vm/include/open/gc.h Tue Oct 28 20:01:01 2008
@@ -651,6 +651,7 @@
Managed_Object_Handle value);
extern void (*gc_heap_wrote_object)(Managed_Object_Handle p_base_of_object_just_written);
+extern Boolean (*gc_heap_copy_object_array)(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length);
/*
* The variables below are exported by the VM so other DLLs modules
* may use them. <code>dll_gc.cpp</code> initializes them to the addresses exported
@@ -693,6 +694,12 @@
*/
GCExport void gc_heap_wrote_object (Managed_Object_Handle p_base_of_object_just_written);
+/**
+ * By calling this function the VM notifies the GC that an array copy
+ * operation should be performed.
+ *
+ * This function is for write barriers on array copy operations.
+ */
+GCExport Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length);
/**
* By calling this function VM notifies GC that a heap reference was written to
Modified: harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp (original)
+++ harmony/enhanced/drlvm/trunk/vm/vmcore/build/vmcore.exp Tue Oct 28 20:01:01 2008
@@ -633,6 +633,9 @@
apr_atomic_dec32;
apr_atomic_inc32;
apr_atomic_cas32;
+ apr_atomic_add32;
+ apr_atomic_set32;
+ apr_sleep;
apr_time_now;
apr_atomic_casptr;
port_rw_barrier;
Modified: harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/vmcore/src/gc/dll_gc.cpp Tue Oct 28 20:01:01 2008
@@ -91,6 +91,7 @@
void (*gc_heap_write_ref)(Managed_Object_Handle p_base_of_object_with_slot,
unsigned offset,
Managed_Object_Handle value) = 0;
+Boolean (*gc_heap_copy_object_array)(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)=0;
void (*gc_heap_wrote_object)(Managed_Object_Handle p_base_of_object_just_written) = 0;
int (*gc_init)() = 0;
Boolean (*gc_is_object_pinned)(Managed_Object_Handle obj) = 0;
@@ -226,6 +227,11 @@
"gc_heap_write_global_slot_compressed",
dllName,
(apr_dso_handle_sym_t)default_gc_heap_write_global_slot_compressed);
+ /* FIXME(review): the optional-lookup fallback below is
+ default_gc_heap_wrote_object, whose signature
+ (void(Managed_Object_Handle)) does not match the Boolean five-argument
+ pointer type it is cast to -- calling it through that type is undefined
+ behavior and its "return value" is garbage. Provide a matching
+ default_gc_heap_copy_object_array that returns a defined Boolean. */
+ gc_heap_copy_object_array = (Boolean (*)(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length))
+ getFunctionOptional(handle,
+ "gc_heap_copy_object_array",
+ dllName,
+ (apr_dso_handle_sym_t)default_gc_heap_wrote_object);
gc_heap_wrote_object = (void (*)(Managed_Object_Handle p_base_of_object_just_written))
getFunctionOptional(handle,
"gc_heap_wrote_object",
Modified: harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp?rev=708756&r1=708755&r2=708756&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/vmcore/src/object/vm_arrays.cpp Tue Oct 28 20:01:01 2008
@@ -543,78 +543,23 @@
{
#ifdef VM_STATS
increment_array_copy_counter(VM_Statistics::get_vm_stats().num_arraycopy_object);
-#endif // VM_STATS
- ManagedObject **src_body =
- (ManagedObject **)get_vector_element_address_ref(src, srcOffset);
- ManagedObject **dst_body =
- (ManagedObject **)get_vector_element_address_ref(dst, dstOffset);
-
if(src_class == dst_class) {
- // If the types of arrays are the same, no type conflicts of array elements are possible.
-#ifdef VM_STATS
- increment_array_copy_counter(VM_Statistics::get_vm_stats().num_arraycopy_object_same_type);
-#endif // VM_STATS
- memmove(dst_body, src_body, length * REF_SIZE);
+ increment_array_copy_counter(VM_Statistics::get_vm_stats().num_arraycopy_object_same_type);
} else {
- // If the types are different, the arrays are different and no overlap of the source and destination is possible.
-#ifdef VM_STATS
increment_array_copy_counter(VM_Statistics::get_vm_stats().num_arraycopy_object_different_type);
-#endif // VM_STATS
- Class* dst_elem_clss = dst_class->get_array_element_class();
- assert(dst_elem_clss);
-
- REFS_RUNTIME_SWITCH_IF
-#ifdef REFS_RUNTIME_OR_COMPRESSED
- COMPRESSED_REFERENCE *src_body_compressed = (COMPRESSED_REFERENCE *)src_body;
- COMPRESSED_REFERENCE *dst_body_compressed = (COMPRESSED_REFERENCE *)dst_body;
- for (int count = 0; count < length; count++) {
- // For non-null elements check if types are compatible.
- COMPRESSED_REFERENCE src_elem_offset = src_body_compressed[count];
- if (src_elem_offset != 0) {
- ManagedObject *src_elem = (ManagedObject *)uncompress_compressed_reference(src_elem_offset);
- Class *src_elem_clss = src_elem->vt()->clss;
- if (src_elem_clss == dst_elem_clss) {
- } else if (!src_elem_clss->is_instanceof(dst_elem_clss)) {
- // note: VM_STATS values are updated when Class::is_instanceof() is called.
- // Since we only flag the base do it before we throw exception
- gc_heap_wrote_object(dst);
- return ACR_TypeMismatch;
- }
- }
- // If ArrayStoreException hasn't been thrown, copy the element.
- dst_body_compressed[count] = src_body_compressed[count];
- // There is not a gc_heap_write_ref call here since gc is disabled and we use gc_heap_wrote_object interface below.
- }
-#endif // REFS_RUNTIME_OR_COMPRESSED
- REFS_RUNTIME_SWITCH_ELSE
-#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
- for (int count = 0; count < length; count++) {
- // For non-null elements check if types are compatible.
- if (src_body[count] != NULL) {
- Class *src_elem_clss = src_body[count]->vt()->clss;
- if (src_elem_clss == dst_elem_clss) {
- } else if (!src_elem_clss->is_instanceof(dst_elem_clss)) {
- // note: VM_STATS values are updated when class_is_subtype_fast() is called.
- // Since we only flag the base do it before we throw exception
- gc_heap_wrote_object(dst);
- return ACR_TypeMismatch;
- }
- }
- // If ArrayStoreException hasn't been thrown, copy the element.
- dst_body[count] = src_body[count];
- // There is not a gc_heap_write_ref call here since gc is disabled and we use gc_heap_wrote_object interface below.
- }
-#endif // REFS_RUNTIME_OR_UNCOMPRESSED
- REFS_RUNTIME_SWITCH_ENDIF
}
+#endif // VM_STATS
- gc_heap_wrote_object(dst);
+ // the object array copy is handled in the GC module
+ if( !gc_heap_copy_object_array(src, srcOffset, dst, dstOffset, length) )
+ return ACR_TypeMismatch;
}
break;
default:
- LDIE(62, "Unexpected type specifier");
+ DIE(("Unexpected type specifier"));
}
return ACR_Okay;
} //array_copy
+