You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@harmony.apache.org by xl...@apache.org on 2007/08/17 06:49:24 UTC

svn commit: r566918 - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: gen/gen_stats.cpp gen/gen_stats.h mark_sweep/sspace_alloc.h mark_sweep/sspace_compact.cpp

Author: xli
Date: Thu Aug 16 21:49:23 2007
New Revision: 566918

URL: http://svn.apache.org/viewvc?view=rev&rev=566918
Log:
HARMONY-3818 and HARMONY-4325 : (missed new files) gc verbose support and mark-sweep algorithm improvement

Added:
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp   (with props)
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h   (with props)
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h   (with props)
    harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp   (with props)

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp?view=auto&rev=566918
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp Thu Aug 16 21:49:23 2007
@@ -0,0 +1,252 @@
+/*
+*  Licensed to the Apache Software Foundation (ASF) under one or more
+*  contributor license agreements.  See the NOTICE file distributed with
+*  this work for additional information regarding copyright ownership.
+*  The ASF licenses this file to You under the Apache License, Version 2.0
+*  (the "License"); you may not use this file except in compliance with
+*  the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+*  Unless required by applicable law or agreed to in writing, software
+*  distributed under the License is distributed on an "AS IS" BASIS,
+*  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+*  See the License for the specific language governing permissions and
+*  limitations under the License.
+*/
+
+#include "../common/gc_common.h"
+
+#ifdef GC_GEN_STATS
+
+#include "gen.h"
+#include "gen_stats.h"
+
+/* Allocates the GC-wide stats record, zero-fills it, and attaches it to gc->stats. */
+void gc_gen_stats_initialize(GC_Gen* gc)
+{
+  GC_Gen_Stats* stats = (GC_Gen_Stats*)STD_MALLOC(sizeof(GC_Gen_Stats));
+
+  memset(stats, 0, sizeof(GC_Gen_Stats));
+  stats->is_los_collected = false; /* already 0 from memset; kept as explicit documentation of the default */
+
+  gc->stats = stats; 
+}
+
+/* Releases the stats record allocated by gc_gen_stats_initialize(). */
+void gc_gen_stats_destruct(GC_Gen* gc)
+{
+  STD_FREE(gc->stats);
+}
+
+/* Zeroes the per-collection survivor counters before a collection starts.
+ * Only the fields for the upcoming collection kind (minor vs. major) are
+ * reset; cumulative fields (alloc totals, pause times) are left untouched. */
+void gc_gen_stats_reset_before_collection(GC_Gen* gc)
+{
+  GC_Gen_Stats* stats = gc->stats;
+
+  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+    /* minor collection: reset nos survivor counters */
+    stats->nos_surviving_obj_num_minor = 0;
+    stats->nos_surviving_obj_size_minor = 0;
+    stats->los_suviving_obj_num = 0;
+    stats->los_suviving_obj_size = 0;
+    stats->is_los_collected = false;
+  }else{
+    /* major (or fallback) collection: reset nos+mos survivor counters */
+    stats->nos_mos_suviving_obj_num_major = 0;
+    stats->nos_mos_suviving_obj_size_major = 0;
+    stats->los_suviving_obj_num = 0;
+    stats->los_suviving_obj_size = 0;
+    stats->is_los_collected = false;  
+  }
+}
+
+/* Aggregates each collector's per-thread stats into the GC-wide record after
+ * a collection completes, then derives the surviving ratios against the
+ * committed sizes of the corresponding spaces. */
+void gc_gen_stats_update_after_collection(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Stats* gc_gen_stats = gc->stats;
+  GC_Gen_Collector_Stats* collector_stats;
+  Boolean is_los_collected = gc_gen_stats->is_los_collected;
+
+  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) {
+
+    /* sum objects each collector moved out of nos during the minor collection */
+    for (unsigned int i=0; i<gc->num_active_collectors; i++) {
+      collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+      gc_gen_stats->nos_surviving_obj_num_minor += collector_stats->nos_obj_num_moved_minor;
+      gc_gen_stats->nos_surviving_obj_size_minor += collector_stats->nos_obj_size_moved_minor;
+    }
+
+    /* surviving ratio = surviving bytes / committed nos size ("ration" spelling is part of the field name) */
+    gc_gen_stats->nos_surviving_ration_minor = ((float)gc_gen_stats->nos_surviving_obj_size_minor)/gc->nos->committed_heap_size;
+
+  }else{
+
+    for (unsigned int i=0; i < gc->num_active_collectors; i++) {
+      collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+      gc_gen_stats->nos_mos_suviving_obj_num_major += collector_stats->nos_mos_obj_num_moved_major;
+      gc_gen_stats->nos_mos_suviving_obj_size_major += collector_stats->nos_mos_obj_size_moved_major;
+
+      /*need to accumulate the los related info if los is collected when major*/
+      if(is_los_collected) {
+        gc_gen_stats->los_suviving_obj_num += collector_stats->los_obj_num_moved_major;
+        gc_gen_stats->los_suviving_obj_size += collector_stats->los_obj_size_moved_major;
+      }
+    }
+
+    /* major ratio is measured against nos + mos combined committed size */
+    gc_gen_stats->nos_mos_suviving_ratio_major = ((float)gc_gen_stats->nos_mos_suviving_obj_size_major)/(gc->nos->committed_heap_size+gc->mos->committed_heap_size);
+  }
+
+  if (is_los_collected) {
+    gc_gen_stats->los_surviving_ration = ((float)gc_gen_stats->los_suviving_obj_size)/gc->los->committed_heap_size;
+  }
+}
+
+/* Emits the space-level collection summary through the TRACE2 "gc.space"
+ * channel: algorithm used, survivor counts/sizes, and surviving ratio for the
+ * collected space(s); an extra Lspace section is printed when los was collected. */
+void gc_gen_stats_verbose(GC_Gen* gc)
+{
+  GC_Gen_Stats* stats = gc->stats;
+  Boolean is_los_collected = stats->is_los_collected;
+  if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+    TRACE2("gc.space", "GC: Fspace Collection stats: "
+      <<"\nGC: collection algo: "<<((stats->nos_collection_algo_minor==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward")
+      <<"\nGC: num surviving objs: "<<stats->nos_surviving_obj_num_minor
+      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_surviving_obj_size_minor)
+      <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ration_minor*100)<<"%\n");
+  }else{
+    TRACE2("gc.space", "GC: Mspace Collection stats: "
+      <<"\nGC: collection algo: "<<((stats->nos_mos_collection_algo_major==MAJOR_COMPACT_SLIDE)?"slide compact":"move compact")
+      <<"\nGC: num surviving objs: "<<stats->nos_mos_suviving_obj_num_major
+      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_mos_suviving_obj_size_major)
+      <<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n");
+  }
+
+  if(stats->is_los_collected) { /*if los is collected, need to output los related info*/
+    TRACE2("gc.space", "GC: Lspace Collection stats: "
+      <<"\nGC: collection algo: "<<((stats->los_collection_algo==MAJOR_COMPACT_SLIDE)?"slide compact":"mark sweep")
+      <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
+      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
+      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ration*100)<<"%\n");
+  }
+
+}
+
+/* Allocates and zero-fills one collector thread's private stats record. */
+void gc_gen_collector_stats_initialize(Collector* collector)
+{
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)STD_MALLOC(sizeof(GC_Gen_Collector_Stats));
+  memset(stats, 0, sizeof(GC_Gen_Collector_Stats));
+  collector->stats = (void*)stats; 
+}
+
+
+/* Releases the per-collector stats record allocated by gc_gen_collector_stats_initialize(). */
+void gc_gen_collector_stats_destruct(Collector* collector)
+{
+  STD_FREE(collector->stats);
+}
+
+/* Zeroes every active collector's stats record (typically before a collection). */
+void gc_gen_collector_stats_reset(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+    memset(stats, 0, sizeof(GC_Gen_Collector_Stats));
+  }
+}
+
+
+/* Prints each collector thread's minor-collection stats plus the accumulated
+ * totals through the TRACE2 "gc.collect" channel. */
+void gc_gen_collector_stats_verbose_minor_collection(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+
+  /*variable used to accumulate each collector's stats when minor collection*/
+  unsigned int total_process_rootset_ref = 0;
+  unsigned int total_mark_nos_obj_num = 0;
+  unsigned int total_mark_non_nos_obj_num = 0;
+  unsigned int total_forward_obj_num = 0;
+  POINTER_SIZE_INT total_forward_obj_size = 0;
+
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+
+    total_process_rootset_ref += stats->process_rootset_ref_num;
+    total_mark_nos_obj_num += stats->nos_obj_num_marked_minor;
+    total_mark_non_nos_obj_num += stats->nonnos_obj_num_marked_minor;
+    total_forward_obj_num += stats->nos_obj_num_moved_minor;
+    total_forward_obj_size += stats->nos_obj_size_moved_minor;
+
+    /*output each collector's stats*/
+    TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+      <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+      <<"\nGC: mark nos obj num: "<<stats->nos_obj_num_marked_minor
+      <<"\nGC: mark nonnos obj num: "<<stats->nonnos_obj_num_marked_minor
+      <<" \nGC: forward obj num: "<<stats->nos_obj_num_moved_minor
+      <<" \nGC: forward obj size: "<<verbose_print_size(stats->nos_obj_size_moved_minor)<<"\n");
+  }
+
+  /*output accumulated info for all collectors*/
+  TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+    <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+    <<"\nGC: mark nos obj num: "<<total_mark_nos_obj_num
+    <<"\nGC: mark nonnos obj num: "<<total_mark_non_nos_obj_num
+    <<"\nGC: forward obj num: "<<total_forward_obj_num
+    <<"\nGC: forward obj size: "<<verbose_print_size(total_forward_obj_size)<<"\n");
+
+}
+
+/* Prints each collector thread's major-collection stats plus the accumulated
+ * totals through the TRACE2 "gc.collect" channel. LOS lines are included only
+ * when los was collected in this cycle.
+ * Fix: totals must be accumulated with += inside the loop; plain assignment
+ * reported only the last collector's numbers in the "Total" section. */
+void gc_gen_collector_stats_verbose_major_collection(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+
+  Boolean is_los_collected = gc->stats->is_los_collected;
+
+  /*variable used to accumulate each collector's stats when major collection*/
+  unsigned int total_process_rootset_ref = 0;
+  unsigned int total_mark_heap_live_obj_num = 0;
+  unsigned int total_move_mos_nos_live_obj_num = 0;
+  POINTER_SIZE_INT total_move_mos_nos_live_obj_size = 0;
+  unsigned int total_move_los_live_obj_num = 0;
+  POINTER_SIZE_INT total_move_los_live_obj_size = 0;
+
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+
+    total_process_rootset_ref += stats->process_rootset_ref_num;
+    total_mark_heap_live_obj_num += stats->num_obj_marked_major;
+    total_move_mos_nos_live_obj_num += stats->nos_mos_obj_num_moved_major;
+    total_move_mos_nos_live_obj_size += stats->nos_mos_obj_size_moved_major;
+    if (is_los_collected){/*if los is collected when major collection happened,  need to accumulate los related info*/
+      total_move_los_live_obj_num += stats->los_obj_num_moved_major;
+      total_move_los_live_obj_size += stats->los_obj_size_moved_major;
+    }
+    if(is_los_collected){
+      TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+        <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+        <<"\nGC: mark obj num: "<<stats->num_obj_marked_major
+        <<"\nGC: move mos and nos obj num: "<<stats->nos_mos_obj_num_moved_major
+        <<"\nGC: move obj size: "<<verbose_print_size(stats->nos_mos_obj_size_moved_major)
+        <<"\nGC: move los obj num: "<<stats->los_obj_num_moved_major
+        <<"\nGC: move los obj size: "<<verbose_print_size(stats->los_obj_size_moved_major)<<"\n");
+    }else{
+      TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+        <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+        <<"\nGC: mark obj num: "<<stats->num_obj_marked_major
+        <<"\nGC: move obj num: "<<stats->nos_mos_obj_num_moved_major
+        <<"\nGC: move obj size: "<<verbose_print_size(stats->nos_mos_obj_size_moved_major)<<"\n");
+    }
+  }
+
+
+  if(is_los_collected){/*if los is collected when major collection happened,  need to output los related collector info*/
+    TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+      <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+      <<"\nGC: mark obj num: "<<total_mark_heap_live_obj_num
+      <<"\nGC: move mos and nos obj num: "<<total_move_mos_nos_live_obj_num
+      <<"\nGC: move obj size: "<<verbose_print_size(total_move_mos_nos_live_obj_size)
+      <<"\nGC: move los obj num: "<<total_move_los_live_obj_num
+      <<"\nGC: move los obj size: "<<verbose_print_size(total_move_los_live_obj_size)<<"\n");
+  }else{
+    TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+      <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+      <<"\nGC: mark obj num: "<<total_mark_heap_live_obj_num
+      <<"\nGC: move mos and nos obj num: "<<total_move_mos_nos_live_obj_num
+      <<"\nGC: move obj size: "<<verbose_print_size(total_move_mos_nos_live_obj_size)<<"\n");
+  }
+}
+
+
+#endif

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h?view=auto&rev=566918
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h Thu Aug 16 21:49:23 2007
@@ -0,0 +1,158 @@
+/*
+*  Licensed to the Apache Software Foundation (ASF) under one or more
+*  contributor license agreements.  See the NOTICE file distributed with
+*  this work for additional information regarding copyright ownership.
+*  The ASF licenses this file to You under the Apache License, Version 2.0
+*  (the "License"); you may not use this file except in compliance with
+*  the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+*  Unless required by applicable law or agreed to in writing, software
+*  distributed under the License is distributed on an "AS IS" BASIS,
+*  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+*  See the License for the specific language governing permissions and
+*  limitations under the License.
+*/
+
+#ifndef _GEN_STATS_H_
+#define _GEN_STATS_H_
+
+#include "gen.h"
+
+/* GC-wide statistics record, one per GC_Gen instance (gc->stats).
+ * NOTE(review): several field names carry misspellings ("suviving", "ration"
+ * for ratio); they are part of the public field names, so they are documented
+ * here rather than renamed. */
+typedef struct GC_Gen_Stats {
+  unsigned int num_minor_collections;
+  unsigned int num_major_collections;
+  unsigned int num_fallback_collections;
+
+  /*time related info*/
+  int64 total_pause_time;  /*total time used for collection*/
+  int64 total_mutator_time;  /*total time used for executing application program*/
+
+  unsigned int obj_num_nos_alloc;    /* number of objects allocated in nos */
+  POINTER_SIZE_INT total_size_nos_alloc;
+  unsigned int obj_num_los_alloc;    /* number of objects allocated in los */
+  POINTER_SIZE_INT total_size_los_alloc;
+
+  /*minor related info*/
+  unsigned int nos_surviving_obj_num_minor;
+  POINTER_SIZE_INT nos_surviving_obj_size_minor;
+  float nos_surviving_ration_minor;  /* surviving ratio ("ration" = misspelling kept) */
+  int nos_collection_algo_minor;
+
+  /*major related info*/
+  unsigned int nos_mos_suviving_obj_num_major;  /* "suviving" = misspelling kept */
+  POINTER_SIZE_INT nos_mos_suviving_obj_size_major;
+  float nos_mos_suviving_ratio_major;
+  int nos_mos_collection_algo_major;
+
+  /*los related info when minor or major*/
+  Boolean is_los_collected; /*whether large obj space is collected or not*/
+  unsigned int los_suviving_obj_num;
+  POINTER_SIZE_INT los_suviving_obj_size;
+  float los_surviving_ration;
+  int los_collection_algo;
+
+}GC_Gen_Stats;
+
+/* Records which minor-collection algorithm ran (reported by gc_gen_stats_verbose). */
+inline void gc_gen_stats_set_nos_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->nos_collection_algo_minor = algo;
+}
+
+/* Records which major-collection algorithm ran (reported by gc_gen_stats_verbose). */
+inline void gc_gen_stats_set_mos_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->nos_mos_collection_algo_major = algo;
+}
+
+/* Records which algorithm collected the large object space. */
+inline void gc_gen_stats_set_los_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->los_collection_algo = algo;
+}
+
+/* Marks whether los was collected this cycle; gates all los stats reporting. */
+inline void gc_gen_stats_set_los_collected_flag(GC_Gen* gc, Boolean flag)
+{
+  gc->stats->is_los_collected = flag;
+}
+
+/* Accounts one object allocation in nos: bumps the object count and adds its
+ * size. Fix: obj_num_nos_alloc was never incremented anywhere (the increment
+ * had been pasted into the los variant instead), so the counter stayed 0. */
+inline void gc_gen_update_nos_alloc_obj_stats(GC_Gen_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->obj_num_nos_alloc++;
+  stats->total_size_nos_alloc += size;
+}
+
+/* Accounts one object allocation in los (large object space).
+ * Fix: copy-paste bug — the original updated the nos fields
+ * (obj_num_nos_alloc / total_size_nos_alloc), leaving the los counters at 0
+ * and double-counting nos allocation size. */
+inline void gc_gen_update_los_alloc_obj_stats(GC_Gen_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->obj_num_los_alloc++;
+  stats->total_size_los_alloc += size;
+}
+
+
+void gc_gen_stats_initialize(GC_Gen* gc);
+void gc_gen_stats_destruct(GC_Gen* gc);
+void gc_gen_stats_reset_before_collection(GC_Gen* gc);
+void gc_gen_stats_update_after_collection(GC_Gen* gc);
+void gc_gen_stats_verbose(GC_Gen* gc);
+
+/* Per-collector-thread statistics record (collector->stats), zeroed by
+ * gc_gen_collector_stats_reset() and summed into GC_Gen_Stats after a collection. */
+typedef struct GC_Gen_Collector_Stats {
+  unsigned int process_rootset_ref_num; 
+
+  /*minor related info*/
+  unsigned int nos_obj_num_moved_minor;      /* objects this thread forwarded out of nos */
+  POINTER_SIZE_INT nos_obj_size_moved_minor;
+  unsigned int nos_obj_num_marked_minor;
+  unsigned int nonnos_obj_num_marked_minor;
+
+  /*major related info*/
+  unsigned int num_obj_marked_major;
+  unsigned int nos_mos_obj_num_moved_major;
+  POINTER_SIZE_INT nos_mos_obj_size_moved_major;
+  unsigned int los_obj_num_moved_major;
+  POINTER_SIZE_INT los_obj_size_moved_major;
+
+}GC_Gen_Collector_Stats;
+
+/* Counts one root-set reference processed by this collector thread. */
+inline void gc_gen_collector_update_rootset_ref_num(GC_Gen_Collector_Stats* stats)
+{
+  stats->process_rootset_ref_num++;
+}
+
+/* Counts one object of the given size forwarded out of nos during minor collection. */
+inline void gc_gen_collector_update_moved_nos_obj_stats_minor(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->nos_obj_num_moved_minor++;
+  stats->nos_obj_size_moved_minor += size;
+}
+
+/* Counts one nos object marked during minor collection. */
+inline void gc_gen_collector_update_marked_nos_obj_stats_minor(GC_Gen_Collector_Stats* stats)
+{
+  stats->nos_obj_num_marked_minor++;
+}
+
+/* Counts one non-nos object marked during minor collection.
+ * Fix: copy-paste bug — the original incremented nos_obj_num_marked_minor,
+ * double-counting nos marks and leaving nonnos_obj_num_marked_minor at 0. */
+inline void gc_gen_collector_update_marked_nonnos_obj_stats_minor(GC_Gen_Collector_Stats* stats)
+{
+  stats->nonnos_obj_num_marked_minor++;
+}
+
+/* Counts one heap object marked during major collection. */
+inline void gc_gen_collector_update_marked_obj_stats_major(GC_Gen_Collector_Stats* stats)
+{
+  stats->num_obj_marked_major++;
+}
+
+/* Counts one object of the given size moved within nos/mos during major collection. */
+inline void gc_gen_collector_update_moved_nos_mos_obj_stats_major(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->nos_mos_obj_num_moved_major++;
+  stats->nos_mos_obj_size_moved_major += size;
+}
+
+/* Counts one large object of the given size moved during major collection. */
+inline void gc_gen_collector_update_moved_los_obj_stats_major(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->los_obj_num_moved_major++;
+  stats->los_obj_size_moved_major += size;
+}
+
+void gc_gen_collector_stats_reset(GC_Gen* gc);
+void gc_gen_collector_stats_initialize(Collector* collector);
+void gc_gen_collector_stats_destruct(Collector* collector);
+void gc_gen_collector_stats_verbose_minor_collection(GC_Gen* gc);
+void gc_gen_collector_stats_verbose_major_collection(GC_Gen* gc);
+
+#endif

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/gen/gen_stats.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h?view=auto&rev=566918
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h Thu Aug 16 21:49:23 2007
@@ -0,0 +1,188 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_ALLOC_H_
+#define _SSPACE_ALLOC_H_
+
+#include "sspace_chunk.h"
+
+
+extern POINTER_SIZE_INT cur_alloc_color;
+extern POINTER_SIZE_INT cur_mark_color;
+extern POINTER_SIZE_INT cur_alloc_mask;
+extern POINTER_SIZE_INT cur_mark_mask;
+
+
+/* Returns non-zero if the slot's color bits in the chunk's table contain the
+ * current alloc color, i.e. the slot is currently allocated. Each slot owns
+ * COLOR_BITS_PER_OBJ consecutive bits in the table. */
+inline Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  return (Boolean)(table[word_index] & (cur_alloc_color << index_in_word));
+}
+
+/* Converts a (word, bit-in-word) position in the color table back to a slot index. */
+inline unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
+{
+  unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
+  return color_bits_index/COLOR_BITS_PER_OBJ;
+}
+
+/* Scans one color word from bit 'index' upward, stepping one slot
+ * (COLOR_BITS_PER_OBJ bits) at a time; returns the bit position of the first
+ * slot NOT carrying the current alloc color, or MAX_SLOT_INDEX if the word
+ * is fully allocated from 'index' on. */
+inline unsigned int next_free_index_in_color_word(POINTER_SIZE_INT word, unsigned int index)
+{
+  while(index < BITS_PER_WORD){
+    if(!(word & (cur_alloc_color << index)))
+      return index;
+    index += COLOR_BITS_PER_OBJ;
+  }
+  return MAX_SLOT_INDEX;
+}
+
+/* Mirror of next_free_index_in_color_word(): returns the bit position of the
+ * first slot that DOES carry the current alloc color, or MAX_SLOT_INDEX. */
+inline unsigned int next_alloc_index_in_color_word(POINTER_SIZE_INT word, unsigned int index)
+{
+  while(index < BITS_PER_WORD){
+    if(word & (cur_alloc_color << index))
+      return index;
+    index += COLOR_BITS_PER_OBJ;
+  }
+  return MAX_SLOT_INDEX;
+}
+
+/* Finds the next free slot strictly after slot_index in a chunk's color table
+ * (the caller must pass a currently-allocated slot, see the assert). Returns
+ * MAX_SLOT_INDEX when no free slot remains among the chunk's slot_num slots. */
+inline unsigned int next_free_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
+{
+  assert(slot_is_alloc_in_table(table, slot_index));
+  ++slot_index;
+  
+  unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  /* after the first word, restart the bit scan at 0 for each following word */
+  for(; word_index <= max_word_index; ++word_index, index_in_word = 0){
+    if(table[word_index] == cur_alloc_mask)
+      continue; /* word fully allocated, skip it */
+    index_in_word = next_free_index_in_color_word(table[word_index], index_in_word);
+    if(index_in_word != MAX_SLOT_INDEX){
+      assert(index_in_word < BITS_PER_WORD);
+      return composed_slot_index(word_index, index_in_word);
+    }
+  }
+  
+  return MAX_SLOT_INDEX;
+}
+
+/* Only used in sspace compaction after sweeping now */
+/* Finds the next allocated (live) slot at or after slot_index in a chunk's
+ * color table; returns MAX_SLOT_INDEX when none remains. Unlike the free-slot
+ * variant, slot_index itself is a candidate and need not be allocated. */
+inline unsigned int next_alloc_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
+{
+  unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  for(; word_index <= max_word_index; ++word_index, index_in_word = 0){
+    if(!table[word_index])
+      continue; /* word has no color bits set at all, skip it */
+    index_in_word = next_alloc_index_in_color_word(table[word_index], index_in_word);
+    if(index_in_word != MAX_SLOT_INDEX){
+      assert(index_in_word < BITS_PER_WORD);
+      return composed_slot_index(word_index, index_in_word);
+    }
+  }
+  
+  return MAX_SLOT_INDEX;
+}
+
+/* Returns the next live object in the chunk starting from chunk->slot_index
+ * and advances the cursor past it; returns NULL when the chunk is exhausted. */
+inline Partial_Reveal_Object *next_alloc_slot_in_chunk(Chunk_Header *chunk)
+{
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int slot_index = next_alloc_slot_index_in_table(table, chunk->slot_index, chunk->slot_num);
+  assert((slot_index == MAX_SLOT_INDEX)
+            || (slot_index < chunk->slot_num) && slot_is_alloc_in_table(table, slot_index));
+  if(slot_index == MAX_SLOT_INDEX)
+    return NULL;
+  Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, slot_index);
+  chunk->slot_index = slot_index + 1; /* resume the scan after this object next time */
+  return p_obj;
+}
+
+/* Clears the color bits of all slots below ceiling_slot_index: whole table
+ * words via memset, then the partial trailing word via a mask that keeps the
+ * bits of slots at or above the ceiling. */
+inline void clear_free_slot_in_table(POINTER_SIZE_INT *table, unsigned int ceiling_slot_index)
+{
+  assert(ceiling_slot_index && ceiling_slot_index != MAX_SLOT_INDEX);
+  unsigned int index_word_num = ceiling_slot_index / SLOT_NUM_PER_WORD_IN_TABLE;
+  memset(table, 0, BYTES_PER_WORD*index_word_num);
+  unsigned int bits_need_clear = ceiling_slot_index % SLOT_NUM_PER_WORD_IN_TABLE;
+  if(!bits_need_clear) return; /* ceiling fell exactly on a word boundary */
+  POINTER_SIZE_INT bit_mask = ~(((POINTER_SIZE_INT)1 << (bits_need_clear*COLOR_BITS_PER_OBJ)) - 1);
+  table[index_word_num] &= bit_mask;
+}
+
+/* Marks a slot allocated by OR-ing the current alloc color into its color
+ * bits; the slot must currently be free (see the assert). */
+inline void alloc_slot_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+  assert(!slot_is_alloc_in_table(table, slot_index));
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  table[word_index] |= cur_alloc_color << index_in_word;
+}
+
+/* We don't enable fresh chunk alloc for now,
+ * because we observed perf down for the extra conditional statement when no many fresh chunks.
+ */
+//#define ENABLE_FRESH_CHUNK_ALLOC
+
+/* 1. No need of synchronization. This is an allocator local chunk.
+ * 2. If this chunk runs out of space, clear the chunk pointer.
+ *    So it is important to give a parameter which is a local chunk pointer of a allocator while invoking this func.
+ */
+/* Allocates one slot from the allocator-local chunk at chunk->slot_index.
+ * Marks the slot in the color table, optionally zeroes it, then advances the
+ * free-slot cursor. When the chunk becomes full, marks it CHUNK_USED and
+ * nulls the caller's chunk pointer (hence the reference parameter). */
+inline void *alloc_in_chunk(Chunk_Header* &chunk)
+{
+  POINTER_SIZE_INT *table = chunk->table;
+  unsigned int slot_index = chunk->slot_index;
+  
+  /* slot address = chunk base + slot_size * index */
+  void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
+  alloc_slot_in_table(table, slot_index);
+  if(chunk->status & CHUNK_NEED_ZEROING)
+    memset(p_obj, 0, chunk->slot_size);
+#ifdef SSPACE_VERIFY
+  sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+#endif
+
+#ifdef ENABLE_FRESH_CHUNK_ALLOC
+  /* fresh chunks are allocated sequentially, no table scan needed */
+  if(chunk->status & CHUNK_FRESH){
+    ++slot_index;
+    chunk->slot_index = (slot_index < chunk->slot_num) ? slot_index : MAX_SLOT_INDEX;
+  } else
+#endif
+    chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
+  if(chunk->slot_index == MAX_SLOT_INDEX){
+    /* chunk exhausted: retire it and clear the allocator's local pointer */
+    chunk->status = CHUNK_USED | CHUNK_NORMAL;
+    chunk = NULL;
+  }
+  
+  assert(!chunk || chunk->slot_index < chunk->slot_num);
+  return p_obj;
+}
+
+/* Flags a large ("super") object by setting SUPER_OBJ_MASK in its obj_info header. */
+inline void set_super_obj_mask(void *large_obj)
+{ ((Partial_Reveal_Object*)large_obj)->obj_info |= SUPER_OBJ_MASK; }
+
+#endif // _SSPACE_ALLOC_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_alloc.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp?view=auto&rev=566918
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp Thu Aug 16 21:49:23 2007
@@ -0,0 +1,335 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "sspace_chunk.h"
+#include "sspace_alloc.h"
+#include "sspace_mark_sweep.h"
+#include "sspace_verify.h"
+#include "../common/fix_repointed_refs.h"
+
+
+/* Number of occupancy buckets used when roughly sorting a pfc pool */
+#define PFC_SORT_NUM  8
+
+/* Shared cursor: collectors atomically grab chunks from here during the ref-fixing pass */
+static Chunk_Header_Basic *volatile next_chunk_for_fixing;
+
+/* Decide whether this collection should compact the sspace.
+ * Compaction is enabled when the free-memory ratio exceeds SSPACE_COMPACT_RATIO,
+ * except for GCs forced by the runtime (e.g. System.gc), which skip compaction.
+ */
+void sspace_decide_compaction_need(Sspace *sspace)
+{
+  POINTER_SIZE_INT free_mem_size = free_mem_in_sspace(sspace, FALSE);
+  float free_mem_ratio = (float)free_mem_size / sspace->committed_heap_size;
+  if((free_mem_ratio > SSPACE_COMPACT_RATIO) && (sspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+    sspace->need_compact = sspace->move_object = TRUE;
+  } else {
+    sspace->need_compact = sspace->move_object = FALSE;
+  }
+}
+
+/* Push a chunk onto the front of a doubly-linked bucket list.
+ * head/tail are in-out: tail is set only when the bucket was empty
+ * (insertion is always at the head, so the first chunk stays the tail).
+ */
+static inline void sorted_chunk_bucket_add_entry(Chunk_Header **head, Chunk_Header **tail, Chunk_Header *chunk)
+{
+  chunk->adj_prev = NULL; /* Field adj_prev is used as prev */
+  
+  /* Empty bucket: the new chunk is both head and tail */
+  if(!*head){
+    assert(!*tail);
+    chunk->next = NULL;
+    *head = *tail = chunk;
+    return;
+  }
+  
+  /* Non-empty bucket: link the new chunk in front of the current head */
+  assert(*tail);
+  chunk->next = *head;
+  (*head)->adj_prev = (Chunk_Header_Basic*)chunk;
+  *head = chunk;
+}
+
+/* Roughly sort the chunks of a pfc pool by occupancy into PFC_SORT_NUM buckets,
+ * then link the buckets into one bidirectional list ordered from least free
+ * (head) to most free (tail). Returns FALSE — and restores the pool — when
+ * compacting this pool could not yield even one free chunk.
+ * One assumption: pfc_pool is not empty
+ * NOTE(review): slot_num stays uninitialized if the pool is empty despite the
+ * assumption; relies on the caller's pool_is_empty() check — confirm.
+ * NOTE(review): the bucket computation assumes every chunk in this pool has the
+ * same slot_num — presumably one pfc pool per slot size; verify.
+ */
+static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
+  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
+  unsigned int slot_num;
+  unsigned int chunk_num = 0;
+  unsigned int slot_alloc_num = 0;
+  
+  /* Init buckets' heads and tails */
+  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
+  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
+  
+  /* Roughly sort chunks in pfc_pool */
+  pool_iterator_init(pfc_pool);
+  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+  if(chunk) slot_num = chunk->slot_num;
+  while(chunk){
+    ++chunk_num;
+    assert(chunk->alloc_num);
+    slot_alloc_num += chunk->alloc_num;
+    Chunk_Header *next_chunk = chunk->next;
+    /* Map occupancy fraction alloc_num/slot_num to a bucket in [0, PFC_SORT_NUM) */
+    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
+    assert(bucket_index < PFC_SORT_NUM);
+    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
+    chunk = next_chunk;
+  }
+  
+  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
+  pool_empty(pfc_pool);
+  
+  /* If we can't get a free chunk after compaction, there is no need to compact.
+   * This condition includes that the chunk num in pfc pool is equal to 1, in which case there is also no need to compact
+   */
+  if(slot_num*(chunk_num-1) <= slot_alloc_num){
+    /* Abort: put every bucketed chunk back into the pool untouched */
+    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
+      Chunk_Header *chunk = bucket_head[i];
+      while(chunk){
+        Chunk_Header *next_chunk = chunk->next;
+        pool_put_entry(pfc_pool, chunk);
+        chunk = next_chunk;
+      }
+    }
+    return FALSE;
+  }
+  
+  /* Link the sorted chunk buckets into one single ordered bidirectional list */
+  Chunk_Header *head = NULL;
+  Chunk_Header *tail = NULL;
+  /* Iterate buckets from fullest (PFC_SORT_NUM-1) down to emptiest (0) */
+  for(unsigned int i = PFC_SORT_NUM; i--;){
+    assert((head && tail) || (!head && !tail));
+    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
+    if(!bucket_head[i]) continue;
+    if(!tail){
+      head = bucket_head[i];
+      tail = bucket_tail[i];
+    } else {
+      tail->next = bucket_head[i];
+      bucket_head[i]->adj_prev = (Chunk_Header_Basic*)tail;
+      tail = bucket_tail[i];
+    }
+  }
+  
+  assert(head && tail);
+  *least_free_chunk = head;
+  *most_free_chunk = tail;
+  
+  return TRUE;
+}
+
+/* Pop and return the chunk at the least-free end (list head) of the sorted
+ * bidirectional list, or NULL when the list is empty. Updates both end pointers.
+ */
+static inline Chunk_Header *get_least_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  if(!*least_free_chunk){
+    assert(!*most_free_chunk);
+    return NULL;
+  }
+  Chunk_Header *result = *least_free_chunk;
+  *least_free_chunk = (*least_free_chunk)->next;
+  if(*least_free_chunk)
+    (*least_free_chunk)->adj_prev = NULL;
+  else
+    *most_free_chunk = NULL;  /* List became empty */
+  return result;
+}
+/* Pop and return the chunk at the most-free end (list tail) of the sorted
+ * bidirectional list, or NULL when the list is empty. Updates both end pointers.
+ * (adj_prev serves as the backward link, as set up by the sorter.)
+ */
+static inline Chunk_Header *get_most_free_chunk(Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
+{
+  if(!*most_free_chunk){
+    assert(!*least_free_chunk);
+    return NULL;
+  }
+  Chunk_Header *result = *most_free_chunk;
+  *most_free_chunk = (Chunk_Header*)(*most_free_chunk)->adj_prev;
+  if(*most_free_chunk)
+    (*most_free_chunk)->next = NULL;
+  else
+    *least_free_chunk = NULL;  /* List became empty */
+  return result;
+}
+
+/* Copy live objects from src into dest (same slot size) until either src is
+ * drained or dest fills up. Each moved object gets a forwarding pointer stored
+ * in its old header for the later ref-fixing pass. When dest becomes full,
+ * *dest_ptr is cleared (alloc_in_chunk NULLs it) so the caller fetches a new dest.
+ */
+static inline void move_obj_between_chunks(Chunk_Header **dest_ptr, Chunk_Header *src)
+{
+  Chunk_Header *dest = *dest_ptr;
+  assert(dest->slot_size == src->slot_size);
+  
+  unsigned int slot_size = dest->slot_size;
+  unsigned int alloc_num = src->alloc_num;
+  assert(alloc_num);
+  
+  while(alloc_num && dest){
+    Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src);
+    void *target = alloc_in_chunk(dest);  /* may NULL out dest when it fills */
+    assert(p_obj && target);
+    memcpy(target, p_obj, slot_size);
+#ifdef SSPACE_VERIFY
+    sspace_modify_mark_in_compact(target, p_obj, slot_size);
+#endif
+    /* Record the forwarding address in the old object's header */
+    obj_set_fw_in_oi(p_obj, target);
+    --alloc_num;
+  }
+  
+  /* dest might be set to NULL, so we use *dest_ptr here */
+  (*dest_ptr)->alloc_num += src->alloc_num - alloc_num;
+  assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
+  src->alloc_num = alloc_num;  /* objects still left in src (0 if fully drained) */
+  if(!dest){
+    assert((*dest_ptr)->alloc_num == (*dest_ptr)->slot_num);
+    *dest_ptr = NULL;
+    /* NOTE(review): presumably resets src's table entry at the scan cursor so a
+     * later scan restarts cleanly — confirm against clear_free_slot_in_table */
+    clear_free_slot_in_table(src->table, src->slot_index);
+  }
+}
+
+/* Compaction pass 1 worker: for each pfc pool (one per slot size), sort its
+ * chunks by occupancy, then drain objects from the emptiest chunks into the
+ * fullest ones. Fully drained chunks are returned as free chunks; leftover
+ * partially-full chunks are put back into the pfc pool.
+ */
+static void sspace_move_objects(Collector *collector, Sspace *sspace)
+{
+  Chunk_Header *least_free_chunk, *most_free_chunk;
+  Pool *pfc_pool = sspace_grab_next_pfc_pool(sspace);
+  
+  for(; pfc_pool; pfc_pool = sspace_grab_next_pfc_pool(sspace)){
+    if(pool_is_empty(pfc_pool)) continue;
+    Boolean pfc_pool_need_compact = pfc_pool_roughly_sort(pfc_pool, &least_free_chunk, &most_free_chunk);
+    if(!pfc_pool_need_compact) continue;
+    
+    /* dest = fullest chunk (least free); src = emptiest chunk (most free) */
+    Chunk_Header *dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
+    Chunk_Header *src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
+    Boolean src_is_new = TRUE;
+    while(dest && src){
+      /* A fresh src needs its scan cursor rewound before draining */
+      if(src_is_new)
+        src->slot_index = 0;
+      chunk_depad_last_index_word(src);
+      move_obj_between_chunks(&dest, src);
+      if(!dest)
+        dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
+      if(!src->alloc_num){
+        /* src fully drained: hand it over as a free chunk and fetch the next */
+        collector_add_free_chunk(collector, (Free_Chunk*)src);
+        src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
+        src_is_new = TRUE;
+      } else {
+        src_is_new = FALSE;  /* keep draining the same src into the new dest */
+      }
+    }
+    
+    /* Rebuild the pfc_pool */
+    if(dest)
+      sspace_put_pfc(sspace, dest);
+    if(src){
+      chunk_pad_last_index_word(src, cur_alloc_mask);
+      pfc_reset_slot_index(src);
+      sspace_put_pfc(sspace, src);
+    }
+  }
+}
+
+/* Reset the shared chunk cursor to the start of the sspace heap so the
+ * ref-fixing pass can walk all chunks from the beginning.
+ */
+static void sspace_init_chunk_for_ref_fixing(Sspace *sspace)
+{
+  next_chunk_for_fixing = (Chunk_Header_Basic*)space_heap_start((Space*)sspace);
+  next_chunk_for_fixing->adj_prev = NULL;
+}
+
+/* Fix all repointed reference slots inside the live objects of a normal
+ * (multi-slot) chunk. Completely full chunks are walked by plain address
+ * increments; partially full chunks are walked via the allocation table.
+ */
+static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+{
+  /* Init field slot_index and depad the last index word in table for fixing */
+  chunk->slot_index = 0;
+  chunk_depad_last_index_word(chunk);
+  
+  unsigned int alloc_num = chunk->alloc_num;
+  assert(alloc_num);
+  
+  /* After compaction, many chunks are filled with objects.
+   * For these chunks, we needn't find the allocated slot one by one by calling next_alloc_slot_in_chunk.
+   * That is a little time consuming.
+   * We'd like to fix those objects by incrementing their addr to find the next.
+   */
+  if(alloc_num == chunk->slot_num){  /* Filled with objects */
+    unsigned int slot_size = chunk->slot_size;
+    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, 0);
+    for(unsigned int i = alloc_num; i--;){
+      object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      sspace_verify_fix_in_compact();
+#endif
+      p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj + slot_size);
+    }
+  } else {  /* Chunk is not full */
+    while(alloc_num){
+      Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(chunk);
+      assert(p_obj);
+      object_fix_ref_slots(p_obj);
+#ifdef SSPACE_VERIFY
+      sspace_verify_fix_in_compact();
+#endif
+      --alloc_num;
+    }
+  }
+  
+  /* Partially-full chunks go back to allocator use: re-pad the table and reset the cursor */
+  if(chunk->alloc_num != chunk->slot_num){
+    chunk_pad_last_index_word(chunk, cur_alloc_mask);
+    pfc_reset_slot_index(chunk);
+  }
+}
+
+/* Fix repointed reference slots of an abnormal chunk, which holds exactly one
+ * (large) object at its base.
+ */
+static void abnormal_chunk_fix_repointed_refs(Chunk_Header *chunk)
+{
+  object_fix_ref_slots((Partial_Reveal_Object*)chunk->base);
+#ifdef SSPACE_VERIFY
+  sspace_verify_fix_in_compact();
+#endif
+}
+
+/* Compaction pass 2 worker: collectors atomically grab chunks via the shared
+ * next_chunk_for_fixing cursor and fix references in each live chunk.
+ * Free chunks (neither NORMAL nor ABNORMAL status) are skipped.
+ */
+static void sspace_fix_repointed_refs(Collector *collector, Sspace *sspace)
+{
+  Chunk_Header_Basic *chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
+  
+  while(chunk){
+    if(chunk->status & CHUNK_NORMAL)
+      normal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+    else if(chunk->status & CHUNK_ABNORMAL)
+      abnormal_chunk_fix_repointed_refs((Chunk_Header*)chunk);
+    
+    chunk = sspace_grab_next_chunk(sspace, &next_chunk_for_fixing, TRUE);
+  }
+}
+
+/* Barrier counters: collectors increment these to signal completion of the
+ * moving and fixing passes respectively (see compact_sspace) */
+static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+
+/* Per-collector entry point for sspace compaction, run by every active
+ * collector. Pass 1 moves live objects between partially-free chunks of the
+ * same slot size; pass 2 fixes references that point to moved objects.
+ * Hand-rolled barriers separate the passes; the last collector to finish a
+ * pass does the single-threaded setup before releasing the others.
+ */
+void compact_sspace(Collector *collector, Sspace *sspace)
+{
+  GC *gc = collector->gc;
+  
+  unsigned int num_active_collectors = gc->num_active_collectors;
+  
+  /* Pass 1: **************************************************
+     move live objects between pfcs with the same size *****************/
+  /* NOTE(review): presumably resets the counter left over from the previous
+     collection (its end value was num_active_collectors+1) — confirm against
+     atomic_cas32's argument order */
+  atomic_cas32(&num_moving_collectors, 0, num_active_collectors+1);
+  
+  sspace_move_objects(collector, sspace);
+  
+  unsigned int old_num = atomic_inc32(&num_moving_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+#ifdef SSPACE_TIME
+    sspace_compact_time(FALSE);
+#endif
+    sspace_init_chunk_for_ref_fixing(sspace);
+    /* let other collectors go */
+    num_moving_collectors++;
+  }
+  /* Spin until the last collector bumps the counter past num_active_collectors */
+  while(num_moving_collectors != num_active_collectors + 1);
+  
+  /* Pass 2: **************************************************
+     fix repointed references *********************************/
+  atomic_cas32( &num_fixing_collectors, 0, num_active_collectors);
+  
+  sspace_fix_repointed_refs(collector, sspace);
+  
+  /* Barrier: wait until every collector has finished fixing refs */
+  atomic_inc32(&num_fixing_collectors);
+  while(num_fixing_collectors != num_active_collectors);
+  
+}
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_compact.cpp
------------------------------------------------------------------------------
    svn:eol-style = native