Posted to commits@harmony.apache.org by xl...@apache.org on 2007/12/26 11:17:15 UTC

svn commit: r606876 [3/6] - in /harmony/enhanced/drlvm/trunk/vm/gc_gen/src: common/ finalizer_weakref/ gen/ jni/ los/ mark_compact/ mark_sweep/ semi_space/ thread/ trace_forward/ utils/ verify/

Modified: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp?rev=606876&r1=606875&r2=606876&view=diff
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp (original)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/sspace_mark_concurrent.cpp Wed Dec 26 02:17:10 2007
@@ -1,235 +1,2 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
 
-#include "sspace_mark_sweep.h"
-#include "../finalizer_weakref/finalizer_weakref.h"
-#include "../thread/marker.h"
-
-Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
-
-static FORCE_INLINE void scan_slot(Collector* marker, REF *p_ref)
-{
-  Partial_Reveal_Object *p_obj = read_slot(p_ref);
-  if( p_obj == NULL) return;
-  
-  assert(address_belongs_to_gc_heap(p_obj, marker->gc));
-  if(obj_mark_gray_in_table(p_obj)){
-    assert(p_obj);
-    collector_tracestack_push((Collector*)marker, p_obj);
-  }  
-}
-
-static FORCE_INLINE void scan_object(Marker* marker, Partial_Reveal_Object *p_obj)
-{
-  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
-
-  if(obj_is_dirty_in_table(p_obj)){ 
-    assert(obj_is_mark_black_in_table(p_obj));
-    return;
-  }
-
-  if(!object_has_ref_field(p_obj)) return;
-
-  REF *p_ref;
-  
-  if(object_is_array(p_obj)){   /* scan array object */
-    Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
-    unsigned int array_length = array->array_len;
-    
-    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
-    for (unsigned int i = 0; i < array_length; i++)
-      scan_slot((Collector*)marker, p_ref+i);
-    
-    return;
-  }
-  
-  /* scan non-array object */
-  unsigned int num_refs = object_ref_field_num(p_obj);
-  int *ref_iterator = object_ref_iterator_init(p_obj);
-  
-  for(unsigned int i=0; i<num_refs; i++){
-    p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
-    scan_slot((Collector*)marker, p_ref);
-  }
-
-#ifndef BUILD_IN_REFERENT
-  scan_weak_reference((Collector*)marker, p_obj, scan_slot);
-#endif
-
-}
-
-static void trace_object(Marker* marker, Partial_Reveal_Object *p_obj)
-{
-  scan_object(marker, p_obj);
-  obj_mark_black_in_table(p_obj);
-  
-  Vector_Block *trace_stack = marker->trace_stack;
-  while(!vector_stack_is_empty(trace_stack)){
-    p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
-    scan_object(marker, p_obj);    
-    obj_mark_black_in_table(p_obj);
-    trace_stack = marker->trace_stack;
-  }
-}
-
-static Boolean concurrent_mark_need_terminating(GC* gc)
-{
-  GC_Metadata *metadata = gc->metadata;
-  return gc_local_snapshot_is_empty(gc) && pool_is_empty(metadata->dirty_obj_snaptshot_pool);
-}
-
-/* for marking phase termination detection */
-static volatile unsigned int num_active_markers = 0;
-
-void sspace_mark_scan_concurrent(Marker* marker)
-{
-  int64 time_mark_start = time_now();
-  GC *gc = marker->gc;
-  GC_Metadata *metadata = gc->metadata;
-  
-  /* each marker increments num_active_markers; this is necessary for the termination barrier later. */
-  atomic_inc32(&num_active_markers);
-  
-  marker->trace_stack = free_task_pool_get_entry(metadata);
-  
-  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
-  
-  /* first step: copy all root objects to mark tasks.
-     FIXME:: can be done sequentially before coming here to eliminate atomic ops */
-  while(root_set){
-    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      REF *p_ref = (REF *)*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-      
-      Partial_Reveal_Object *p_obj = read_slot(p_ref);
-      /* a root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
-      assert(p_obj!=NULL);
-      /* we have to mark the object before putting it into the mark task, because
-         two slots may contain the same object. The object would then be scanned twice
-         and its ref slots recorded twice. A problem occurs when the ref slot is updated
-         the first time with the new position, while the second time the value in the
-         ref slot is still the old position rather than the new one as expected.
-         This can be worked around if we want.
-      */
-      assert(address_belongs_to_gc_heap(p_obj, gc));
-      if(obj_mark_gray_in_table(p_obj))
-        collector_tracestack_push((Collector*)marker, p_obj);
-    }
-    root_set = pool_iterator_next(metadata->gc_rootset_pool);
-  }
-  /* put back the last trace_stack task */
-  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
-  
-  marker_notify_mark_root_done(marker);
-
-  /*second step: mark dirty object snapshot pool*/
-  marker->trace_stack = free_task_pool_get_entry(metadata);
-
-retry:
-
-  Vector_Block* snapshot_set = pool_get_entry(metadata->dirty_obj_snaptshot_pool);
-
-  while(snapshot_set){
-    POINTER_SIZE_INT* iter = vector_block_iterator_init(snapshot_set);
-    while(!vector_block_iterator_end(snapshot_set,iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
-      iter = vector_block_iterator_advance(snapshot_set,iter);
-
-      assert(p_obj!=NULL); //ynhe, restrict?
-      if(obj_mark_gray_in_table(p_obj))
-        collector_tracestack_push((Collector*)marker, p_obj);
-    } 
-    vector_block_clear(snapshot_set);
-    pool_put_entry(metadata->free_set_pool, snapshot_set);
-    snapshot_set = pool_get_entry(metadata->dirty_obj_snaptshot_pool);
-  }
-
-    /* put back the last trace_stack task */    
-  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  
-
-  /* third step: iterate over the mark tasks and scan objects */
-  /* get a task buf for the mark stack */
-  marker->trace_stack = free_task_pool_get_entry(metadata);
-
-  
-  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
-  
-  while(mark_task){
-    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
-    while(!vector_block_iterator_end(mark_task,iter)){
-      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
-      iter = vector_block_iterator_advance(mark_task,iter);
-      trace_object(marker, p_obj);      
-    }
-    /* run out one task, put back to the pool and grab another task */
-    vector_stack_clear(mark_task);
-    pool_put_entry(metadata->free_task_pool, mark_task);
-    mark_task = pool_get_entry(metadata->mark_task_pool);
-  }
-  
-  /* termination conditions:
-       1. all threads have finished their current job;
-       2. the local snapshot vectors are empty;
-       3. the global snapshot pool is empty.
-    */
-  atomic_dec32(&num_active_markers);
-  while(num_active_markers != 0 || !concurrent_mark_need_terminating(gc)){
-    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->dirty_obj_snaptshot_pool)){
-      atomic_inc32(&num_active_markers);
-      goto retry;
-    }else{
-      /*grab a block from mutator and begin tracing*/
-      POINTER_SIZE_INT thread_num = (POINTER_SIZE_INT)marker->thread_handle;
-      Vector_Block* local_snapshot_set = gc_get_local_snapshot(gc, (unsigned int)(thread_num + 1));      
-      /* 1. If local_snapshot_set has its full bit set, the block is full and will no longer be put into the
-            global snapshot pool, so it must be checked again for remaining unscanned entries. In this case
-            the share bit in local_snapshot_set should not be cleared, because we rescan it exclusively.
-         2. If local_snapshot_set does not have its full bit set, the block is still used by the mutator and
-            may yet be put into the global snapshot pool. In this case we simply clear the share bit in
-            local_snapshot_set.
-      */
-      if(local_snapshot_set != NULL){
-        atomic_inc32(&num_active_markers);
-        while(!vector_block_is_empty(local_snapshot_set) || !vector_block_not_full_set_unshared(local_snapshot_set)){
-          Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_snapshot_set);        
-          if(obj_mark_gray_in_table(p_obj)) 
-            collector_tracestack_push((Collector*)marker, p_obj);
-        }
-        goto retry;
-      }
-    }
-  }
-  
-  /* put back the last mark stack to the free pool */
-  mark_task = (Vector_Block*)marker->trace_stack;
-  vector_stack_clear(mark_task);
-  pool_put_entry(metadata->free_task_pool, mark_task);
-  marker->trace_stack = NULL;
-  assert(pool_is_empty(metadata->dirty_obj_snaptshot_pool));
-
-  int64 time_mark = time_now() - time_mark_start;
-  marker->time_mark = time_mark;
-  
-  return;
-}
-
-void trace_obj_in_ms_concurrent_mark(Marker *marker, void *p_obj)
-{
-  obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
-  trace_object(marker, (Partial_Reveal_Object *)p_obj);
-}
 

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,260 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace.h"
+#include "wspace_chunk.h"
+#include "wspace_verify.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+
+struct GC_Gen;
+
+Wspace *wspace_initialize(GC *gc, void *start, POINTER_SIZE_INT wspace_size, POINTER_SIZE_INT commit_size)
+{
+  /* With wspace in the heap, the heap must be composed of a single wspace or a wspace and a NOS.
+   * In either case, the reserved size and committed size of wspace must be the same.
+   * Because wspace has only mark-sweep collection, it is not possible to shrink wspace.
+   * So there is no need to use dynamic space resizing.
+   */
+  assert(wspace_size == commit_size);
+  
+  Wspace *wspace = (Wspace*)STD_MALLOC(sizeof(Wspace));
+  assert(wspace);
+  memset(wspace, 0, sizeof(Wspace));
+  
+  wspace->reserved_heap_size = wspace_size;
+  
+  void *reserved_base = start;
+  
+  /* commit wspace mem */
+  if(!large_page_hint)
+    vm_commit_mem(reserved_base, commit_size);
+  memset(reserved_base, 0, commit_size);
+  wspace->committed_heap_size = commit_size;
+  
+  wspace->heap_start = reserved_base;
+  wspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + wspace_size);
+    
+  wspace->num_collections = 0;
+  wspace->time_collections = 0;
+  wspace->survive_ratio = 0.2f;
+
+  wspace->move_object = FALSE;
+  wspace->gc = gc;
+  
+  wspace_init_chunks(wspace);
+
+  wspace->space_statistic = (Space_Statistics*)STD_MALLOC(sizeof(Space_Statistics));
+  assert(wspace->space_statistic);
+  memset(wspace->space_statistic, 0, sizeof(Space_Statistics));
+
+#ifdef USE_MARK_SWEEP_GC
+  gc_ms_set_wspace((GC_MS*)gc, wspace);
+#else
+  gc_set_mos((GC_Gen*)gc, (Space*)wspace);
+#endif
+
+#ifdef SSPACE_VERIFY
+  wspace_verify_init(gc);
+#endif
+  return wspace;
+}
+
+static void wspace_destruct_chunks(Wspace *wspace) { return; }
+
+void wspace_destruct(Wspace *wspace)
+{
+  //FIXME:: when mapping the to-half, the decommission start address should change
+  wspace_destruct_chunks(wspace);
+  STD_FREE(wspace);
+}
+
+void wspace_reset_after_collection(Wspace *wspace)
+{
+  wspace->move_object = FALSE;
+  wspace->need_compact = FALSE;
+  wspace->need_fix = FALSE;
+}
+
+void allocator_init_local_chunks(Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  Size_Segment **size_segs = wspace->size_segments;
+  
+  /* Alloc mem for size segments (Chunk_Header**) */
+  unsigned int seg_size = sizeof(Chunk_Header**) * SIZE_SEGMENT_NUM;
+  Chunk_Header ***local_chunks = (Chunk_Header***)STD_MALLOC(seg_size);
+  memset(local_chunks, 0, seg_size);
+  
+  /* Alloc mem for local chunk pointers */
+  unsigned int chunk_ptr_size = 0;
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(size_segs[i]->local_alloc){
+      chunk_ptr_size += size_segs[i]->chunk_num;
+    }
+  }
+  chunk_ptr_size *= sizeof(Chunk_Header*);
+  Chunk_Header **chunk_ptrs = (Chunk_Header**)STD_MALLOC(chunk_ptr_size);
+  memset(chunk_ptrs, 0, chunk_ptr_size);
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size_segs[i]->local_alloc){
+      local_chunks[i] = chunk_ptrs;
+      chunk_ptrs += size_segs[i]->chunk_num;
+    }
+  }
+  
+  allocator->local_chunks = local_chunks;
+}
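+
+/* Layout sketch (descriptive only): local_chunks has SIZE_SEGMENT_NUM entries; the entries for
+ * size segments with local_alloc point into the single contiguous chunk_ptrs buffer allocated
+ * above, e.g. if only segments 0 and 1 allow local allocation:
+ *
+ *   local_chunks[0] -> &chunk_ptrs[0]                   (seg0.chunk_num slots)
+ *   local_chunks[1] -> &chunk_ptrs[seg0.chunk_num]      (seg1.chunk_num slots)
+ *   local_chunks[2] =  NULL                             (no local allocation for seg2)
+ */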
+
+void allocactor_destruct_local_chunks(Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  Size_Segment **size_segs = wspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  Chunk_Header **chunk_ptrs = NULL;
+  unsigned int chunk_ptr_num = 0;
+  
+  /* Find local chunk pointers' head and their number */
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    if(size_segs[i]->local_alloc){
+      chunk_ptr_num += size_segs[i]->chunk_num;
+      assert(local_chunks[i]);
+      if(!chunk_ptrs)
+        chunk_ptrs = local_chunks[i];
+    }
+  }
+  
+  /* Put local pfcs into the corresponding pools */
+  for(unsigned int i = 0; i < chunk_ptr_num; ++i){
+    if(chunk_ptrs[i])
+      wspace_put_pfc(wspace, chunk_ptrs[i]);
+  }
+  
+  /* Free mem for local chunk pointers */
+  STD_FREE(chunk_ptrs);
+  
+  /* Free mem for size segments (Chunk_Header**) */
+  STD_FREE(local_chunks);
+}
+
+static void allocator_clear_local_chunks(Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  Size_Segment **size_segs = wspace->size_segments;
+  Chunk_Header ***local_chunks = allocator->local_chunks;
+  
+  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
+    if(!size_segs[i]->local_alloc){
+      assert(!local_chunks[i]);
+      continue;
+    }
+    Chunk_Header **chunks = local_chunks[i];
+    assert(chunks);
+    for(unsigned int j = size_segs[i]->chunk_num; j--;){
+      if(chunks[j])
+        wspace_put_pfc(wspace, chunks[j]);
+      chunks[j] = NULL;
+    }
+  }
+}
+
+static void gc_clear_mutator_local_chunks(GC *gc)
+{
+#ifdef USE_MARK_SWEEP_GC
+  /* release the local chunks of each mutator in the mark-sweep-only GC */
+  Mutator *mutator = gc->mutator_list;
+  while(mutator){
+    allocator_clear_local_chunks((Allocator*)mutator);
+    mutator = mutator->next;
+  }
+#endif
+}
+
+void gc_clear_collector_local_chunks(GC *gc)
+{
+  if(!gc_match_kind(gc, MAJOR_COLLECTION)) return;
+  /* release the local chunks of each collector in the generational GC */
+  for(unsigned int i = gc->num_collectors; i--;){
+    allocator_clear_local_chunks((Allocator*)gc->collectors[i]);
+  }
+}
+
+#ifdef USE_MARK_SWEEP_GC
+void wspace_set_space_statistic(Wspace *wspace)
+{
+  GC_MS *gc = (GC_MS*)wspace->gc;
+
+  for(unsigned int i = 0; i < gc->num_collectors; ++i){
+    wspace->surviving_obj_num += gc->collectors[i]->live_obj_num;
+    wspace->surviving_obj_size += gc->collectors[i]->live_obj_size;
+  }
+}
+#endif
+
+extern void wspace_decide_compaction_need(Wspace *wspace);
+extern void mark_sweep_wspace(Collector *collector);
+
+void wspace_collection(Wspace *wspace) 
+{
+  GC *gc = wspace->gc;
+  wspace->num_collections++;
+  
+  gc_clear_mutator_local_chunks(gc);
+  gc_clear_collector_local_chunks(gc);
+  
+#ifdef SSPACE_ALLOC_INFO
+  wspace_alloc_info_summary();
+#endif
+#ifdef SSPACE_CHUNK_INFO
+  wspace_chunks_info(wspace, FALSE);
+#endif
+  wspace_clear_used_chunk_pool(wspace);
+
+
+  wspace_decide_compaction_need(wspace);
+  if(wspace->need_compact && gc_match_kind(gc, MARK_SWEEP_GC)){
+    assert(gc_match_kind(gc, MS_COLLECTION));
+    gc->collect_kind = MS_COMPACT_COLLECTION;
+  }
+  if(wspace->need_compact || gc_match_kind(gc, MAJOR_COLLECTION))
+    wspace->need_fix = TRUE;
+
+  //printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", wspace->need_compact ? "COMPACT" : "NO COMPACT");
+#ifdef SSPACE_VERIFY
+  wspace_verify_before_collection(gc);
+  wspace_verify_vtable_mark(gc);
+#endif
+
+#ifdef SSPACE_TIME
+  wspace_gc_time(gc, TRUE);
+#endif
+
+  pool_iterator_init(gc->metadata->gc_rootset_pool);
+  wspace_clear_chunk_list(wspace);
+  
+  collector_execute_task(gc, (TaskType)mark_sweep_wspace, (Space*)wspace);
+
+#ifdef SSPACE_TIME
+  wspace_gc_time(gc, FALSE);
+#endif
+
+#ifdef SSPACE_CHUNK_INFO
+  wspace_chunks_info(wspace, FALSE);
+#endif
+
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,104 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SWEEP_SPACE_H_
+#define _SWEEP_SPACE_H_
+
+#include "../thread/gc_thread.h"
+#include "../thread/collector_alloc.h"
+#include "../thread/mutator.h"
+#include "../common/gc_common.h"
+#include "../common/gc_concurrent.h"
+
+/*
+ * The sweep space accommodates objects collected by mark-sweep
+ */
+
+struct Size_Segment;
+struct Free_Chunk_List;
+
+typedef struct Wspace {
+  /* <-- the first few fields are overloaded as Space */
+  void *heap_start;
+  void *heap_end;
+  POINTER_SIZE_INT reserved_heap_size;
+  POINTER_SIZE_INT committed_heap_size;
+  unsigned int num_collections;
+  int64 time_collections;
+  float survive_ratio;
+  unsigned int collect_algorithm;
+  GC *gc;
+  Boolean move_object;
+
+  Space_Statistics* space_statistic;
+
+  /* Size allocated since the last minor collection. */
+  volatile POINTER_SIZE_INT last_alloced_size;
+  /* Size allocated since the last major collection. */
+  volatile POINTER_SIZE_INT accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  volatile POINTER_SIZE_INT total_alloced_size;
+
+  /* Size survived from last collection. */
+  POINTER_SIZE_INT last_surviving_size;
+  /* Size survived after a certain period. */
+  POINTER_SIZE_INT period_surviving_size;  
+
+  /* END of Space --> */
+  
+  Boolean need_compact;
+  Boolean need_fix;   /* There are repointed refs that need fixing */
+  Size_Segment **size_segments;
+  Pool ***pfc_pools;
+  Pool ***pfc_pools_backup;
+  Pool* used_chunk_pool;
+  Pool* unreusable_normal_chunk_pool;
+  Pool* live_abnormal_chunk_pool;
+  Free_Chunk_List *aligned_free_chunk_lists;
+  Free_Chunk_List *unaligned_free_chunk_lists;
+  Free_Chunk_List *hyper_free_chunk_list;
+  POINTER_SIZE_INT surviving_obj_num;
+  POINTER_SIZE_INT surviving_obj_size;
+} Wspace;
+
+#ifdef USE_MARK_SWEEP_GC
+void wspace_set_space_statistic(Wspace *wspace);
+#endif
+
+Wspace *wspace_initialize(GC *gc, void *start, POINTER_SIZE_INT wspace_size, POINTER_SIZE_INT commit_size);
+void wspace_destruct(Wspace *wspace);
+void wspace_reset_after_collection(Wspace *wspace);
+
+void *wspace_thread_local_alloc(unsigned size, Allocator *allocator);
+void *wspace_alloc(unsigned size, Allocator *allocator);
+
+void wspace_collection(Wspace *wspace);
+
+void allocator_init_local_chunks(Allocator *allocator);
+void allocactor_destruct_local_chunks(Allocator *allocator);
+void gc_init_collector_free_chunk_list(Collector *collector);
+
+POINTER_SIZE_INT wspace_free_memory_size(Wspace *wspace);
+
+
+#ifndef USE_MARK_SWEEP_GC
+#define gc_get_wspace(gc) ((Wspace*)gc_get_mos((GC_Gen*)(gc)))
+#else
+#define gc_get_wspace(gc) (gc_ms_get_wspace((GC_MS*)(gc)))
+#endif
+
+#endif // _SWEEP_SPACE_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,281 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#include "wspace.h"
+#include "wspace_chunk.h"
+//#include "wspace_mark_sweep.h"
+#include "wspace_alloc.h"
+#include "gc_ms.h"
+#include "../gen/gen.h"
+#include "../common/gc_concurrent.h"
+
+
+/* Only used in pfc_set_slot_index() */
+inline unsigned int first_free_index_in_color_word(POINTER_SIZE_INT word, POINTER_SIZE_INT alloc_color)
+{
+  for(unsigned int index = 0; index < BITS_PER_WORD; index += COLOR_BITS_PER_OBJ)
+    if(!(word & (alloc_color << index)))
+      return index;
+  
+  assert(0);  /* There must be a free obj in this table word */
+  return MAX_SLOT_INDEX;
+}
+
+/* Given an index word in the table, set the pfc's slot_index.
+ * The alloc_color argument can be cur_alloc_color or cur_mark_color,
+ * depending on the phase in which this func is called.
+ * In the sweeping phase, wspace has been marked but the alloc and mark colors have not been flipped yet,
+ * so we have to use cur_mark_color as alloc_color.
+ * In the compaction phase, the two colors have been flipped, so we use cur_alloc_color.
+ */
+void pfc_set_slot_index(Chunk_Header *chunk, unsigned int first_free_word_index, POINTER_SIZE_INT alloc_color)
+{
+  unsigned int index_in_word = first_free_index_in_color_word(chunk->table[first_free_word_index], alloc_color);
+  assert(index_in_word != MAX_SLOT_INDEX);
+  chunk->slot_index = composed_slot_index(first_free_word_index, index_in_word);
+}
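+
+/* Worked example (illustrative only, assuming COLOR_BITS_PER_OBJ == 2 and BITS_PER_WORD == 32):
+ * if table word 3 has its first clear alloc-color bit at index_in_word 10, then
+ *   slot_index = composed_slot_index(3, 10) = (3*32 + 10) / 2 = 53,
+ * i.e. slot 53 (counting from 0) of the chunk is the next slot free for allocation.
+ */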
+
+/* Search for the first free slot from the beginning of the table, and set it as the pfc's slot_index */
+void pfc_reset_slot_index(Chunk_Header *chunk)
+{
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int index_word_num = (chunk->slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i){
+    if(table[i] != cur_alloc_mask){
+      pfc_set_slot_index(chunk, i, cur_alloc_color);
+      return;
+    }
+  }
+}
+
+/* Alloc a small object (without a finalizer) in wspace, without grabbing a new free chunk */
+void *wspace_thread_local_alloc(unsigned size, Allocator *allocator)
+{
+  if(size > LARGE_OBJ_THRESHOLD) return NULL;
+  
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  
+  /* Flexible alloc mechanism:
+  Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
+  unsigned int seg_index = size_seg->seg_index;
+  */
+  unsigned int seg_index = (size-GC_OBJECT_ALIGNMENT) / MEDIUM_OBJ_THRESHOLD;
+  assert(seg_index <= 2);
+  Size_Segment *size_seg = wspace->size_segments[seg_index];
+  assert(size_seg->local_alloc);
+  
+  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
+  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
+  
+  Chunk_Header **chunks = allocator->local_chunks[seg_index];
+  Chunk_Header *chunk = chunks[index];  
+  if(!chunk){
+    mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
+    
+    chunk = wspace_get_pfc(wspace, seg_index, index);
+    //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
+    if(!chunk) return NULL;
+    chunk->status |= CHUNK_IN_USE;
+    chunks[index] = chunk;
+    
+    mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
+  }
+  void *p_obj = alloc_in_chunk(chunks[index]);
+
+  if(chunk->slot_index == MAX_SLOT_INDEX){
+    chunk->status = CHUNK_USED | CHUNK_NORMAL;
+    /*register to used chunk list.*/
+    wspace_register_used_chunk(wspace,chunk);
+    chunks[index] = NULL;
+    chunk = NULL;
+  }
+  
+  assert(!chunk || chunk->slot_index <= chunk->alloc_num);
+  assert(!chunk || chunk->slot_index < chunk->slot_num);
+  assert(p_obj);
+
+#ifdef SSPACE_ALLOC_INFO
+  wspace_alloc_info(size);
+#endif
+#ifdef SSPACE_VERIFY
+  wspace_verify_alloc(p_obj, size);
+#endif
+
+  return p_obj;
+}
+
+static void *wspace_alloc_normal_obj(Wspace *wspace, unsigned size, Allocator *allocator)
+{
+  Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
+  unsigned int seg_index = size_seg->seg_index;
+  
+  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
+  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
+  
+  Chunk_Header *chunk = NULL;
+  void *p_obj = NULL;
+  
+  if(size_seg->local_alloc){
+    Chunk_Header **chunks = allocator->local_chunks[seg_index];
+    chunk = chunks[index];
+    if(!chunk){
+      mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
+      chunk = wspace_get_pfc(wspace, seg_index, index);
+      if(!chunk){
+        chunk = (Chunk_Header*)wspace_get_normal_free_chunk(wspace);
+        if(chunk) normal_chunk_init(chunk, size);
+      }
+      //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
+      if(!chunk) return NULL;
+      chunk->status |= CHUNK_IN_USE;
+      chunks[index] = chunk;
+      mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS);
+    }    
+    
+    p_obj = alloc_in_chunk(chunks[index]);
+    
+    if(chunk->slot_index == MAX_SLOT_INDEX){
+      chunk->status = CHUNK_USED | CHUNK_NORMAL;
+      /*register to used chunk list.*/
+      wspace_register_used_chunk(wspace,chunk);
+      chunks[index] = NULL;
+    }
+    
+  } else {
+    gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL);
+
+    mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS);
+
+    if(USE_CONCURRENT_SWEEP){
+      while(gc_is_sweeping_global_normal_chunk()){
+        mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS);
+      }  
+    }
+
+    chunk = wspace_get_pfc(wspace, seg_index, index);
+    if(!chunk){
+      chunk = (Chunk_Header*)wspace_get_normal_free_chunk(wspace);
+      if(chunk) normal_chunk_init(chunk, size);
+    }
+    //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
+    if(!chunk) return NULL;
+    p_obj = alloc_in_chunk(chunk);
+
+    if(chunk->slot_index == MAX_SLOT_INDEX){
+      chunk->status = CHUNK_USED | CHUNK_NORMAL;
+      /*register to used chunk list.*/
+      wspace_register_used_chunk(wspace,chunk);
+      chunk = NULL;
+    }
+    
+    if(chunk){
+      wspace_put_pfc(wspace, chunk);
+    }
+    
+    mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS);
+  }
+  
+  return p_obj;
+}
+
+static void *wspace_alloc_super_obj(Wspace *wspace, unsigned size, Allocator *allocator)
+{
+  assert(size > SUPER_OBJ_THRESHOLD);
+
+  gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL);
+
+  unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
+  assert(chunk_size > SUPER_OBJ_THRESHOLD);
+  assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
+  
+  Chunk_Header *chunk;
+  if(chunk_size <= HYPER_OBJ_THRESHOLD)
+    chunk = (Chunk_Header*)wspace_get_abnormal_free_chunk(wspace, chunk_size);
+  else
+    chunk = (Chunk_Header*)wspace_get_hyper_free_chunk(wspace, chunk_size, FALSE);
+  
+  if(!chunk) return NULL;
+  abnormal_chunk_init(chunk, chunk_size, size);
+  if(is_obj_alloced_live()){
+    chunk->table[0] |= cur_mark_black_color;
+  } 
+  //FIXME: Need barrier here.   
+  //apr_memory_rw_barrier();
+  
+  chunk->table[0] |= cur_alloc_color;
+  set_super_obj_mask(chunk->base);
+  assert(chunk->status == CHUNK_ABNORMAL);
+  chunk->status = CHUNK_ABNORMAL | CHUNK_USED;
+  wspace_register_used_chunk(wspace, chunk);
+  assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
+  return chunk->base;
+}
+
+static void *wspace_try_alloc(unsigned size, Allocator *allocator)
+{
+  Wspace *wspace = gc_get_wspace(allocator->gc);
+  void *p_obj = NULL;
+  
+  if(size <= SUPER_OBJ_THRESHOLD)
+    p_obj = wspace_alloc_normal_obj(wspace, size, allocator);
+  else
+    p_obj = wspace_alloc_super_obj(wspace, size, allocator);
+
+#ifdef SSPACE_ALLOC_INFO
+  if(p_obj) wspace_alloc_info(size);
+#endif
+#ifdef SSPACE_VERIFY
+  if(p_obj) wspace_verify_alloc(p_obj, size);
+#endif
+
+#ifdef WSPACE_CONCURRENT_GC_STATS
+  if(p_obj && gc_is_concurrent_mark_phase()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK;
+#endif
+
+  return p_obj;
+}
+
+/* FIXME:: the collection should be separated from the allocation */
+void *wspace_alloc(unsigned size, Allocator *allocator)
+{
+  void *p_obj = NULL;
+  
+  /* First, try to allocate the object from the TLB (thread-local chunk) */
+  p_obj = wspace_try_alloc(size, allocator);
+  if(p_obj)  return p_obj;
+  
+  if(allocator->gc->in_collection) return NULL;
+  
+  vm_gc_lock_enum();
+  /* after acquiring the lock, retry in case another thread has already collected */
+  p_obj = wspace_try_alloc(size, allocator);
+  if(p_obj){
+    vm_gc_unlock_enum();
+    return p_obj;
+  }
+  gc_reclaim_heap(allocator->gc, GC_CAUSE_WSPACE_IS_FULL);
+  vm_gc_unlock_enum();
+
+#ifdef SSPACE_CHUNK_INFO
+  printf("Failure size: %x\n", size);
+#endif
+
+  p_obj = wspace_try_alloc(size, allocator);
+  
+  return p_obj;
+}

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h Wed Dec 26 02:17:10 2007
@@ -0,0 +1,216 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+#ifndef _SSPACE_ALLOC_H_
+#define _SSPACE_ALLOC_H_
+
+#include "wspace_chunk.h"
+#include "wspace_mark_sweep.h"
+#include "../common/gc_concurrent.h"
+#include "../common/collection_scheduler.h"
+
+extern POINTER_SIZE_INT cur_alloc_color;
+extern POINTER_SIZE_INT cur_alloc_mask;
+extern POINTER_SIZE_INT cur_mark_mask;
+
+
+inline Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  return (Boolean)((table[word_index] & (cur_alloc_color << index_in_word)) != 0);
+}
+
+inline unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
+{
+  unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
+  return color_bits_index/COLOR_BITS_PER_OBJ;
+}
+
+inline unsigned int next_free_index_in_color_word(POINTER_SIZE_INT word, unsigned int index)
+{
+  while(index < BITS_PER_WORD){
+    if(!(word & (cur_alloc_color << index)))
+      return index;
+    index += COLOR_BITS_PER_OBJ;
+  }
+  return MAX_SLOT_INDEX;
+}
+
+inline unsigned int next_alloc_index_in_color_word(POINTER_SIZE_INT word, unsigned int index)
+{
+  while(index < BITS_PER_WORD){
+    if(word & (cur_alloc_color << index))
+      return index;
+    index += COLOR_BITS_PER_OBJ;
+  }
+  return MAX_SLOT_INDEX;
+}
+
+inline unsigned int next_free_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
+{
+  assert(slot_is_alloc_in_table(table, slot_index));
+  ++slot_index;
+  
+  unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  for(; word_index <= max_word_index; ++word_index, index_in_word = 0){
+    if((table[word_index] & cur_alloc_mask) == cur_alloc_mask)
+      continue;
+    index_in_word = next_free_index_in_color_word(table[word_index], index_in_word);
+    if(index_in_word != MAX_SLOT_INDEX){
+      assert(index_in_word < BITS_PER_WORD);
+      return composed_slot_index(word_index, index_in_word);
+    }
+  }
+  
+  return MAX_SLOT_INDEX;
+}
+
+/* Only used in wspace compaction after sweeping now */
+inline unsigned int next_alloc_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
+{
+  unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+  
+  for(; word_index <= max_word_index; ++word_index, index_in_word = 0){
+    if(!table[word_index])
+      continue;
+    index_in_word = next_alloc_index_in_color_word(table[word_index], index_in_word);
+    if(index_in_word != MAX_SLOT_INDEX){
+      assert(index_in_word < BITS_PER_WORD);
+      return composed_slot_index(word_index, index_in_word);
+    }
+  }
+  
+  return MAX_SLOT_INDEX;
+}
+
+inline Partial_Reveal_Object *next_alloc_slot_in_chunk(Chunk_Header *chunk)
+{
+  POINTER_SIZE_INT *table = chunk->table;
+  
+  unsigned int slot_index = next_alloc_slot_index_in_table(table, chunk->slot_index, chunk->slot_num);
+  assert((slot_index == MAX_SLOT_INDEX)
+            || (slot_index < chunk->slot_num) && slot_is_alloc_in_table(table, slot_index));
+  if(slot_index == MAX_SLOT_INDEX)
+    return NULL;
+  Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)slot_index_to_addr(chunk, slot_index);
+  chunk->slot_index = slot_index + 1;
+  return p_obj;
+}
+
+inline void clear_free_slot_in_table(POINTER_SIZE_INT *table, unsigned int ceiling_slot_index)
+{
+  assert(ceiling_slot_index && ceiling_slot_index != MAX_SLOT_INDEX);
+  unsigned int index_word_num = ceiling_slot_index / SLOT_NUM_PER_WORD_IN_TABLE;
+  memset(table, 0, BYTES_PER_WORD*index_word_num);
+  unsigned int bits_need_clear = ceiling_slot_index % SLOT_NUM_PER_WORD_IN_TABLE;
+  if(!bits_need_clear) return;
+  POINTER_SIZE_INT bit_mask = ~(((POINTER_SIZE_INT)1 << (bits_need_clear*COLOR_BITS_PER_OBJ)) - 1);
+  table[index_word_num] &= bit_mask;
+}
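+
+/* Worked example (illustrative only, assuming SLOT_NUM_PER_WORD_IN_TABLE == 16 and
+ * COLOR_BITS_PER_OBJ == 2): with ceiling_slot_index == 37, index_word_num == 37/16 == 2,
+ * so table[0..1] (slots 0..31) are zeroed by the memset; bits_need_clear == 37%16 == 5, so the
+ * low 5*2 == 10 bits of table[2] (slots 32..36) are cleared by the mask, while the color bits
+ * of slot 37 and above are preserved.
+ */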
+
+inline void alloc_slot_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+  assert(!slot_is_alloc_in_table(table, slot_index));
+  
+  unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+  unsigned int word_index = color_bits_index / BITS_PER_WORD;
+  unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+    
+  volatile POINTER_SIZE_INT *p_color_word = &table[word_index];
+  assert(p_color_word);
+  
+  POINTER_SIZE_INT mark_alloc_color = cur_alloc_color << index_in_word;
+  
+  POINTER_SIZE_INT old_word = *p_color_word;  
+  
+  POINTER_SIZE_INT new_word = old_word | mark_alloc_color;
+  while(new_word != old_word) {
+    POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+    if(temp == old_word){
+      return; /* CAS succeeded: this thread set the alloc color bit */
+    }
+    old_word = *p_color_word;
+    assert(!slot_is_alloc_in_table(table, slot_index));
+    
+    new_word = old_word | mark_alloc_color;
+  }
+  return;
+  
+}
+
+/* We don't enable fresh chunk alloc for now,
+ * because we observed a performance drop from the extra conditional statement when there are not many fresh chunks.
+ */
+//#define ENABLE_FRESH_CHUNK_ALLOC
+
+/* 1. No synchronization is needed: this is an allocator-local chunk.
+ * 2. If this chunk runs out of space, the chunk pointer is cleared,
+ *    so it is important to pass an allocator's local chunk pointer as the argument when invoking this func.
+ */
+inline void *alloc_in_chunk(Chunk_Header* &chunk)
+{
+  POINTER_SIZE_INT *table = chunk->table;
+  unsigned int slot_index = chunk->slot_index;
+  
+  assert(chunk->alloc_num < chunk->slot_num);
+  ++chunk->alloc_num;
+  assert(chunk->base);
+  void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
+
+  /* marking black is done here because of the race condition between allocation ops and the color flip */
+  if(p_obj && is_obj_alloced_live())
+    obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
+  
+  //FIXME: Need barrier here.
+  //apr_memory_rw_barrier();
+  
+  alloc_slot_in_table(table, slot_index);
+  if(chunk->status & CHUNK_NEED_ZEROING)
+    memset(p_obj, 0, chunk->slot_size);
+#ifdef SSPACE_VERIFY
+  wspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+#endif
+
+#ifdef ENABLE_FRESH_CHUNK_ALLOC
+  if(chunk->status & CHUNK_FRESH){
+    ++slot_index;
+    chunk->slot_index = (slot_index < chunk->slot_num) ? slot_index : MAX_SLOT_INDEX;
+  } else
+#endif
+
+  chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
+
+  if(chunk->alloc_num == chunk->slot_num) chunk->slot_index = MAX_SLOT_INDEX;  
+  return p_obj;
+}
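+
+/* Usage sketch (illustrative only), mirroring the callers in wspace_alloc.cpp: the argument
+ * must be the allocator's stored local chunk pointer itself (e.g. chunks[index]), not a copy,
+ * because the parameter is a reference to that pointer.
+ *
+ *   Chunk_Header **chunks = allocator->local_chunks[seg_index];
+ *   Chunk_Header *chunk = chunks[index];
+ *   void *p_obj = alloc_in_chunk(chunks[index]);
+ *   if(chunk->slot_index == MAX_SLOT_INDEX){
+ *     ... the chunk is exhausted: register it as used and clear chunks[index] ...
+ *   }
+ */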
+
+inline void set_super_obj_mask(void *large_obj)
+{ ((Partial_Reveal_Object*)large_obj)->obj_info |= SUPER_OBJ_MASK; }
+
+#endif // _SSPACE_ALLOC_H_

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_alloc.h
------------------------------------------------------------------------------
    svn:eol-style = native

Added: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
URL: http://svn.apache.org/viewvc/harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp?rev=606876&view=auto
==============================================================================
--- harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp (added)
+++ harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp Wed Dec 26 02:17:10 2007
@@ -0,0 +1,913 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+#include "gc_ms.h"
+#include "wspace.h"
+#include "wspace_chunk.h"
+#include "wspace_verify.h"
+
+/* PFC stands for partially free chunk */
+static Size_Segment *size_segments[SIZE_SEGMENT_NUM];
+static Pool **pfc_pools[SIZE_SEGMENT_NUM];
+static Pool **pfc_pools_backup[SIZE_SEGMENT_NUM];
+static Boolean  *pfc_steal_flags[SIZE_SEGMENT_NUM];
+
+static Free_Chunk_List  aligned_free_chunk_lists[NUM_ALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List  unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List  hyper_free_chunk_list;
+
+
+static void init_size_segment(Size_Segment *seg, unsigned int size_min, unsigned int size_max, unsigned int gran_shift_bits, Boolean local_alloc)
+{
+  seg->size_min = size_min;
+  seg->size_max = size_max;
+  seg->local_alloc = local_alloc;
+  seg->chunk_num = (seg->size_max - seg->size_min) >> gran_shift_bits;
+  seg->gran_shift_bits = gran_shift_bits;
+  seg->granularity = (POINTER_SIZE_INT)(1 << gran_shift_bits);
+  seg->gran_low_mask = seg->granularity - 1;
+  seg->gran_high_mask = ~seg->gran_low_mask;
+}
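+
+/* Worked example (illustrative numbers only): init_size_segment(seg, 0, 128, 3, TRUE) gives
+ * granularity == 8, chunk_num == (128-0)>>3 == 16, gran_low_mask == 0x7 and gran_high_mask == ~0x7,
+ * i.e. the segment covers 16 slot-size classes spaced 8 bytes apart; the masks are presumably
+ * what size rounding to that granularity relies on.
+ */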
+
+void wspace_init_chunks(Wspace *wspace)
+{
+  unsigned int i, j;
+  
+  /* Init size segments */
+  Size_Segment *size_seg_start = (Size_Segment*)STD_MALLOC(sizeof(Size_Segment) * SIZE_SEGMENT_NUM);
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    size_segments[i] = size_seg_start + i;
+    size_segments[i]->seg_index = i;
+  }
+  init_size_segment(size_segments[0], 0, MEDIUM_OBJ_THRESHOLD, SMALL_GRANULARITY_BITS, SMALL_IS_LOCAL_ALLOC);
+  init_size_segment(size_segments[1], MEDIUM_OBJ_THRESHOLD, LARGE_OBJ_THRESHOLD, MEDIUM_GRANULARITY_BITS, MEDIUM_IS_LOCAL_ALLOC);
+  init_size_segment(size_segments[2], LARGE_OBJ_THRESHOLD, SUPER_OBJ_THRESHOLD, LARGE_GRANULARITY_BITS, LARGE_IS_LOCAL_ALLOC);
+  
+  /* Init partially free chunk pools */
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    pfc_pools[i] = (Pool**)STD_MALLOC(sizeof(Pool*) * size_segments[i]->chunk_num);
+    pfc_pools_backup[i] = (Pool**)STD_MALLOC(sizeof(Pool*) * size_segments[i]->chunk_num);
+    pfc_steal_flags[i] = (Boolean*)STD_MALLOC(sizeof(Boolean) * size_segments[i]->chunk_num);
+    for(j=size_segments[i]->chunk_num; j--;){
+      pfc_pools[i][j] = sync_pool_create();
+      pfc_pools_backup[i][j] = sync_pool_create();
+      pfc_steal_flags[i][j] = FALSE;
+    }
+  }
+  
+  /* Init aligned free chunk lists */
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_init(&aligned_free_chunk_lists[i]);
+  
+  /* Init unaligned free chunk lists */
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_init(&unaligned_free_chunk_lists[i]);
+  
+  /* Init the hyper free chunk list */
+  free_chunk_list_init(&hyper_free_chunk_list);
+  
+  wspace->size_segments = size_segments;
+  wspace->pfc_pools = pfc_pools;
+  wspace->pfc_pools_backup = pfc_pools_backup;
+  wspace->used_chunk_pool = sync_pool_create();
+  wspace->unreusable_normal_chunk_pool = sync_pool_create();
+  wspace->live_abnormal_chunk_pool = sync_pool_create();
+  wspace->aligned_free_chunk_lists = aligned_free_chunk_lists;
+  wspace->unaligned_free_chunk_lists = unaligned_free_chunk_lists;
+  wspace->hyper_free_chunk_list = &hyper_free_chunk_list;
+  
+  /* Init the first free chunk: from heap start to heap end */
+  Free_Chunk *free_chunk = (Free_Chunk*)wspace->heap_start;
+  free_chunk->adj_next = (Chunk_Header_Basic*)wspace->heap_end;
+  POINTER_SIZE_INT chunk_size = wspace->reserved_heap_size;
+  assert(chunk_size > CHUNK_GRANULARITY && !(chunk_size % CHUNK_GRANULARITY));
+  wspace_put_free_chunk(wspace, free_chunk);
+}
+
+static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, Boolean &steal_flag)
+{
+  Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pool);
+  while(chunk){
+    steal_threshold--;
+    if(!steal_threshold)
+      break;
+    chunk = chunk->next;
+  }
+  steal_flag = steal_threshold ? FALSE : TRUE;
+}
+
+void wspace_clear_chunk_list(Wspace* wspace)
+{
+  unsigned int i, j;
+  GC* gc = wspace->gc;
+  unsigned int collector_num = gc->num_collectors;
+  unsigned int steal_threshold = collector_num << PFC_STEAL_THRESHOLD;
+  
+  Pool*** pfc_pools = wspace->pfc_pools;
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    for(j = size_segments[i]->chunk_num; j--;){
+      Pool *pool = pfc_pools[i][j];
+      pfc_pool_set_steal_flag(pool, steal_threshold, pfc_steal_flags[i][j]);
+      pool_empty(pool);
+    }
+  }
+  
+  Pool*** pfc_pools_backup = wspace->pfc_pools_backup;
+  for(i = SIZE_SEGMENT_NUM; i--;){
+    for(j = size_segments[i]->chunk_num; j--;){
+      Pool *pool = pfc_pools_backup[i][j];
+      pfc_pool_set_steal_flag(pool, steal_threshold, pfc_steal_flags[i][j]);
+      pool_empty(pool);
+    }
+  }
+  
+  for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_clear(&aligned_free_chunk_lists[i]);
+  
+  for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_chunk_list_clear(&unaligned_free_chunk_lists[i]);
+  
+  free_chunk_list_clear(&hyper_free_chunk_list);
+}
+
+/* Simply put the free chunk into the corresponding list.
+ * Don't merge contiguous free chunks here;
+ * the merging job is done in the merging phase.
+ */
+static void list_put_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+
+  lock(list->lock);
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  unlock(list->lock);
+}
+
+static void list_put_free_chunk_to_head(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  
+  chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+  chunk->next = NULL;
+
+  lock(list->lock);
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  unlock(list->lock);
+}
+
+static void list_put_free_chunk_to_tail(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  //modified for concurrent sweep.
+  //chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+  chunk->next = NULL;
+
+  lock(list->lock);
+  chunk->prev = list->tail;
+  if(list->head)
+    list->tail->next = chunk;
+  list->tail = chunk;
+  if(!list->head)
+    list->head = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  unlock(list->lock);
+}
+
+
+
+/* The difference between this and the normal version above is that this func needn't take the list lock.
+ * It is meant to be called from the chunk-partitioning functions.
+ * Please refer to the comments in wspace_get_hyper_free_chunk().
+ */
+static void list_put_hyper_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+
+  /* lock(list->lock);
+   * either the list lock is already held, as when getting a free chunk and partitioning it,
+   * or it need not be held at all, as during wspace initialization and the merging phase
+   */
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+  //unlock(list->lock);
+}
+
+static void list_put_hyper_free_chunk_to_head(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  chunk->status = CHUNK_FREE;
+  chunk->prev = NULL;
+
+  chunk->next = list->head;
+  if(list->head)
+    list->head->prev = chunk;
+  list->head = chunk;
+  if(!list->tail)
+    list->tail = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+}
+
+static void list_put_hyper_free_chunk_to_tail(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+  //modified for concurrent sweep.
+  //chunk->status = CHUNK_FREE;
+  chunk->next = NULL;
+
+  chunk->prev = list->tail;
+  if(list->tail)
+    list->tail->next = chunk;
+  list->tail = chunk;
+  if(!list->head)
+    list->head = chunk;
+  assert(list->chunk_num < ~((unsigned int)0));
+  ++list->chunk_num;
+}
+
+
+
+static Free_Chunk *free_list_get_head(Free_Chunk_List *list)
+{
+  lock(list->lock);
+  Free_Chunk *chunk = list->head;
+  if(chunk){
+    list->head = chunk->next;
+    if(list->head)
+      list->head->prev = NULL;
+    else
+      list->tail = NULL;
+    assert(list->chunk_num);
+    --list->chunk_num;
+    //assert(chunk->status == CHUNK_FREE);
+    assert(chunk->status & CHUNK_FREE);
+  }
+  unlock(list->lock);
+  return chunk;
+}
+
+void wspace_put_free_chunk(Wspace *wspace, Free_Chunk *chunk)
+{
+  POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  if(chunk_size > HYPER_OBJ_THRESHOLD)
+    list_put_hyper_free_chunk(wspace->hyper_free_chunk_list, chunk);
+  else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+    list_put_free_chunk(&wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+  else
+    list_put_free_chunk(&wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+}
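+
+/* Routing summary (descriptive only): a free chunk larger than HYPER_OBJ_THRESHOLD goes to the
+ * hyper free chunk list; otherwise, if both its address and its size are NORMAL_CHUNK aligned,
+ * it goes to the aligned free chunk list selected by ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size);
+ * otherwise it goes to the unaligned free chunk list selected by UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size).
+ */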
+
+void wspace_put_free_chunk_to_head(Wspace *wspace, Free_Chunk *chunk)
+{
+  POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  if(chunk_size > HYPER_OBJ_THRESHOLD){
+    lock(wspace->hyper_free_chunk_list->lock);
+    list_put_hyper_free_chunk_to_head(wspace->hyper_free_chunk_list, chunk);
+    unlock(wspace->hyper_free_chunk_list->lock);
+  }else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+    list_put_free_chunk_to_head(&wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+  else
+    list_put_free_chunk_to_head(&wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+
+}
+
+void wspace_put_free_chunk_to_tail(Wspace *wspace, Free_Chunk *chunk)
+{
+  POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  if(chunk_size > HYPER_OBJ_THRESHOLD){
+    lock(wspace->hyper_free_chunk_list->lock);
+    list_put_hyper_free_chunk_to_tail(wspace->hyper_free_chunk_list, chunk);
+    unlock(wspace->hyper_free_chunk_list->lock);
+  }else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+    list_put_free_chunk_to_tail(&wspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+  else
+    list_put_free_chunk_to_tail(&wspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+
+}
+
+static inline Free_Chunk *partition_normal_free_chunk(Wspace *wspace, Free_Chunk *chunk)
+{
+  assert(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES);
+  
+  Chunk_Header_Basic *adj_next = chunk->adj_next;
+  Free_Chunk *normal_chunk = (Free_Chunk*)(((POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES-1) & NORMAL_CHUNK_HIGH_MASK);
+  
+  if(chunk != normal_chunk){
+    assert(chunk < normal_chunk);
+    chunk->adj_next = (Chunk_Header_Basic*)normal_chunk;
+    wspace_put_free_chunk(wspace, chunk);
+  }
+  normal_chunk->adj_next = (Chunk_Header_Basic*)((POINTER_SIZE_INT)normal_chunk + NORMAL_CHUNK_SIZE_BYTES);
+  if(normal_chunk->adj_next != adj_next){
+    assert(normal_chunk->adj_next < adj_next);
+    Free_Chunk *back_chunk = (Free_Chunk*)normal_chunk->adj_next;
+    back_chunk->adj_next = adj_next;
+    wspace_put_free_chunk(wspace, back_chunk);
+  }
+  
+  normal_chunk->status = CHUNK_FREE;
+  return normal_chunk;
+}
+
+/* Partition the free chunk into two free chunks:
+ * the returned (tail) one has size chunk_size;
+ * the remaining (head) one is inserted into a free chunk list according to its size
+ */
+static inline Free_Chunk *partition_abnormal_free_chunk(Wspace *wspace,Free_Chunk *chunk, unsigned int chunk_size)
+{
+  assert(CHUNK_SIZE(chunk) > chunk_size);
+  
+  Free_Chunk *new_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk->adj_next - chunk_size);
+  assert(chunk < new_chunk);
+  
+  new_chunk->adj_next = chunk->adj_next;
+  chunk->adj_next = (Chunk_Header_Basic*)new_chunk;
+  wspace_put_free_chunk(wspace, chunk);
+  new_chunk->status = CHUNK_FREE;
+  return new_chunk;
+}
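+
+/* Worked example (illustrative numbers only): if chunk spans [0x10000, 0x1C000) and chunk_size
+ * is 0x4000, then new_chunk starts at 0x1C000 - 0x4000 = 0x18000. The original chunk shrinks to
+ * [0x10000, 0x18000) and is put back into a free chunk list by wspace_put_free_chunk(), while
+ * the 0x4000-byte tail piece [0x18000, 0x1C000) is returned to the caller.
+ */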
+
+Free_Chunk *wspace_get_normal_free_chunk(Wspace *wspace)
+{
+  Free_Chunk_List *aligned_lists = wspace->aligned_free_chunk_lists;
+  Free_Chunk_List *unaligned_lists = wspace->unaligned_free_chunk_lists;
+  Free_Chunk_List *list = NULL;
+  Free_Chunk *chunk = NULL;
+  
+  /* Search in aligned chunk lists first */
+  unsigned int index = 0;
+  while(index < NUM_ALIGNED_FREE_CHUNK_BUCKET){
+    list = &aligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES)
+        chunk = partition_normal_free_chunk(wspace, chunk);
+      //zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    index++;
+  }
+  assert(!chunk);
+  
+  /* Search the unaligned chunk lists for a larger chunk.
+     (NORMAL_CHUNK_SIZE_BYTES + (NORMAL_CHUNK_SIZE_BYTES-CHUNK_GRANULARITY))
+     is the smallest size that guarantees the chunk contains an aligned normal chunk.
+  */
+  index = UNALIGNED_CHUNK_SIZE_TO_INDEX((NORMAL_CHUNK_SIZE_BYTES<<1) - CHUNK_GRANULARITY);
+  while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      chunk = partition_normal_free_chunk(wspace, chunk);
+      assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+      //zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    index++;
+  }
+  assert(!chunk);
+  
+  /* search in the hyper free chunk list */
+  chunk = wspace_get_hyper_free_chunk(wspace, NORMAL_CHUNK_SIZE_BYTES, TRUE);
+  assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+  
+  return chunk;
+}
+
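+/* Get one free chunk of exactly chunk_size bytes for an unaligned ("abnormal") request.
+ * Search order: the unaligned lists whose chunk sizes are multiples of chunk_size
+ * (so a split, if needed, presumably leaves a remainder still usable for this size class),
+ * then the hyper free chunk list, and finally any unaligned list with chunks large enough
+ * to be split down to chunk_size.
+ */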
+Free_Chunk *wspace_get_abnormal_free_chunk(Wspace *wspace, unsigned int chunk_size)
+{
+  assert(chunk_size > CHUNK_GRANULARITY);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+  
+  Free_Chunk_List *unaligned_lists = wspace->unaligned_free_chunk_lists;
+  Free_Chunk_List *list = NULL;
+  Free_Chunk *chunk = NULL;
+  unsigned int index = 0;
+  
+  /* Search the lists whose chunk sizes are multiples of chunk_size */
+  unsigned int search_size = chunk_size;
+  while(search_size <= HYPER_OBJ_THRESHOLD){
+    index = UNALIGNED_CHUNK_SIZE_TO_INDEX(search_size);
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(search_size > chunk_size)
+        chunk = partition_abnormal_free_chunk(wspace, chunk, chunk_size);
+      zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    search_size += chunk_size;
+  }
+  assert(!chunk);
+  
+  /* search in the hyper free chunk list */
+  chunk = wspace_get_hyper_free_chunk(wspace, chunk_size, FALSE);
+  if(chunk) return chunk;
+  
+  /* Search again in the unaligned chunk lists, taking any chunk large enough to be split down to chunk_size */
+  index = UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size);
+  while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+    list = &unaligned_lists[index];
+    if(list->head)
+      chunk = free_list_get_head(list);
+    if(chunk){
+      if(index > UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size))
+        chunk = partition_abnormal_free_chunk(wspace, chunk, chunk_size);
+      zeroing_free_chunk(chunk);
+      return chunk;
+    }
+    ++index;
+  }
+  
+  return chunk;
+}
+
+Free_Chunk *wspace_get_hyper_free_chunk(Wspace *wspace, unsigned int chunk_size, Boolean is_normal_chunk)
+{
+  assert(chunk_size >= CHUNK_GRANULARITY);
+  assert(!(chunk_size % CHUNK_GRANULARITY));
+  
+  Free_Chunk_List *list = wspace->hyper_free_chunk_list;
+  lock(list->lock);
+  
+  Free_Chunk *prev_chunk = NULL;
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+    if(CHUNK_SIZE(chunk) >= chunk_size){
+      Free_Chunk *next_chunk = chunk->next;
+      if(prev_chunk)
+        prev_chunk->next = next_chunk;
+      else
+        list->head = next_chunk;
+      if(next_chunk)
+        next_chunk->prev = prev_chunk;
+      else
+        list->tail = prev_chunk;
+      break;
+    }
+    prev_chunk = chunk;
+    chunk = chunk->next;
+  }
+  
+  /* unlock(list->lock);
+   * We move this unlock to the end of this function for the following reason.
+   * Two allocators might ask for a hyper chunk at the same time when the list
+   * holds only one chunk, which is large enough to satisfy both of them.
+   * If allocator 1 takes the list lock first, it grabs the unique chunk and would release the lock here.
+   * Allocator 2 then acquires the lock after allocator 1 releases it,
+   * finds no hyper chunk in the list and returns NULL,
+   * even though the unique hyper chunk is in fact large enough:
+   * once allocator 1 chops off one piece and puts the rest back into the list, allocator 2 can be satisfied.
+   * So releasing the lock here would give the wrong answer and make us invoke GC much earlier than needed.
+   */
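+  /* A possible interleaving if the lock were released here (sizes assumed for illustration;
+   * the list holds one 1MB chunk and both allocators need 64KB):
+   *   allocator 1: lock, unlink the 1MB chunk, unlock
+   *   allocator 2: lock, see an empty list, unlock, return NULL (possibly triggering GC)
+   *   allocator 1: split off its 64KB, put the remainder back
+   * Holding the lock across the split keeps allocator 2 from seeing the transiently empty list.
+   */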
+  
+  if(chunk){
+    if(is_normal_chunk)
+      chunk = partition_normal_free_chunk(wspace, chunk);
+    else if(CHUNK_SIZE(chunk) > chunk_size)
+      chunk = partition_abnormal_free_chunk(wspace, chunk, chunk_size);
+    if(!is_normal_chunk)
+      zeroing_free_chunk(chunk);
+  }
+  
+  unlock(list->lock);
+  
+  return chunk;
+}
+
+void wspace_collect_free_chunks_to_list(Wspace *wspace, Free_Chunk_List *list)
+{
+  unsigned int i;
+  
+  for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    move_free_chunks_between_lists(list, &wspace->aligned_free_chunk_lists[i]);
+  
+  for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    move_free_chunks_between_lists(list, &wspace->unaligned_free_chunk_lists[i]);
+  
+  move_free_chunks_between_lists(list, wspace->hyper_free_chunk_list);
+  
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+    chunk->status = CHUNK_FREE | CHUNK_TO_MERGE;
+    chunk = chunk->next;
+  }
+}
+
+
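+/* Shared cursor over the two-dimensional pfc_pools array (size segment x chunk index).
+ * The spin lock lets concurrent callers of wspace_grab_next_pfc_pool each take a distinct pool.
+ */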
+typedef struct PFC_Pool_Iterator {
+  volatile unsigned int seg_index;
+  volatile unsigned int chunk_index;
+  SpinLock lock;
+} PFC_Pool_Iterator;
+
+static PFC_Pool_Iterator pfc_pool_iterator;
+
+void wspace_init_pfc_pool_iterator(Wspace *wspace)
+{
+  assert(pfc_pool_iterator.lock == FREE_LOCK);
+  pfc_pool_iterator.seg_index = 0;
+  pfc_pool_iterator.chunk_index = 0;
+}
+
+void wspace_exchange_pfc_pool(Wspace *wspace)
+{
+  Pool*** empty_pfc_pools = wspace->pfc_pools;
+  wspace->pfc_pools = wspace->pfc_pools_backup;
+  wspace->pfc_pools_backup = empty_pfc_pools;
+}
+
+Pool *wspace_grab_next_pfc_pool(Wspace *wspace)
+{
+  Pool*** pfc_pools = wspace->pfc_pools;
+  Pool *pfc_pool = NULL;
+  
+  lock(pfc_pool_iterator.lock);
+  for(; pfc_pool_iterator.seg_index < SIZE_SEGMENT_NUM; ++pfc_pool_iterator.seg_index){
+    for(; pfc_pool_iterator.chunk_index < size_segments[pfc_pool_iterator.seg_index]->chunk_num; ++pfc_pool_iterator.chunk_index){
+      pfc_pool = pfc_pools[pfc_pool_iterator.seg_index][pfc_pool_iterator.chunk_index];
+      ++pfc_pool_iterator.chunk_index;
+      unlock(pfc_pool_iterator.lock);
+      return pfc_pool;
+    }
+    pfc_pool_iterator.chunk_index = 0;
+  }
+  unlock(pfc_pool_iterator.lock);
+  
+  return NULL;
+}
+
+#define min_value(x, y) (((x) < (y)) ? (x) : (y))
+
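+/* Try to steal a partial-free chunk from one of the next PFC_STEAL_NUM larger size classes
+ * in the same size segment, skipping classes whose pfc_steal_flags entry is not set.
+ */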
+Chunk_Header *wspace_steal_pfc(Wspace *wspace, unsigned int seg_index, unsigned int index)
+{
+  Size_Segment *size_seg = wspace->size_segments[seg_index];
+  Chunk_Header *pfc = NULL;
+  unsigned int max_index = min_value(index + PFC_STEAL_NUM + 1, size_seg->chunk_num);
+  ++index;
+  for(; index < max_index; ++index){
+    if(!pfc_steal_flags[seg_index][index]) continue;
+    pfc = wspace_get_pfc(wspace, seg_index, index);
+    if(pfc) return pfc;
+  }
+  return NULL;
+}
+
+static POINTER_SIZE_INT free_mem_in_pfc_pools(Wspace *wspace, Boolean show_chunk_info)
+{
+  Size_Segment **size_segs = wspace->size_segments;
+  Pool ***pfc_pools = wspace->pfc_pools;
+  POINTER_SIZE_INT free_mem_size = 0;
+  
+  for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){
+    for(unsigned int j = 0; j < size_segs[i]->chunk_num; ++j){
+      Pool *pfc_pool = pfc_pools[i][j];
+      if(pool_is_empty(pfc_pool))
+        continue;
+      pool_iterator_init(pfc_pool);
+      Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+      assert(chunk);
+      unsigned int slot_num = chunk->slot_num;
+      unsigned int chunk_num = 0;
+      unsigned int alloc_num = 0;
+      while(chunk){
+        assert(chunk->slot_num == slot_num);
+        ++chunk_num;
+        alloc_num += chunk->alloc_num;
+        chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
+      }
+      unsigned int total_slot_num = slot_num * chunk_num;
+      assert(alloc_num < total_slot_num);
+#ifdef SSPACE_CHUNK_INFO
+      if(show_chunk_info)
+        printf("Size: %x\tchunk num: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segs[i]), chunk_num, (float)alloc_num/total_slot_num);
+#endif
+      free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segs[i]) * (total_slot_num-alloc_num);
+      assert(free_mem_size < wspace->committed_heap_size);
+    }
+  }
+  
+  return free_mem_size;
+}
+
+static POINTER_SIZE_INT free_mem_in_free_lists(Wspace *wspace, Free_Chunk_List *lists, unsigned int list_num, Boolean show_chunk_info)
+{
+  POINTER_SIZE_INT free_mem_size = 0;
+  
+  for(unsigned int index = 0; index < list_num; ++index){
+    Free_Chunk *chunk = lists[index].head;
+    if(!chunk) continue;
+    POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+    assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+    unsigned int chunk_num = 0;
+    while(chunk){
+      assert(CHUNK_SIZE(chunk) == chunk_size);
+      ++chunk_num;
+      chunk = chunk->next;
+    }
+    free_mem_size += chunk_size * chunk_num;
+    assert(free_mem_size < wspace->committed_heap_size);
+#ifdef SSPACE_CHUNK_INFO
+    if(show_chunk_info)
+      printf("Free Size: %x\tnum: %d\n", chunk_size, chunk_num);
+#endif
+  }
+  
+  return free_mem_size;
+}
+
+static POINTER_SIZE_INT free_mem_in_hyper_free_list(Wspace *wspace, Boolean show_chunk_info)
+{
+  POINTER_SIZE_INT free_mem_size = 0;
+  
+  Free_Chunk_List *list = wspace->hyper_free_chunk_list;
+  Free_Chunk *chunk = list->head;
+  while(chunk){
+#ifdef SSPACE_CHUNK_INFO
+    if(show_chunk_info)
+      printf("Size: %x\n", CHUNK_SIZE(chunk));
+#endif
+    free_mem_size += CHUNK_SIZE(chunk);
+    assert(free_mem_size <= wspace->committed_heap_size);
+    chunk = chunk->next;
+  }
+  
+  return free_mem_size;
+}
+
+POINTER_SIZE_INT free_mem_in_wspace(Wspace *wspace, Boolean show_chunk_info)
+{
+  POINTER_SIZE_INT free_mem_size = 0;
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nPFC INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_pfc_pools(wspace, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_free_lists(wspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nUNALIGNED FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_free_lists(wspace, unaligned_free_chunk_lists, NUM_UNALIGNED_FREE_CHUNK_BUCKET, show_chunk_info);
+
+#ifdef SSPACE_CHUNK_INFO
+  if(show_chunk_info)
+    printf("\n\nSUPER FREE CHUNK INFO:\n\n");
+#endif
+  free_mem_size += free_mem_in_hyper_free_list(wspace, show_chunk_info);
+  
+  return free_mem_size;
+}
+
+
+#ifdef SSPACE_CHUNK_INFO
+void wspace_chunks_info(Wspace *wspace, Boolean show_info)
+{
+  if(!show_info) return;
+  
+  POINTER_SIZE_INT free_mem_size = free_mem_in_wspace(wspace, TRUE);
+  
+  float free_mem_ratio = (float)free_mem_size / wspace->committed_heap_size;
+  printf("\n\nFree mem ratio: %f\n\n", free_mem_ratio);
+}
+#endif
+
+
+/* Because this computation does not take the chunk list locks, its result is not exact, but it is accurate enough. */
+POINTER_SIZE_INT wspace_free_memory_size(Wspace *wspace)
+{
+  POINTER_SIZE_INT free_size = 0;
+  
+  vm_gc_lock_enum();
+  /*
+  for(unsigned int i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += NORMAL_CHUNK_SIZE_BYTES * (i+1) * wspace->aligned_free_chunk_lists[i].chunk_num;
+  
+  for(unsigned int i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+    free_size += CHUNK_GRANULARITY * (i+1) * wspace->unaligned_free_chunk_lists[i].chunk_num;
+  
+  Free_Chunk *hyper_chunk = wspace->hyper_free_chunk_list->head;
+  while(hyper_chunk){
+    free_size += CHUNK_SIZE(hyper_chunk);
+    hyper_chunk = hyper_chunk->next;
+  }
+  */
+  free_size = free_mem_in_wspace(wspace, FALSE);
+  vm_gc_unlock_enum();
+  
+  return free_size;
+}
+
+
+#ifdef SSPACE_ALLOC_INFO
+
+#define MEDIUM_THRESHOLD 256
+#define LARGE_THRESHOLD (1024)
+#define SUPER_THRESHOLD (6*KB)
+#define HYPER_THRESHOLD (64*KB)
+
+#define SMALL_OBJ_ARRAY_NUM  (MEDIUM_THRESHOLD >> 2)
+#define MEDIUM_OBJ_ARRAY_NUM (LARGE_THRESHOLD >> 4)
+#define LARGE_OBJ_ARRAY_NUM  (SUPER_THRESHOLD >> 6)
+#define SUPER_OBJ_ARRAY_NUM  (HYPER_THRESHOLD >> 10)
+
+volatile unsigned int small_obj_num[SMALL_OBJ_ARRAY_NUM];
+volatile unsigned int medium_obj_num[MEDIUM_OBJ_ARRAY_NUM];
+volatile unsigned int large_obj_num[LARGE_OBJ_ARRAY_NUM];
+volatile unsigned int super_obj_num[SUPER_OBJ_ARRAY_NUM];
+volatile unsigned int hyper_obj_num;
+
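+/* Histogram of allocation request sizes:
+ * 4-byte buckets up to MEDIUM_THRESHOLD, 16-byte buckets up to LARGE_THRESHOLD,
+ * 64-byte buckets up to SUPER_THRESHOLD, 1KB buckets up to HYPER_THRESHOLD,
+ * and a single counter for anything larger.
+ */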
+void wspace_alloc_info(unsigned int size)
+{
+  if(size <= MEDIUM_THRESHOLD)
+    atomic_inc32(&small_obj_num[(size>>2)-1]);
+  else if(size <= LARGE_THRESHOLD)
+    atomic_inc32(&medium_obj_num[(size>>4)-1]);
+  else if(size <= SUPER_THRESHOLD)
+    atomic_inc32(&large_obj_num[(size>>6)-1]);
+  else if(size <= HYPER_THRESHOLD)
+    atomic_inc32(&super_obj_num[(size>>10)-1]);
+  else
+    atomic_inc32(&hyper_obj_num);
+}
+
+void wspace_alloc_info_summary(void)
+{
+  unsigned int i;
+  
+  printf("\n\nNORMAL OBJ\n\n");
+  for(i = 0; i < SMALL_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<2, small_obj_num[i]);
+    small_obj_num[i] = 0;
+  }
+  
+  i = ((MEDIUM_THRESHOLD + (1<<4))>>4) - 1;
+  for(; i < MEDIUM_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<4, medium_obj_num[i]);
+    medium_obj_num[i] = 0;
+  }
+  
+  i = ((LARGE_THRESHOLD + (1<<6))>>6) - 1;
+  for(; i < LARGE_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<6, large_obj_num[i]);
+    large_obj_num[i] = 0;
+  }
+  
+  i = ((SUPER_THRESHOLD + (1<<10))>>10) - 1;
+  for(; i < SUPER_OBJ_ARRAY_NUM; i++){
+    printf("Size: %x\tnum: %d\n", (i+1)<<10, super_obj_num[i]);
+    super_obj_num[i] = 0;
+  }
+  
+  printf("\n\nHYPER OBJ\n\n");
+  printf("num: %d\n", hyper_obj_num);
+  hyper_obj_num = 0;
+}
+
+#endif
+
+#ifdef SSPACE_USE_FASTDIV
+
+static int total_malloc_bytes = 0;
+static char *cur_free_ptr = NULL;
+static int cur_free_bytes = 0;
+
+void *malloc_wrapper(int size)
+{
+	massert(size > 0);
+	if(!cur_free_ptr) {
+		cur_free_bytes = INIT_ALLOC_SIZE;
+		cur_free_ptr = (char*) STD_MALLOC(cur_free_bytes);
+	}
+	
+	massert(cur_free_bytes >= size);
+	
+	total_malloc_bytes += size;
+	cur_free_bytes -= size;
+	
+	void * ret = cur_free_ptr;
+	cur_free_ptr += size;
+	return ret;
+}
+
+void free_wrapper(int size)
+{
+	massert(size > 0);
+	massert(cur_free_ptr);
+	massert(total_malloc_bytes >= size);
+	cur_free_bytes += size;
+	total_malloc_bytes -= size;
+	cur_free_ptr -= size;
+}
+
+unsigned int *shift_table;
+unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING];
+unsigned int mask[MAX_SLOT_SIZE_AFTER_SHIFTING];
+static int already_inited = 0;
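+
+/* Build the lookup tables that the fast-division path presumably relies on:
+ * shift_table[s] holds the number of trailing zero bits of s, and, for every odd
+ * divisor d below 32, compact_table[d] and mask[d] are sized so that
+ * compact_table[d][j & mask[d]] == j / d for any multiple j of d up to MAX_ADDR_OFFSET.
+ * The table size is doubled until the masked indices of those multiples are
+ * collision-free, and unused trailing slots are returned to the arena.
+ */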
+void fastdiv_init()
+{
+	if(already_inited) return;
+	already_inited = 1;
+	
+	int i;
+	int shift_table_size = (MAX_SLOT_SIZE + 1) * sizeof shift_table[0];
+	shift_table = (unsigned int *)malloc_wrapper(shift_table_size);
+	memset(shift_table, 0x00, shift_table_size) ;
+	for(i = MAX_SLOT_SIZE + 1;i--;) {
+		shift_table[i] = 0;
+		int v = i;
+		while(v && !(v & 1)) {
+			v >>= 1;
+			shift_table[i]++;
+		}
+	}
+
+	memset(compact_table, 0x00, sizeof compact_table);
+	memset(mask, 0x00, sizeof mask);
+	for(i = 1;i < 32;i += 2) {
+		int cur = 1;
+		unsigned short *p = NULL;
+		while(1) {
+			p = (unsigned short*)malloc_wrapper(cur * sizeof p[0]);
+			memset(p, 0xff, cur * sizeof p[0]);
+			int j;
+			for(j = 0; j <= MAX_ADDR_OFFSET;j += i) {
+				int pos = j & (cur - 1);
+				if(p[pos] == 0xffff) {
+					p[pos] = j / i;
+				}else {
+					break;
+				}
+			}
+			if(j <= MAX_ADDR_OFFSET) {
+				free_wrapper(cur * sizeof p[0]);
+				cur <<= 1;
+				p = NULL;
+			}else {
+				break;
+			}
+		}
+		massert(p);
+		mask[i] = cur - 1;
+		while(cur && p[cur - 1] == 0xffff) {
+			free_wrapper(sizeof p[0]);
+			cur--;
+		}
+		compact_table[i] = p;
+	}
+}
+
+#endif
+
+

Propchange: harmony/enhanced/drlvm/trunk/vm/gc_gen/src/mark_sweep/wspace_chunk.cpp
------------------------------------------------------------------------------
    svn:eol-style = native