Posted to commits@harmony.apache.org by ge...@apache.org on 2006/10/03 04:26:49 UTC

svn commit: r452293 [1/2] - in /incubator/harmony/enhanced/drlvm/trunk/vm: gc/src/ vmcore/include/

Author: geirm
Date: Mon Oct  2 19:26:48 2006
New Revision: 452293

URL: http://svn.apache.org/viewvc?view=rev&rev=452293
Log:
HARMONY-1372

GC v41 added em64t support

This also apparently cures HARMONY-1661

Tested on Ubuntu - please test properly
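
The core of the change is the new Slot abstraction (slot.h/slot.cpp, added
below; their contents are presumably in part [2/2] of this mail). It replaces
raw Partial_Reveal_Object** slots with 32-bit Reference values so the
collector can run with compressed references on em64t. Below is a minimal
sketch of the wrapper, inferred only from the call sites in this diff; the
committed slot.h may encode things differently, and the make_direct_root()
call in collect_slide_compact.cpp suggests the real class also handles
uncompressed root locations:

    // Hypothetical sketch only -- inferred from usage, not the committed slot.h.
    struct Partial_Reveal_Object;        // defined elsewhere in the GC
    typedef unsigned VT32;               // 32-bit vtable word
    typedef unsigned Reference;          // 32-bit, possibly compressed, reference
    typedef unsigned char* Ptr;

    extern Ptr heap_base;                // base address used for compression

    class Slot {
        Reference* slot;                 // address of the reference field
    public:
        explicit Slot(Reference* s) : slot(s) {}

        // decompress: stored 32-bit offset -> full object pointer
        Partial_Reveal_Object* read() {
            return (Partial_Reveal_Object*)(heap_base + *slot);
        }
        // compress: full object pointer -> 32-bit offset
        void write(Partial_Reveal_Object* obj) {
            *slot = (Reference)((Ptr)obj - heap_base);
        }
        // raw bit access, used when obj_info words are threaded through slots
        unsigned read_raw()            { return *slot; }
        void     write_raw(unsigned v) { *slot = v; }
        // 32-bit encoding of the slot's own address, fits in an obj_info word
        unsigned addr() { return (unsigned)((Ptr)slot - heap_base); }
    };

Under this encoding a null reference is offset 0, i.e. heap.base itself, which
would explain both the new heap_null constant and the RESERVED_FOR_HEAP_NULL
gap that full_gc() now leaves at the bottom of the heap.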



Added:
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slot.cpp   (with props)
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slot.h   (with props)
Modified:
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
    incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/timer.h
    incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/include/version_svn_tag.h

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.cpp Mon Oct  2 19:26:48 2006
@@ -26,7 +26,7 @@
 #include "timer.h"
 #include <stdio.h>
 
-fast_list<Partial_Reveal_Object**,65536> slots;
+fast_list<Slot,65536> slots;
 reference_vector soft_references;
 reference_vector weak_references;
 reference_vector phantom_references;
@@ -150,7 +150,7 @@
             i != finalizible_objects.end();) {
 
         Partial_Reveal_Object *obj = *i;
-        if (!obj) { ++i; continue; }
+        assert (obj);
 
         int info = obj->obj_info();
         if (info & heap_mark_phase) {
@@ -193,28 +193,28 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *ref = *i;
 
-        Partial_Reveal_Object **referent = (Partial_Reveal_Object**) ((Ptr)ref + global_referent_offset);
-        Partial_Reveal_Object* obj = *referent;
+        Slot referent( (Reference*) ((Ptr)ref + global_referent_offset) );
+        Partial_Reveal_Object* obj = referent.read();
 
-        if (obj == 0) {
+        if (obj == heap_null) {
             // reference already cleared
             continue;
         }
 
-        int info = obj->obj_info();
+        unsigned info = obj->obj_info();
         if (info & heap_mark_phase) {
             // object marked, is it moved?
-            int vt = obj->vt();
+            unsigned vt = obj->vt();
             if (!(vt & FORWARDING_BIT)) continue;
             // moved, updating referent field
-            *referent = (Partial_Reveal_Object*)(vt & ~FORWARDING_BIT);
+            referent.write( fw_to_pointer(vt & ~FORWARDING_BIT) );
             continue;
         }
 
         // object not marked
-        *referent = 0;
+        referent.write(heap_null);
         TRACE2("gc.ref", "process_special_references: reference enquequed");
-        vm_enqueue_reference((Managed_Object_Handle*)ref);
+        vm_enqueue_reference((Managed_Object_Handle)ref);
     }
 }
 
@@ -261,11 +261,11 @@
 
 unsigned char *full_gc(int size) {
     Timer gc_time("FULL_GC", "gc.time.total");
-    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base;
+    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base + RESERVED_FOR_HEAP_NULL;
     unsigned char *res = slide_gc(size);
 
     heap.Tcompact = (float) gc_time.dt();
-    heap.working_set_size = (float) (heap.old_objects.end - heap.base);
+    heap.working_set_size = (float) (heap.old_objects.pos - heap.base);
     return res;
 }
 
@@ -279,12 +279,12 @@
     gc_slide_process_special_references(phantom_references);
 
     TIME(gc_slide_move_all,());
+    roots_update();
     gc_slide_postprocess_special_references(soft_references);
     gc_slide_postprocess_special_references(weak_references);
-    finalize_objects();
     gc_slide_postprocess_special_references(phantom_references);
-    gc_process_interior_pointers();
     gc_deallocate_mark_bits();
+    finalize_objects();
 
     heap_mark_phase ^= 3;
     // reset thread-local allocation areas
@@ -305,19 +305,18 @@
 
     pinned_areas.clear();
     pinned_areas_unsorted.clear();
+    roots_clear();
     gc_type = GC_SLIDE_COMPACT;
     gc_allocate_mark_bits();
-    gc_reset_interior_pointers();
 
     TIME(enumerate_universe,());
     return finish_slide_gc(size, 0);
 }
 
-void transition_copy_to_sliding_compaction(fast_list<Partial_Reveal_Object**,65536>& slots) {
+void transition_copy_to_sliding_compaction(fast_list<Slot,65536>& slots) {
     INFO2("gc.verbose", "COPY -> COMP on go transition");
     gc_type = GC_SLIDE_COMPACT;
     gc_allocate_mark_bits();
-    gc_reset_interior_pointers();
     gc_slide_process_transitional_slots(slots);
 }
 
@@ -328,6 +327,7 @@
 
     pinned_areas.clear();
     pinned_areas_unsorted.clear();
+    roots_clear();
 
     gc_type = GC_COPY;
     TIME(enumerate_universe,());
@@ -345,11 +345,13 @@
         heap.Tcopy = (float) gc_time.dt();
         return res;
     }
-    finalize_objects();
     process_special_references(phantom_references);
+    roots_update();
+    finalize_objects();
 
     heap_mark_phase ^= 3;
     gc_copy_update_regions();
+    heap.Tcopy = (float) gc_time.dt();
     after_copy_gc();
     // reset thread-local allocation areas
     clear_thread_local_buffers();
@@ -358,7 +360,6 @@
     vm_resume_threads_after();
     notify_gc_end();
     TRACE2("gc.mem", "copy_gc = " << res);
-    heap.Tcopy = (float) gc_time.dt();
     return res;
 }
 
@@ -366,12 +367,15 @@
     Timer gc_time("FORCE_GC", "gc.time.total");
     prepare_gc();
 
+    roots_clear();
+
     gc_type = GC_FORCED;
     TIME(enumerate_universe,());
     TIME(process_special_references,(soft_references));
     TIME(process_special_references,(weak_references));
     TIME(process_finalizable_objects,());
     TIME(process_special_references,(phantom_references));
+    roots_update();
     TIME(finalize_objects,());
 
     heap_mark_phase ^= 3;
@@ -382,28 +386,3 @@
     notify_gc_end();
 }
 
-void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
-    //Partial_Reveal_Object **ref1 = (Partial_Reveal_Object**)ref;
-    switch(gc_type) {
-        case GC_COPY: gc_copy_add_root_set_entry(ref, is_pinned); break;
-        case GC_FORCED: gc_forced_add_root_set_entry(ref, is_pinned); break;
-        case GC_SLIDE_COMPACT: gc_slide_add_root_set_entry(ref, is_pinned); break;
-        case GC_CACHE: gc_cache_add_root_set_entry(ref, is_pinned); break;
-                      
-        case GC_FULL:
-        default: abort();
-    }
-}
-
-void gc_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
-{
-    switch (gc_type) {
-        case GC_COPY: gc_copy_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
-        case GC_FORCED: gc_forced_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
-        case GC_SLIDE_COMPACT: gc_slide_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
-        case GC_CACHE: gc_cache_add_root_set_entry_interior_pointer(slot, offset, is_pinned); break;
-
-        case GC_FULL:
-        default: abort();
-    }
-}
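
The two dispatchers deleted above used to patch each root eagerly, per
collector, and kept separate InteriorPointer bookkeeping. They are replaced by
the roots_clear()/roots_update() pair declared in collect.h below
(implementation not in this part of the diff): root locations are recorded
during enumeration and written back once after objects have moved, which is
presumably what lets gc_reset_interior_pointers()/gc_process_interior_pointers()
disappear as well. The reworked sequence, condensed from the force_gc() hunk
above into a hypothetical outline:

    // Condensed outline of force_gc() after this change (not new code).
    prepare_gc();
    roots_clear();                          // drop root records from the last GC
    gc_type = GC_FORCED;
    enumerate_universe();                   // VM enumerates; GC records each root
    process_special_references(soft_references);
    process_special_references(weak_references);
    process_finalizable_objects();
    process_special_references(phantom_references);
    roots_update();                         // rewrite roots to moved objects
    finalize_objects();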

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect.h Mon Oct  2 19:26:48 2006
@@ -24,8 +24,9 @@
 #include <assert.h>
 #include <open/gc.h>
 #include <open/types.h>
+#include "slot.h"
 
-extern fast_list<Partial_Reveal_Object**,65536> slots;
+extern fast_list<Slot,65536> slots;
 typedef fast_list<Partial_Reveal_Object*,1024> reference_vector;
 extern reference_vector finalizible_objects;
 extern reference_vector soft_references;
@@ -65,27 +66,24 @@
     assert(!(obj->vt() & (FORWARDING_BIT|RESCAN_BIT)));
 }
 
-void gc_copy_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
-void gc_copy_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
+void roots_clear();
+void roots_update();
+
+void gc_copy_add_root_set_entry(Slot slot);
 void gc_copy_update_regions();
 
-void gc_forced_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
-void gc_forced_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
+void gc_forced_add_root_set_entry(Slot slot);
 
-void gc_reset_interior_pointers();
-void gc_process_interior_pointers();
-void gc_slide_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
-void gc_slide_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
+void gc_slide_add_root_set_entry(Slot slot);
 void gc_slide_move_all();
 void gc_slide_process_special_references(reference_vector& array);
 void gc_slide_postprocess_special_references(reference_vector& array);
 
-void transition_copy_to_sliding_compaction(fast_list<Partial_Reveal_Object**,65536>& slots);
-void gc_slide_process_transitional_slots(fast_list<Partial_Reveal_Object**,65536>& slots);
-void gc_slide_process_transitional_slots(Partial_Reveal_Object **refs, int pos, int length);
+void transition_copy_to_sliding_compaction(fast_list<Slot,65536>& slots);
+void gc_slide_process_transitional_slots(fast_list<Slot,65536>& slots);
+void gc_slide_process_transitional_slots(Reference *refs, int pos, int length);
 
-void gc_cache_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
-void gc_cache_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned);
+void gc_cache_add_root_set_entry(Slot slot);
 void gc_cache_retrieve_root_set();
 void gc_cache_emit_root_set();
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_cache.cpp Mon Oct  2 19:26:48 2006
@@ -30,43 +30,22 @@
 #include "root_set_cache.h"
 
 roots_vector root_set;
-fast_list<InteriorPointer,256> interior_pointers;
 
-void gc_cache_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
-    assert(!is_pinned);
-    assert(ref != NULL);
-    assert(*ref == NULL || ((unsigned char*)*ref >= heap.base && (unsigned char*)*ref < heap.ceiling));
-    root_set.push_back((Partial_Reveal_Object**)ref);
-}
-
-void gc_cache_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
-{
-    assert(!is_pinned);
-    InteriorPointer ip;
-    ip.obj = (Partial_Reveal_Object*) (*(unsigned char**)slot - offset);
-    ip.interior_ref = (Partial_Reveal_Object**)slot;
-    ip.offset = offset;
-    interior_pointers.push_back(ip);
+void gc_cache_add_root_set_entry(Slot slot) {
+    root_set.push_back(slot);
 }
 
 void gc_cache_emit_root_set() {
     for(roots_vector::iterator r = root_set.begin(); r != root_set.end(); ++r) {
-        gc_add_root_set_entry((Managed_Object_Handle*)*r, false);
-    }
-
-    for(fast_list<InteriorPointer,256>::iterator ip = interior_pointers.begin();
-            ip != interior_pointers.end(); ++ip) {
-        gc_add_root_set_entry_interior_pointer ((void**)(*ip).interior_ref, (*ip).offset, false);
+        gc_add_root_set_entry_slot(*r);
     }
 }
 
 void gc_cache_retrieve_root_set() {
     root_set.clear();
-    interior_pointers.clear();
     GC_TYPE orig_gc_type = gc_type;
     gc_type = GC_CACHE;
     vm_enumerate_root_set_all_threads();
     gc_type = orig_gc_type;
     INFO2("gc.verbose", root_set.count() << " roots collected");
-    INFO2("gc.verbose", interior_pointers.count() << " interior pointers collected");
 }
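
With roots captured as Slot values, the cache above shrinks to a single
roots_vector and its InteriorPointer list disappears too:
gc_cache_retrieve_root_set() enumerates all threads once under GC_CACHE, and
gc_cache_emit_root_set() replays every cached slot through
gc_add_root_set_entry_slot(). A hypothetical usage sketch, since the callers
are not shown in this part of the diff:

    // Hypothetical usage -- the actual callers are elsewhere in the GC.
    gc_cache_retrieve_root_set();   // enumerate threads once, filling root_set
    gc_type = GC_COPY;              // then select the real collector
    gc_cache_emit_root_set();       // replay each cached Slot into it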

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_copy.cpp Mon Oct  2 19:26:48 2006
@@ -26,6 +26,8 @@
 #include <jni_types.h>
 #include "gc_types.h"
 #include "collect.h"
+#include "slot.h"
+
 
 void gc_copy_update_regions() {
     int n = 0;
@@ -53,7 +55,7 @@
     cleaning_needed = true;
 }
 
-static bool gc_copy_process_reference(Partial_Reveal_Object **ref, Boolean is_pinned, int phase);
+static bool gc_copy_process_reference(Slot slot, int phase);
 
 static inline bool 
 gc_copy_scan_array_object(Partial_Reveal_Object *array, int vector_length, int phase)
@@ -62,13 +64,13 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Partial_Reveal_Object **refs = (Partial_Reveal_Object**)
+    Reference *refs = (Reference*)
         vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Partial_Reveal_Object **ref = &refs[i];
+        Slot slot(refs + i);
 
-        bool success = gc_copy_process_reference(ref, false, phase);
+        bool success = gc_copy_process_reference(slot, phase);
 
         if (!success) {
             // overflow in old objects
@@ -91,6 +93,7 @@
     if (endpos <= heap.old_objects.pos_limit) {
         heap.old_objects.pos = endpos;
         assert(endpos <= heap.old_objects.end);
+        assert(((POINTER_SIZE_INT) endpos & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return true;
     }
     TRACE2("gc.pin.gc", "old area: reached heap.old_objects.pos_limit =" << heap.old_objects.pos_limit);
@@ -128,53 +131,54 @@
         endpos = newpos + size;
         if (endpos <= heap.old_objects.pos_limit) {
             heap.old_objects.pos = endpos;
+            assert(((POINTER_SIZE_INT) endpos & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return true;
         }
     }
     return false;
 }
 
-static bool gc_copy_process_reference(Partial_Reveal_Object **ref, Boolean is_pinned, int phase) {
-    assert(ref);
- 
-    Partial_Reveal_Object* obj = *ref;
+static bool gc_copy_process_reference(Slot slot, int phase) {
+    Partial_Reveal_Object* obj = slot.read();
 
-    if (!obj) return true;
+    if (obj == heap_null) return true;
+    assert(obj);
     assert(obj->vt() & ~(FORWARDING_BIT|RESCAN_BIT));
     TRACE2("gc.debug", "0x" << obj << " info = " << obj->obj_info());
 
-    int info = obj->obj_info();
-    int vt = obj->vt();
+    unsigned info = obj->obj_info();
+    unsigned vt = obj->vt();
 
     if (info & phase) {
         // object already marked, need to check if it is still forwarded
         
         if (vt & FORWARDING_BIT) {
-            Partial_Reveal_Object *newpos = (Partial_Reveal_Object*) (vt & ~FORWARDING_BIT);
+            Partial_Reveal_Object *newpos = fw_to_pointer(vt & ~FORWARDING_BIT);
             assert_vt(newpos);
-            *ref = newpos;
-        }
+            slot.write(newpos);
+        } else obj->valid();
         return true;
     }
+    obj->valid();
 
     VMEXPORT Class_Handle vtable_get_class(VTable_Handle vh);
-    assert(class_get_vtable(vtable_get_class((VTable_Handle)obj->vt())) == (VTable_Handle)obj->vt());
-    TRACE2("gc.debug", "0x" << obj << " is " << class_get_name(vtable_get_class((VTable_Handle)obj->vt())));
+    assert(class_get_vtable(vtable_get_class((VTable_Handle)obj->vtable())) == (VTable_Handle)obj->vtable());
+    TRACE2("gc.debug", "0x" << obj << " is " << class_get_name(vtable_get_class((VTable_Handle)obj->vtable())));
 
     obj->obj_info() = (info & ~MARK_BITS) | phase;
 
     // move the object?
 #define pos ((unsigned char*) obj)
-    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) vt;
+    Partial_Reveal_VTable *vtable = ah_to_vtable(vt);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     if (pos >= heap.compaction_region_start() && pos < heap.compaction_region_end()) {
         int size = get_object_size(obj, gcvt);
 
         // is it not pinned?
-        if (size < 5000 &&  (!is_pinned) && ((info & OBJECT_IS_PINNED_BITS) == 0)) {
+        if (size < 5000 && ((info & OBJECT_IS_PINNED_BITS) == 0)) {
             if (info & HASHCODE_IS_SET_BIT) {
-                size += 4;
+                size += GC_OBJECT_ALIGNMENT;
             }
 
             // move the object
@@ -185,8 +189,8 @@
 
                 Partial_Reveal_Object *newobj = (Partial_Reveal_Object*) newpos;
                 if ((info & HASHCODE_IS_SET_BIT) && !(info & HASHCODE_IS_ALLOCATED_BIT)) {
-                    memcpy(newobj, obj, size-4);
-                    *(int*)(newpos + size-4) = gen_hashcode(obj);
+                    memcpy(newobj, obj, size-GC_OBJECT_ALIGNMENT);
+                    *(int*)(newpos + size-GC_OBJECT_ALIGNMENT) = gen_hashcode(obj);
                     newobj->obj_info() |= HASHCODE_IS_ALLOCATED_BIT;
                 } else {
                     memcpy(newobj, obj, size);
@@ -194,21 +198,21 @@
                 //TRACE2("gc.copy", "obj " << obj << " -> " << newobj << " + " << size);
                 assert(newobj->vt() == obj->vt());
                 assert(newobj->obj_info() & phase);
-                obj->vt() = (POINTER_SIZE_INT)newobj | FORWARDING_BIT;
+                obj->vt() = pointer_to_fw(newobj);
                 assert_vt(newobj);
-                *ref = newobj;
+                slot.write(newobj);
                 obj = newobj;
             } else {
                 // overflow! no more space in old objects area
                 // pinning the overflow object
                 pinned_areas_unsorted.push_back(pos);
                 pinned_areas_unsorted.push_back(pos + size
-                        + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
+                        + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
                 TRACE2("gc.pin", "add failed pinned area = " << pos << " " << pinned_areas_unsorted.back());
                 TRACE2("gc.pin", "failed object = " << pos);
                 // arrange transition to slide compaction
                 obj->obj_info() &= ~MARK_BITS;
-                slots.push_back(ref);
+                slots.push_back(slot);
                 transition_copy_to_sliding_compaction(slots);
                 return false;
             }
@@ -217,9 +221,9 @@
             assert(gc_num != 1 || !(obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT));
             pinned_areas_unsorted.push_back(pos);
             pinned_areas_unsorted.push_back(pos + size
-                    + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
+                    + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
             TRACE2("gc.pin", "add pinned area = " << pos << " " << pinned_areas_unsorted.back() << " hash = " 
-                    << ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
+                    << ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
         }
     }
 
@@ -237,15 +241,12 @@
     if (type != NOT_REFERENCE) {
         switch (type) {
             case SOFT_REFERENCE:
-                TRACE2("gc.debug", "soft reference 0x" << obj);
                 add_soft_reference(obj);
                 break;
             case WEAK_REFERENCE:
-                TRACE2("gc.debug", "weak reference 0x" << obj);
                 add_weak_reference(obj);
                 break;
             case PHANTOM_REFERENCE:
-                TRACE2("gc.debug", "phantom reference 0x" << obj);
                 add_phantom_reference(obj);
                 break;
             default:
@@ -256,10 +257,10 @@
 
     int offset;
     while ((offset = *offset_list) != 0) {
-        Partial_Reveal_Object **slot = (Partial_Reveal_Object**)(pos + offset);
+        Slot inner_slot((Reference*)(pos + offset));
         //if (*slot) { looks like without check is better
-            TRACE2("gc.debug", "0x" << *slot << " referenced from object = 0x" << obj);
-            slots.push_back(slot);
+            TRACE2("gc.debug", "0x" << inner_slot.read() << " referenced from object = 0x" << obj);
+            slots.push_back(inner_slot);
         //}
 
         offset_list++;
@@ -269,44 +270,19 @@
 #undef pos
 }
 
-static void gc_copy_add_root_set_entry_internal(Partial_Reveal_Object **ref, Boolean is_pinned) {
+void gc_copy_add_root_set_entry(Slot root) {
     // FIXME: check for zero here, see how it affects performance; should be better!
     // and possibly remove check in gc_copy_process_reference
     // while added check in array handling
 
-#ifdef _DEBUG
-    if (*ref) {
-        TRACE2("gc.debug", "0x" << *ref << " referenced from root = 0x" << ref << " info = " << (*ref)->obj_info());
-    }
-#endif
-
     int phase = heap_mark_phase;
-    gc_copy_process_reference(ref, is_pinned, phase);
+    gc_copy_process_reference(root, phase);
 
     while (true) {
         if (slots.empty()) break;
-        Partial_Reveal_Object **ref = slots.pop_back();
-        *ref;
-        gc_copy_process_reference(ref, false, phase);
-    }
-}
-
-void gc_copy_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
-    assert(!is_pinned);
-    //TRACE2("gc.enum", "gc_add_root_set_entry");
-    gc_copy_add_root_set_entry_internal((Partial_Reveal_Object**)ref, is_pinned);
-}
-
-void gc_copy_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
-{
-    assert(!is_pinned);
-    int *ref = (int*)slot;
-    int oldobj = *ref - offset;
-    int newobj = oldobj;
-
-    gc_copy_add_root_set_entry_internal((Partial_Reveal_Object**)&newobj, is_pinned);
-    if (newobj != oldobj) {
-        *ref = newobj + offset;
+        Slot slot = slots.pop_back();
+        slot.read();
+        gc_copy_process_reference(slot, phase);
     }
 }
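
collect_copy.cpp now installs forwarding pointers with pointer_to_fw() and
decodes them with fw_to_pointer() instead of casting a full pointer into the
vt word; with a 32-bit vt field on em64t the forwarded address has to be
stored as a heap offset. Object alignment (GC_OBJECT_ALIGNMENT, which also
replaces the literal 4 in the hashcode bookkeeping above) keeps the low bits
of such an offset free for FORWARDING_BIT. A plausible sketch of the helpers,
inferred from the call sites; the committed definitions live in slot.h:

    // Hypothetical definitions, inferred from call sites such as
    // 'obj->vt() = pointer_to_fw(newobj)' and 'fw_to_pointer(vt & ~FORWARDING_BIT)'.
    inline VT32 pointer_to_fw(Partial_Reveal_Object* newobj) {
        // the offset fits in 32 bits; alignment leaves room for the tag bit
        return (VT32)((Ptr)newobj - heap_base) | FORWARDING_BIT;
    }
    inline Partial_Reveal_Object* fw_to_pointer(VT32 offset) {
        // callers strip FORWARDING_BIT (or MARK_BITS) before calling
        return (Partial_Reveal_Object*)(heap_base + offset);
    }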
 

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_forced.cpp Mon Oct  2 19:26:48 2006
@@ -29,7 +29,7 @@
 extern fast_list<Partial_Reveal_Object*, 65536> objects; // FIXME: duplication of memory slots and objects
                                                   // FIXME: move to header file
 
-static void forced_process_reference(Partial_Reveal_Object *obj, Boolean is_pinned);
+static void forced_process_reference(Partial_Reveal_Object *obj);
 
 static inline void 
 forced_scan_array_object(Partial_Reveal_Object *array, int vector_length)
@@ -39,27 +39,27 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Partial_Reveal_Object **refs = (Partial_Reveal_Object**)
+    Reference *refs = (Reference*)
         vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Partial_Reveal_Object **ref = &refs[i];
-        Partial_Reveal_Object *obj = *ref;
-        if (obj != 0) {
-            forced_process_reference(obj, false);
+        Slot slot(refs + i);
+        Partial_Reveal_Object *obj = slot.read();
+        if (obj != heap_null) {
+            forced_process_reference(obj);
         }
     }
 }
 
-static void forced_process_reference(Partial_Reveal_Object *obj, Boolean is_pinned) {
-    assert(!is_pinned);
+static void forced_process_reference(Partial_Reveal_Object *obj) {
 
     assert(obj->vt() & ~FORWARDING_BIT);
 
-    int info = obj->obj_info();
+    unsigned info = obj->obj_info();
     if (info & heap_mark_phase) {
         return;
     }
+    obj->valid();
 
     obj->obj_info() = (info & ~MARK_BITS) | heap_mark_phase;
 
@@ -100,35 +100,23 @@
 
     int offset;
     while ((offset = *offset_list) != 0) {
-        Partial_Reveal_Object **slot = (Partial_Reveal_Object**)(((char*)obj) + offset);
+        Slot slot( (Reference*)(((char*)obj) + offset) );
         offset_list++;
-        Partial_Reveal_Object *object = *slot;
-        if (object != 0) {
+        Partial_Reveal_Object *object = slot.read();
+        if (object != heap_null) {
             objects.push_back(object);
         }
     }
 }
 
-static void gc_forced_add_root_set_entry_internal(Partial_Reveal_Object *obj, Boolean is_pinned) {
-    forced_process_reference(obj, is_pinned);
+void gc_forced_add_root_set_entry(Slot slot) {
+    Partial_Reveal_Object *obj = slot.read();
+    if (obj == heap_null) return;
+    forced_process_reference(obj);
 
     while (!objects.empty()) {
         Partial_Reveal_Object *obj = objects.pop_back();
-        forced_process_reference(obj, false);
+        forced_process_reference(obj);
     }
 }
 
-void gc_forced_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
-    Partial_Reveal_Object *obj = *(Partial_Reveal_Object**)ref;
-    if (obj == 0) return;
-    gc_forced_add_root_set_entry_internal(obj, is_pinned);
-}
-
-void gc_forced_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
-{
-    int *ref = (int*)slot;
-    int obj = *ref - offset;
-    if (obj == 0) return;
-
-    gc_forced_add_root_set_entry_internal((Partial_Reveal_Object*)obj, is_pinned);
-}

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/collect_slide_compact.cpp Mon Oct  2 19:26:48 2006
@@ -32,7 +32,6 @@
 unsigned char *mark_bits;
 int mark_bits_size;
 fast_list<Partial_Reveal_Object*, 65536> objects;
-static fast_list<InteriorPointer,256> comp_interior_pointers;
 
 static inline bool
 is_compaction_object(Partial_Reveal_Object *refobj) {
@@ -47,29 +46,37 @@
 }
 
 static inline void
-update_forwarded_reference(Partial_Reveal_Object *obj, Partial_Reveal_Object **ref) {
+update_forwarded_reference(Partial_Reveal_Object *obj, Slot slot) {
     assert(!(obj->vt() & RESCAN_BIT));
     assert(obj->vt() & FORWARDING_BIT);
-    *(int*)ref = obj->vt() & ~FORWARDING_BIT;
+    slot.write(fw_to_pointer(obj->vt() & ~FORWARDING_BIT));
 }
 
+#if GC_OBJECT_ALIGNMENT == 8
+#define GC_OBJECT_ALIGNMENT_SHIFT 3
+#elif GC_OBJECT_ALIGNMENT == 4
+#define GC_OBJECT_ALIGNMENT_SHIFT 2
+#else
+#error GC_OBJECT_ALIGNMENT not detected
+#endif
+
 static inline bool mark_bit_is_set(Partial_Reveal_Object *obj) {
-    int addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
-    addr >>= 2;
-    int bit = addr & 7; // FIXME: use defines
-    int byte = addr >> 3;
+    size_t addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
+    addr >>= GC_OBJECT_ALIGNMENT_SHIFT;
+    size_t bit = addr & 7; // FIXME: use defines
+    size_t byte = addr >> 3;
     return mark_bits[byte] & ((unsigned char)1 << bit);
 }
 
-static inline void enqueue_reference(Partial_Reveal_Object *refobj, Partial_Reveal_Object **ref) {
+static inline void enqueue_reference(Partial_Reveal_Object *refobj, Slot slot) {
     assert(is_compaction_object(refobj));
     assert(!is_forwarded_object(refobj));
     //assert(*ref == refobj);
     assert(refobj->obj_info());
 
-    int &info = refobj->obj_info();
-    *(int*)ref = info;
-    info = (int)ref | heap_mark_phase;
+    unsigned &info = refobj->obj_info();
+    slot.write_raw(info);
+    info = slot.addr() | heap_mark_phase; //(int)ref
 }
 
 static inline bool is_object_marked(Partial_Reveal_Object *obj) {
@@ -77,17 +84,17 @@
 }
 
 static inline void set_mark_bit(Partial_Reveal_Object *obj) {
-    int addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
-    addr >>= 2;
-    int bit = addr & 7; // FIXME: use defines
-    int byte = addr >> 3;
+    size_t addr = (POINTER_SIZE_INT)obj - (POINTER_SIZE_INT) heap_base;
+    addr >>= GC_OBJECT_ALIGNMENT_SHIFT;
+    size_t bit = addr & 7; // FIXME: use defines
+    size_t byte = addr >> 3;
     mark_bits[byte] |=  ((unsigned char) 1 << bit);
 }
 
 static inline bool mark_object(Partial_Reveal_Object *obj) {
     int phase = heap_mark_phase;
 
-    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
+    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
     assert(obj->vt() != 0);
 
     // is object already marked
@@ -95,9 +102,10 @@
         return false;
     }
 
+    obj->valid();
     assert(!is_forwarded_object(obj));
 
-    int info = obj->obj_info();
+    unsigned info = obj->obj_info();
 
     if (is_compaction_object(obj)) {
         set_mark_bit(obj);
@@ -106,7 +114,7 @@
             pinned_areas_unsorted.push_back((unsigned char*)obj);
             int size = get_object_size(obj, obj->vtable()->get_gcvt());
             pinned_areas_unsorted.push_back((unsigned char*)obj + size
-                    + ((info & HASHCODE_IS_ALLOCATED_BIT) ? 4 : 0));
+                    + ((info & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
             TRACE2("gc.pin", "add pinned area = " << (unsigned char*)obj << " " << pinned_areas_unsorted.back());
         }
 
@@ -125,41 +133,37 @@
 }
 
 static inline void process_reference_queue(Partial_Reveal_Object *newobj, Partial_Reveal_Object *obj) {
-    int info = obj->obj_info();
+    unsigned info = obj->obj_info();
     assert(info);
     assert(info & heap_mark_phase); assert(is_compaction_object(obj));
 
     while (!(info & prev_mark_phase)) {
         assert(info);
         assert(info & heap_mark_phase);
-        Partial_Reveal_Object **ref = (Partial_Reveal_Object**) (info & ~MARK_BITS);
-        info = (int)*ref;
-        *ref = newobj;
+        Slot slot((Reference*) fw_to_pointer(info & ~MARK_BITS));
+        info = slot.read_raw(); //(int)*ref;
+        slot.write(newobj);
     }
     obj->obj_info() = info & ~MARK_BITS;
 }
 
-void gc_reset_interior_pointers() { // FIXME: rename
-    comp_interior_pointers.clear();
-}
-
 static void postprocess_array(Partial_Reveal_Object *array, int vector_length, Partial_Reveal_Object *oldobj) {
     // No primitive arrays allowed
     assert(!is_array_of_primitives(array));
     assert(is_compaction_object(array));
     assert(!is_forwarded_object(array));
 
-    int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
+    int array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Partial_Reveal_Object **refs = (Partial_Reveal_Object**) vector_get_element_address_ref ((Vector_Handle) array, 0);
+    Reference *refs = (Reference*) vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     for(int i = 0; i < array_length; i++) {
-        Partial_Reveal_Object **ref = &refs[i];
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
+        Slot slot(refs + i);
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int == refobj_unmarked) continue; // not specially marked reference
         Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-        enqueue_reference(refobj, ref);
+        enqueue_reference(refobj, slot);
     }
 }
 
@@ -174,10 +178,12 @@
     assert(is_compaction_object(obj));
     assert(!is_forwarded_object(obj));
  
-    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
+    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
     assert(obj->vt() & RESCAN_BIT);
-    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) (obj->vt() & ~RESCAN_BIT);
-    obj->vt() = (int) vtable;
+
+    VT32 vt = obj->vt() & ~RESCAN_BIT;
+    obj->vt() = vt;
+    Partial_Reveal_VTable *vtable = ah_to_vtable(vt);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     // process slots
@@ -190,34 +196,34 @@
     }
 
     if (gcvt->reference_type() != NOT_REFERENCE) {
-        Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + global_referent_offset);
+        Slot slot((Reference*)((char*)obj + global_referent_offset));
 
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int != refobj_unmarked) {
             Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-            enqueue_reference(refobj, ref);
+            enqueue_reference(refobj, slot);
         }
     }
 
     int *offset_list = gcvt->offset_array();
     int offset;
     while ((offset = *offset_list) != 0) {
-        Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
+        Slot slot( (Reference*)((char*)obj + offset));
         offset_list++;
 
-        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)*ref;
+        POINTER_SIZE_INT refobj_int = (POINTER_SIZE_INT)slot.read();
         POINTER_SIZE_INT refobj_unmarked = refobj_int & ~1;
         if (refobj_int == refobj_unmarked) continue; // not specially marked reference
         Partial_Reveal_Object *refobj = (Partial_Reveal_Object*) refobj_unmarked;
-        enqueue_reference(refobj, ref);
+        enqueue_reference(refobj, slot);
     }
 }
 
 void gc_slide_move_all() {
     unsigned char *compact_pos = heap.compaction_region_start();
     unsigned char *compact_pos_limit = heap.compaction_region_end();
-    unsigned char *next_pinned_object = heap.ceiling;
+    unsigned char *next_pinned_object = heap.compaction_region_end();
     unsigned next_pinned_object_pos = 0;
 
     prev_mark_phase = heap_mark_phase ^ 3;
@@ -261,14 +267,14 @@
         break;
     }
 
-    pinned_areas.push_back(heap.ceiling);
+    pinned_areas.push_back(heap.compaction_region_end());
 
     int *mark_words = (int*) mark_bits;
     // Searching marked bits
-    int start = (heap.compaction_region_start() - heap_base) / sizeof(void*) / sizeof(int) / 8;
-    int end = (heap.compaction_region_end() - heap_base + sizeof(void*) + sizeof(int) * 8 - 1) / sizeof(void*) / sizeof(int) / 8;
-    if (end > mark_bits_size/4) end = mark_bits_size/4;
-    for(int i = start; i < end; i++) {
+    unsigned start = (unsigned)(heap.compaction_region_start() - heap_base) / GC_OBJECT_ALIGNMENT / sizeof(int) / 8;
+    unsigned end = (unsigned)(heap.compaction_region_end() - heap_base + GC_OBJECT_ALIGNMENT * sizeof(int) * 8 - 1) / GC_OBJECT_ALIGNMENT / sizeof(int) / 8;
+    if (end > mark_bits_size/sizeof(int)) end = mark_bits_size/sizeof(int);
+    for(unsigned i = start; i < end; i++) {
         // no marked bits in word - skip
 
         int word = mark_words[i];
@@ -276,12 +282,12 @@
 
         for(int bit = 0; bit < 32; bit++) {
             if (word & 1) {
-                unsigned char *pos = heap_base + i * 32 * 4 + bit * 4;
+                unsigned char *pos = heap_base + i * 8 * GC_OBJECT_ALIGNMENT * sizeof(int) + bit * GC_OBJECT_ALIGNMENT;
                 Partial_Reveal_Object *obj = (Partial_Reveal_Object*) pos;
 
-                int vt = obj->vt();
+                VT32 vt = obj->vt();
                 bool post_processing = vt & RESCAN_BIT;
-                Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*)(vt & ~RESCAN_BIT);
+                Partial_Reveal_VTable *vtable = ah_to_vtable(vt & ~RESCAN_BIT);
                 int size = get_object_size(obj, vtable->get_gcvt());
 
                 assert(is_object_marked(obj));
@@ -289,7 +295,7 @@
 
                 if ((unsigned char*)obj != next_pinned_object) {
 
-                    // 4 bytes reserved for hash
+                    // 4/8 bytes reserved for hash
                     while (compact_pos + size > compact_pos_limit) {
                         assert(pinned_areas_pos < pinned_areas.size());
                         compact_pos = pinned_areas[pinned_areas_pos];
@@ -302,10 +308,10 @@
                     if (compact_pos >= pos) {
                         newobj = obj;
                         process_reference_queue(obj, obj);
-                        int info = obj->obj_info();
+                        unsigned info = obj->obj_info();
                         if (compact_pos == pos) {
-                            assert(HASHCODE_IS_ALLOCATED_BIT == 4);
-                            compact_pos += size + (info & HASHCODE_IS_ALLOCATED_BIT);
+                            compact_pos += size +
+                                (((info & HASHCODE_IS_ALLOCATED_BIT) != 0) ? GC_OBJECT_ALIGNMENT : 0);
                         } else {
                             assert(compact_pos >= pos + size);
                         }
@@ -315,11 +321,11 @@
 
                         newobj = (Partial_Reveal_Object*) newpos;
                         process_reference_queue(newobj, obj);
-                        int info = obj->obj_info();
+                        unsigned info = obj->obj_info();
 
                         if (info & HASHCODE_IS_SET_BIT) {
-                            size += 4;
-                            compact_pos += 4;
+                            size += GC_OBJECT_ALIGNMENT;
+                            compact_pos += GC_OBJECT_ALIGNMENT;
                         }
 
                         if (newpos + size <= pos) {
@@ -328,7 +334,7 @@
                             memmove(newpos, pos, size);
                         }
                         if (info & HASHCODE_IS_SET_BIT && !(info & HASHCODE_IS_ALLOCATED_BIT)) {
-                            *(int*)(newpos + size - 4) = gen_hashcode(pos);
+                            *(int*)(newpos + size - GC_OBJECT_ALIGNMENT) = gen_hashcode(pos);
                             newobj->obj_info() |= HASHCODE_IS_ALLOCATED_BIT;
                         }
                     }
@@ -352,12 +358,12 @@
         }
     }
     assert(next_pinned_object >= heap.compaction_region_end());
-    pinned_areas.pop_back(); //heap.ceiling
+    pinned_areas.pop_back(); //heap.compaction_region_end()
 
     TRACE2("gc.mem", "compaction: region size = "
             << (heap.compaction_region_end() - heap.compaction_region_start()) / 1024 / 1024 << " mb");
     TRACE2("gc.mem", "compaction: free_space = "
-            << (heap.ceiling - compact_pos) / 1024 / 1024 << " mb");
+            << (heap.compaction_region_end() - compact_pos) / 1024 / 1024 << " mb");
 
     cleaning_needed = true;
     heap.pos = compact_pos;
@@ -371,7 +377,7 @@
     old_pinned_areas_pos = 1;
 }
 
-static void slide_process_object(Partial_Reveal_Object *obj, Boolean is_pinned);
+static void slide_process_object(Partial_Reveal_Object *obj);
 
 static inline void 
 slide_scan_array_object(Partial_Reveal_Object *array, Partial_Reveal_VTable *vtable, int vector_length)
@@ -382,28 +388,28 @@
 
     int32 array_length = vector_length; //vector_get_length((Vector_Handle) array);
 
-    Partial_Reveal_Object **refs = (Partial_Reveal_Object**) vector_get_element_address_ref ((Vector_Handle) array, 0);
+    Reference *refs = (Reference*) vector_get_element_address_ref ((Vector_Handle) array, 0);
 
     if (is_compaction_object(array)) {
         bool rescan = false;
         for(int i = 0; i < array_length; i++) {
-            Partial_Reveal_Object **ref = &refs[i];
-            Partial_Reveal_Object *refobj = *ref;
-            if (!refobj) continue;
+            Slot slot(refs + i);
+            Partial_Reveal_Object *refobj = slot.read();
+            if (refobj == heap_null) continue;
 
             if (mark_object(refobj)) {
-                slide_process_object(refobj, false);
+                slide_process_object(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, ref);
+                update_forwarded_reference(refobj, slot);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, ref)) {
-                    enqueue_reference(refobj, ref);
+                if (is_left_object(refobj, slot)) {
+                    enqueue_reference(refobj, slot);
                 } else {
                     // mark_rescan_reference
-                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
+                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
                     rescan = true;
                 }
             }
@@ -411,36 +417,35 @@
         if (rescan) set_rescan_bit(array);
     } else {
         for(int i = 0; i < array_length; i++) {
-            Partial_Reveal_Object **ref = &refs[i];
-            Partial_Reveal_Object *refobj = *ref;
-            if (!refobj) continue;
+            Slot slot(refs + i);
+            Partial_Reveal_Object *refobj = slot.read();
+            if (refobj == heap_null) continue;
 
             if (mark_object(refobj)) {
-                slide_process_object(refobj, false);
+                slide_process_object(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, ref);
+                update_forwarded_reference(refobj, slot);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                enqueue_reference(refobj, ref);
+                enqueue_reference(refobj, slot);
             }
         }
     }
 }
 
-static void slide_process_object(Partial_Reveal_Object *obj, Boolean is_pinned) {
+static void slide_process_object(Partial_Reveal_Object *obj) {
 
-    assert(!is_pinned);
     assert(obj);
-    assert((unsigned char*) obj >= heap_base && (unsigned char*) obj < heap_ceiling);
+    assert((unsigned char*) obj >= heap.base && (unsigned char*) obj < heap.ceiling);
     assert(is_object_marked(obj));
     //assert(mark_bit_is_set(obj) || !is_compaction_object(obj));
 
-    int vt = obj->vt();
+    unsigned vt = obj->vt();
     assert(obj->vt() & ~RESCAN_BIT); // has vt
 
-    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) (vt & ~RESCAN_BIT);
+    Partial_Reveal_VTable *vtable = ah_to_vtable(vt & ~RESCAN_BIT);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
 
     // process slots
@@ -481,25 +486,25 @@
         bool rescan = false;
         int offset;
         while ((offset = *offset_list) != 0) {
-            Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
-            Partial_Reveal_Object *refobj = *ref;
+            Slot slot((Reference*)((char*)obj + offset));
+            Partial_Reveal_Object *refobj = slot.read();
             offset_list++;
 
-            if (!refobj) continue;
+            if (refobj == heap_null) continue;
 
             if (mark_object(refobj)) {
                 objects.push_back(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, ref);
+                update_forwarded_reference(refobj, slot);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, ref)) {
-                    enqueue_reference(refobj, ref);
+                if (is_left_object(refobj, slot)) {
+                    enqueue_reference(refobj, slot);
                 } else {
                     // mark_rescan_reference
-                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
+                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
                     rescan = true;
                 }
             }
@@ -508,79 +513,54 @@
     } else {
         int offset;
         while ((offset = *offset_list) != 0) {
-            Partial_Reveal_Object **ref = (Partial_Reveal_Object**)((char*)obj + offset);
-            Partial_Reveal_Object *refobj = *ref;
+            Slot slot((Reference*)((char*)obj + offset));
+            Partial_Reveal_Object *refobj = slot.read();
             offset_list++;
 
-            if (!refobj) continue;
+            if (refobj == heap_null) continue;
 
             if (mark_object(refobj)) {
                 objects.push_back(refobj);
             } else if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, ref);
+                update_forwarded_reference(refobj, slot);
                 continue;
             }
 
             if (is_compaction_object(refobj)) {
-                enqueue_reference(refobj, ref);
+                enqueue_reference(refobj, slot);
             }
         }
     }
 
 }
 
-static void gc_slide_add_root_set_entry_internal(Partial_Reveal_Object **ref, Boolean is_pinned) {
+void gc_slide_add_root_set_entry(Slot slot) {
     // get object
-    Partial_Reveal_Object *refobj = *ref;
+    Partial_Reveal_Object *refobj = slot.read();
 
     // check no garbage
-    assert(((int)refobj & 3) == 0);
+    assert(((POINTER_SIZE_INT)refobj & 3) == 0);
 
     // empty references is not interesting
-    if (!refobj) return;
-    assert(!is_pinned); // no pinning allowed for now
+    if (refobj == heap_null) return;
 
     if (mark_object(refobj)) {
         // object wasn't marked yet
-        slide_process_object(refobj, is_pinned);
+        slide_process_object(refobj);
     } else if (is_forwarded_object(refobj)) {
-        update_forwarded_reference(refobj, ref);
+        update_forwarded_reference(refobj, slot);
         goto skip;
     }
 
     if (is_compaction_object(refobj)) {
-        enqueue_reference(refobj, ref);
+        enqueue_reference(refobj, slot);
     }
 skip:
 
     while (true) {
         if (objects.empty()) break;
         Partial_Reveal_Object *obj = objects.pop_back();
-        slide_process_object(obj, false);
-    }
-}
-
-void gc_slide_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) {
-    //TRACE2("gc.enum", "gc_add_root_set_entry");
-    gc_slide_add_root_set_entry_internal((Partial_Reveal_Object**)ref, is_pinned);
-}
-
-void gc_slide_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
-{
-    InteriorPointer ip;
-    ip.obj = (Partial_Reveal_Object*) (*(unsigned char**)slot - offset);
-    ip.interior_ref = (Partial_Reveal_Object**)slot;
-    ip.offset = offset;
-    InteriorPointer& ips = comp_interior_pointers.push_back(ip);
-    gc_slide_add_root_set_entry_internal((Partial_Reveal_Object**)&ips.obj, is_pinned);
-}
-
-void gc_process_interior_pointers() {
-    fast_list<InteriorPointer,256>::iterator begin = comp_interior_pointers.begin();
-    fast_list<InteriorPointer,256>::iterator end = comp_interior_pointers.end();
-
-    for(fast_list<InteriorPointer,256>::iterator i = begin; i != end; ++i) {
-        *(*i).interior_ref = (Partial_Reveal_Object*)((unsigned char*)(*i).obj + (*i).offset);
+        slide_process_object(obj);
     }
 }
 
@@ -589,13 +569,13 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *obj = *i;
 
-        Partial_Reveal_Object **ref = 
-            (Partial_Reveal_Object**) ((unsigned char *)obj + global_referent_offset);
-        Partial_Reveal_Object* refobj = *ref;
+        Slot slot(
+            (Reference*) ((unsigned char *)obj + global_referent_offset));
+        Partial_Reveal_Object* refobj = slot.read();
 
         if (refobj == 0) {
             // reference already cleared, no post processing needed
-            *i = 0;
+            *i = heap_null;
             continue;
         }
 
@@ -603,31 +583,32 @@
             //assert(mark_bit_is_set(refobj) || !is_compaction_object(refobj) || is_forwarded_object(refobj));
 
             if (is_forwarded_object(refobj)) {
-                update_forwarded_reference(refobj, ref);
+                update_forwarded_reference(refobj, slot);
             } else if (is_compaction_object(refobj)) {
-                if (is_left_object(refobj, ref) || !is_compaction_object(obj)) {
-                    enqueue_reference(refobj, ref);
+                if (is_left_object(refobj, slot) || !is_compaction_object(obj)) {
+                    enqueue_reference(refobj, slot);
                 } else {
                     // mark_rescan_reference
-                    *ref = (Partial_Reveal_Object*) ((size_t)refobj | 1);
+                    slot.write( (Partial_Reveal_Object*) ((size_t)refobj | 1) );
                     set_rescan_bit(obj);
                 }
             }
 
             // no post processing needed
-            *i = 0;
+            *i = heap_null;
             continue;
         } else {
             //assert(!mark_bit_is_set(refobj));
         }
 
         // object not marked, clear reference
-        *ref = (Partial_Reveal_Object*)0;
+        slot.write((Partial_Reveal_Object*) heap_null);
+        Slot root = make_direct_root(&*i);
 
         if (is_forwarded_object(obj)) {
-            update_forwarded_reference(obj, &*i);
+            update_forwarded_reference(obj, root);
         } else if (is_compaction_object(obj)) {
-            enqueue_reference(obj, &*i);
+            enqueue_reference(obj, root);
         }
     }
 }
@@ -637,8 +618,8 @@
             i != array.end(); ++i) {
         Partial_Reveal_Object *obj = *i;
 
-        if (!obj) continue;
-        vm_enqueue_reference((Managed_Object_Handle*)obj);
+        if (obj == heap_null) continue;
+        vm_enqueue_reference((Managed_Object_Handle)obj);
     }
 }
 
@@ -646,7 +627,7 @@
 // all previous references are processed in copying collector
 // so will not move, they can be considered as root references here
 
-void gc_slide_process_transitional_slots(fast_list<Partial_Reveal_Object**,65536>& slots) {
+void gc_slide_process_transitional_slots(fast_list<Slot,65536>& slots) {
     // also process pinned objects all but last
     pinned_areas_unsorted_t::iterator end = --(--pinned_areas_unsorted.end());
     for(pinned_areas_unsorted_t::iterator i = pinned_areas_unsorted.begin();
@@ -660,13 +641,14 @@
 
     while (true) {
         if (slots.empty()) break;
-        Partial_Reveal_Object **ref = slots.pop_back();
-        gc_slide_add_root_set_entry_internal(ref, false);
+        Slot slot = slots.pop_back();
+        gc_slide_add_root_set_entry(slot);
     }
 }
-void gc_slide_process_transitional_slots(Partial_Reveal_Object **refs, int pos, int length) {
+
+void gc_slide_process_transitional_slots(Reference *refs, int pos, int length) {
     for(int i = pos; i < length; i++) {
-        Partial_Reveal_Object **ref = &refs[i];
-        gc_slide_add_root_set_entry_internal(ref, false);
+        Slot slot(refs + i);
+        gc_slide_add_root_set_entry(slot);
     }
 }
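
The mark-bitmap indexing above now shifts by GC_OBJECT_ALIGNMENT_SHIFT instead
of a hard-coded 2, so one bit covers each aligned unit at which an object can
start; with 8-byte alignment on em64t the bitmap is half the size it would be
at 4-byte granularity. A worked example of the arithmetic in
set_mark_bit()/mark_bit_is_set():

    // With GC_OBJECT_ALIGNMENT == 8 (shift 3), an object at heap_base + 0x140:
    //   addr = 0x140 >> 3 = 40   (index of the 8-byte unit)
    //   bit  = 40 & 7     = 0
    //   byte = 40 >> 3    = 5    => mark_bits[5] |= (unsigned char)1 << 0;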

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_for_vm.cpp Mon Oct  2 19:26:48 2006
@@ -30,11 +30,17 @@
 GC_Thread_Info *thread_list;
 volatile int thread_list_lock;
 int num_threads = 0;
+Ptr vtable_base;
 
 fast_list<Partial_Reveal_Object*, 1024> finalizible_objects;
 
+#ifdef POINTER64
+GCExport Boolean gc_supports_compressed_references() {
+    vtable_base = (Ptr) vm_get_vtable_base();
+    return true;
+}
+#endif
 
-// GCExport Boolean gc_supports_compressed_references(); optional
 GCExport void gc_write_barrier(Managed_Object_Handle p_base_of_obj_with_slot) {
     TRACE2("gc.wb", "gc_write_barrier");
 }
@@ -80,8 +86,6 @@
     TRACE2("gc.init2", "gc_vm_initialized called (" << count++ << ")");
 }
 
-//GCExport void gc_add_compressed_root_set_entry(uint32 *ref, Boolean is_pinned); optional
-
 void gc_add_weak_root_set_entry(Managed_Object_Handle *slot, 
     Boolean is_pinned, Boolean is_short_weak) {
     TRACE2("gc.enum", "gc_add_weak_root_set_entry - EMPTY");
@@ -159,7 +163,7 @@
     unsigned char *next;
 
     GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
-    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
+    Partial_Reveal_VTable *vtable = ah_to_vtable(ah);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
     unsigned char *cleaned = info->tls_current_cleaned;
     unsigned char *res = info->tls_current_free;
@@ -168,8 +172,9 @@
         if (gcvt->is_finalizible()) return 0;
 
         info->tls_current_free =  res + in_size;
-        *(int*)res = ah;
+        *(VT32*)res = ah;
 
+        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -187,8 +192,9 @@
         if (cleaned_new > ceiling) cleaned_new = ceiling;
         info->tls_current_cleaned = cleaned_new;
         memset(cleaned, 0, cleaned_new - cleaned);
-        *(int*)res = ah;
+        *(VT32*)res = ah;
 
+        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -203,7 +209,7 @@
     assert (ah);
 
     GC_Thread_Info *info = (GC_Thread_Info *) thread_pointer;
-    Partial_Reveal_VTable *vtable = (Partial_Reveal_VTable*) ah;
+    Partial_Reveal_VTable *vtable = ah_to_vtable(ah);
     GC_VTable_Info *gcvt = vtable->get_gcvt();
     unsigned char *res = info->tls_current_free;
     unsigned char *cleaned = info->tls_current_cleaned;
@@ -212,8 +218,9 @@
 
         if (res + in_size <= cleaned) {
             info->tls_current_free =  res + in_size;
-            *(int*)res = ah;
+            *(VT32*)res = ah;
 
+            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return res;
         }
 
@@ -230,7 +237,8 @@
             info->tls_current_cleaned = cleaned_new;
             memset(cleaned, 0, cleaned_new - cleaned);
 
-            *(int*)res = ah;
+            *(VT32*)res = ah;
+            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return (Managed_Object_Handle)res;
         }
     }
@@ -256,7 +264,8 @@
             memset(obj, 0, size);
             finalizible_objects.push_back((Partial_Reveal_Object*) obj);
             vm_gc_unlock_enum();
-            *(int*)obj = ah;
+            *(VT32*)obj = ah;
+            assert(((POINTER_SIZE_INT)obj & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return (Managed_Object_Handle)obj;
         }
 
@@ -266,8 +275,7 @@
 
         if (res + size <= info->tls_current_ceiling) {
             unsigned char *next;
-            info->tls_current_free = next = info->tls_current_free + in_size;
-            assert(!((POINTER_SIZE_INT)res & 3));
+            info->tls_current_free = next = info->tls_current_free + size;
             finalizible_objects.push_back((Partial_Reveal_Object*) res);
 
             if (cleaned < next) {
@@ -275,7 +283,8 @@
                 info->tls_current_cleaned = next;
             }
             vm_gc_unlock_enum();
-            *(int*)res = ah;
+            *(VT32*)res = ah;
+            assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
             return (Managed_Object_Handle)res;
         }
     }
@@ -301,9 +310,10 @@
         }
         vm_gc_unlock_enum();
         if (cleaning_needed) memset(res, 0, size);
-        *(int*)res = ah; // NOTE: object partially initialized, should not be moved!!
+        *(VT32*)res = ah; // NOTE: object partially initialized, should not be moved!!
                          //       problems with arrays
                          //       no way to call vm_hint_finalize() here
+        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return res;
     }
 
@@ -317,7 +327,8 @@
         // chunk is not expired yet, reuse it
         vm_gc_unlock_enum();
         if (cleaning_needed) memset(res, 0, size);
-        *(int*)res = ah;
+        *(VT32*)res = ah;
+        assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
         return (Managed_Object_Handle)res;
     }
 
@@ -332,7 +343,8 @@
     vm_gc_unlock_enum();
     if (cleaning_needed) memset(res, 0, size);
 
-    *(int*)res = ah;
+    *(VT32*)res = ah;
+    assert(((POINTER_SIZE_INT)res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
     return (Managed_Object_Handle)res;
 }
 
@@ -407,6 +419,7 @@
 }
 
 void gc_pin_object (Managed_Object_Handle* p_object) {
+#if 0
     // FIXME: overflow check and handling
     Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
 
@@ -421,9 +434,11 @@
         if (old_value == value) return;
         value = old_value;
     }
+#endif
 }
 
 void gc_unpin_object (Managed_Object_Handle* p_object) {
+#if 0
     Partial_Reveal_Object *obj = *(Partial_Reveal_Object**) p_object;
     assert((obj->obj_info_byte() & OBJECT_IS_PINNED_BITS) != 0);
 
@@ -434,17 +449,23 @@
         if (old_value == value) return;
         value = old_value;
     }
+#endif
 }
 
 Boolean gc_is_object_pinned (Managed_Object_Handle p_object) {
     Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
+    assert ((obj->obj_info_byte() & OBJECT_IS_PINNED_INCR) == 0);
+    return false;
+#if 0
+    Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
     return (obj->obj_info_byte() & OBJECT_IS_PINNED_INCR) != 0;
+#endif
 }
 
 int32 gc_get_hashcode(Managed_Object_Handle p_object) {
     Partial_Reveal_Object *obj = (Partial_Reveal_Object*) p_object;
     if (!obj) return 0;
-    assert((unsigned char*)obj >= heap_base && (unsigned char*)obj < heap_ceiling);
+    assert((unsigned char*)obj >= heap.base && (unsigned char*)obj < heap.ceiling);
     assert(obj->vtable());
     unsigned char info = obj->obj_info_byte();
     // FIXME: atomic ops need to keep pinning work?
@@ -465,7 +486,6 @@
     return hash;
 }
 
-
 Managed_Object_Handle gc_get_next_live_object(void *iterator) {
     TRACE2("gc.iter", "gc_get_next_live_object - NOT IMPLEMENTED");
     abort();
@@ -477,10 +497,10 @@
 }
 
 void *gc_heap_base_address() {
-    return (void*) heap_base;
+    return (void*) heap.base;
 }
 void *gc_heap_ceiling_address() {
-    return (void*) heap_ceiling;
+    return (void*) (heap.base + heap.max_size);
 }
 
 void gc_finalize_on_exit() {

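The recurring change in the allocation fast paths above is worth spelling out: on em64t the allocation handle is still a 32-bit compressed vtable pointer, so it is stored through the new VT32 (uint32) type instead of a plain int, and every successful exit now asserts GC_OBJECT_ALIGNMENT. A minimal sketch of the shared epilogue, assuming the usual Allocation_Handle parameter (finish_alloc itself is a hypothetical name; the patch inlines this at each return):

    // Sketch: common tail of the allocation fast paths above.
    // Stores the 32-bit allocation handle (compressed vtable pointer)
    // into the first word of the new object and checks its alignment.
    static inline Managed_Object_Handle
    finish_alloc(unsigned char *res, Allocation_Handle ah) {
        *(VT32*) res = (VT32) ah;  // vtable word is 32 bit even on em64t
        assert(((POINTER_SIZE_INT) res & (GC_OBJECT_ALIGNMENT - 1)) == 0);
        return (Managed_Object_Handle) res;
    }
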
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/gc_types.h Mon Oct  2 19:26:48 2006
@@ -28,10 +28,25 @@
 #include <list>
 #include <open/vm.h>
 #include <open/vm_gc.h>
+#include <open/gc.h>
 #include <port_vmem.h>
 #include <apr_time.h>
 #include <apr_atomic.h>
 #include <cxxlog.h>
+#include "slot.h"
+
+static const char* gc_version_string() {
+#if (defined _DEBUG) || ! (defined NDEBUG)
+#define BUILD_MODE "debug"
+#else
+#define BUILD_MODE "release"
+#endif
+#ifndef __TIMESTAMP__
+#define __TIMESTAMP__
+#endif /* __TIMESTAMP__ */
+    return "GC v4.1 " __TIMESTAMP__ " (" BUILD_MODE ")";
+}
 
 /// obtains a spinlock.
 inline void spin_lock(volatile int* lock) {
@@ -79,15 +94,8 @@
     GC_Thread_Info **prev;
 } GC_Thread_Info;
 
-#define FORWARDING_BIT 1
-#define RESCAN_BIT 2
-#define GC_OBJECT_MARK_BIT_MASK 0x00000080
-#define MARK_BITS 3
-
-#define HASHCODE_IS_ALLOCATED_BIT 4
-#define HASHCODE_IS_SET_BIT 8
-#define OBJECT_IS_PINNED_BITS (7 << 4)
-#define OBJECT_IS_PINNED_INCR (1 << 4)
+// Heap layout
+#define RESERVED_FOR_HEAP_NULL (4 * 32)
 
 // FLAGS
 extern const char *lp_hint; // Use large pages
@@ -111,7 +119,7 @@
     unsigned size_and_ref_type;
 
     // Methods
-    unsigned flags() { return (int)this; }
+    POINTER_SIZE_INT flags() { return (POINTER_SIZE_INT)this; }
     GC_VTable_Info *ptr() {
         assert(!is_array());
         return (GC_VTable_Info*) ((POINTER_SIZE_INT)this & ~GC_VT_FLAGS);
@@ -129,6 +137,7 @@
 };
 
 typedef POINTER_SIZE_INT GC_VT;
+typedef uint32 VT32;
 
 typedef struct Partial_Reveal_VTable {
 private:
@@ -140,21 +149,27 @@
 
 } Partial_Reveal_VTable;
 
+
 class Partial_Reveal_Object {
     private:
     Partial_Reveal_Object();
-    int vt_raw;
-    int info;
+    VT32 vt_raw;
+    unsigned info;
     int array_len;
 
     public:
-    int &vt() { assert(/* alignment check */ !((int)this & 3)); return vt_raw; }
-    int &obj_info() { assert(/* alignment check */ !((int)this & 3)); return info; }
+    VT32 &vt() { assert(/* alignment check */ !((POINTER_SIZE_INT)this & (GC_OBJECT_ALIGNMENT - 1))); return vt_raw; }
+    unsigned &obj_info() { assert(/* alignment check */ !((POINTER_SIZE_INT)this & (GC_OBJECT_ALIGNMENT - 1))); return info; }
     unsigned char &obj_info_byte() { return *(unsigned char*)&obj_info(); }
 
     Partial_Reveal_VTable *vtable() {
+#ifdef POINTER64
+        assert(!(vt() & FORWARDING_BIT));
+        return ah_to_vtable(vt());
+#else
         assert(!(vt() & FORWARDING_BIT));
         return (Partial_Reveal_VTable*) vt();
+#endif
     }
 
     int array_length() { return array_len; }
@@ -165,6 +180,16 @@
         return (Partial_Reveal_Object**)
             ((unsigned char*) this + (gcvt->flags() >> GC_VT_ARRAY_FIRST_SHIFT));
     }
+
+#if _DEBUG
+    void valid() {
+        assert((vt() & FORWARDING_BIT) == 0);
+        Class_Handle c = allocation_handle_get_class(vt());
+        assert(class_get_allocation_handle(c) == vt());
+    }
+#else
+    void valid() {}
+#endif
 };
 
 
@@ -174,7 +199,7 @@
     unsigned f = flags();
     unsigned element_shift = f >> GC_VT_ARRAY_ELEMENT_SHIFT;
     unsigned first_element = element_shift >> (GC_VT_ARRAY_FIRST_SHIFT - GC_VT_ARRAY_ELEMENT_SHIFT);
-    return (first_element + (length << (element_shift & GC_VT_ARRAY_ELEMENT_MASK)) + 3) & ~3;
+    return (first_element + (length << (element_shift & GC_VT_ARRAY_ELEMENT_MASK)) + (GC_OBJECT_ALIGNMENT - 1)) & ~(GC_OBJECT_ALIGNMENT - 1);
 }
 
 static inline int get_object_size(Partial_Reveal_Object *obj, GC_VTable_Info *gcvt) {
@@ -209,8 +234,6 @@
     return (size + m/2-1)/m;
 }
 
-typedef unsigned char* Ptr;
-
 struct OldObjects {
     Ptr end;
     Ptr pos;
@@ -228,6 +251,10 @@
     Ptr pos; // current allocation position
     Ptr pos_limit; // end of continuous allocation region
 
+    Ptr roots_start;
+    Ptr roots_pos;
+    Ptr roots_end;
+
     Ptr compaction_region_start() { return old_objects.end; }  // compaction region
     Ptr compaction_region_end() { return ceiling; }
 
@@ -254,10 +281,9 @@
 
 // GLOBALS
 extern Ptr heap_base;
-extern Ptr heap_ceiling;
 
 extern int pending_finalizers;
-extern int chunk_size;
+extern uint32 chunk_size;
 extern bool cleaning_needed;
 extern std::vector<unsigned char*> pinned_areas;
 extern unsigned pinned_areas_pos;
@@ -324,7 +350,7 @@
     assert((hash & ~0x7e) == 0x3a00);
 }
 #else /* DEBUG_HASHCODE */
-inline int gen_hashcode(void *addr) { return (int)addr; }
+inline int gen_hashcode(void *addr) { return (int)(POINTER_SIZE_INT)addr; }
 inline void check_hashcode(int hash) {}
 #endif /* DEBUG_HASHCODE */
 

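slot.h is included above but its contents are not shown in this part of the diff, so only the use sites are visible here: roots and the mark stack now carry Slot values, and slide_compact.h reads slot.ptr(). From those uses, Slot has to wrap the address of a (possibly compressed) reference field; presumably it also offers read()/write() accessors that add and subtract heap_base, with the RESERVED_FOR_HEAP_NULL pad at the bottom of the heap making a decompressed zero offset land on a harmless non-object address. A sketch under those assumptions, for the POINTER64 case only (on 32-bit builds the field would simply hold a raw pointer):

    typedef uint32 Reference;      // assumed: offset from heap_base

    class Slot {
        Reference *addr;           // address of the reference field
    public:
        Slot(Reference *a) : addr(a) {}
        void *ptr() { return addr; }          // used by is_left_object()
        Partial_Reveal_Object *read() {
            return (Partial_Reveal_Object*) (heap_base + *addr);
        }
        void write(Partial_Reveal_Object *obj) {
            *addr = (Reference) ((Ptr) obj - heap_base);
        }
    };
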
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/init.cpp Mon Oct  2 19:26:48 2006
@@ -32,6 +32,7 @@
 #include "gc_types.h"
 #include "cxxlog.h"
 #include "timer.h"
+#include "apr_time.h"
 #ifndef _WIN32
 #include <sys/mman.h>
 #endif
@@ -41,14 +42,14 @@
 unsigned int heap_mark_phase;
 
 HeapSegment heap;
-int chunk_size;
+uint32 chunk_size;
 
 int pending_finalizers = false;
 
 #define RESERVED_FOR_LAST_HASH 4
 
 #define MB * (1024 * 1024)
-int HEAP_SIZE_DEFAULT = 256 MB;
+size_t HEAP_SIZE_DEFAULT = 256 MB;
 
 unsigned int prev_mark_phase;
 bool cleaning_needed = false;
@@ -57,16 +58,15 @@
 int64 timer_start;
 int64 timer_dt;
 Ptr heap_base;
-Ptr heap_ceiling;
 size_t max_heap_size;
 size_t min_heap_size;
 bool ignore_finalizers = false;
 bool remember_root_set = false;
 const char *lp_hint = NULL;
 
-static long parse_size_string(const char* size_string) {
+static size_t parse_size_string(const char* size_string) {
     size_t len = strlen(size_string);
-    int unit = 1;
+    size_t unit = 1;
     if (tolower(size_string[len - 1]) == 'k') {
         unit = 1024;
     } else if (tolower(size_string[len - 1]) == 'm') {
@@ -74,8 +74,8 @@
     } else if (tolower(size_string[len - 1]) == 'g') {
         unit = 1024 * 1024 * 1024;
     }
-    long size = atol(size_string);
-    long res = size * unit;
+    size_t size = atol(size_string);
+    size_t res = size * unit;
     if (res / unit != size) {
         // overflow happened
         return 0;
@@ -105,13 +105,13 @@
 
 static void parse_configuration_properties() {
     max_heap_size = HEAP_SIZE_DEFAULT;
-    min_heap_size = 8 MB;
+    min_heap_size = 16 MB;
     if (is_property_set("gc.mx")) {
         max_heap_size = parse_size_string(vm_get_property_value("gc.mx"));
 
-        if (max_heap_size < 8 MB) {
+        if (max_heap_size < 16 MB) {
             INFO("max heap size is too small: " << max_heap_size);
-            max_heap_size = 8 MB;
+            max_heap_size = 16 MB;
         }
         if (0 == max_heap_size) {
             INFO("wrong max heap size");
@@ -119,15 +119,15 @@
         }
 
         min_heap_size = max_heap_size / 10;
-        if (min_heap_size < 8 MB) min_heap_size = 8 MB;
+        if (min_heap_size < 16 MB) min_heap_size = 16 MB;
     }
 
     if (is_property_set("gc.ms")) {
         min_heap_size = parse_size_string(vm_get_property_value("gc.ms"));
 
-        if (min_heap_size < 1 MB) {
+        if (min_heap_size < 16 MB) {
             INFO("min heap size is too small: " << min_heap_size);
-            min_heap_size = 1 MB;
+            min_heap_size = 16 MB;
         }
 
         if (0 == min_heap_size)
@@ -139,6 +139,17 @@
         max_heap_size = min_heap_size;
     }
 
+#ifdef POINTER64
+    size_t max_compressed = (4096 * (size_t) 1024 * 1024);
+    if (max_heap_size > max_compressed) {
+        INFO("maximum heap size is limited"
+                " to 4 GB due to pointer compression");
+        max_heap_size = max_compressed;
+        if (min_heap_size > max_heap_size)
+            min_heap_size = max_heap_size;
+    }
+#endif
+
 
     if (is_property_set("gc.lp")) {
         lp_hint = vm_get_property_value("gc.lp");
@@ -147,12 +158,8 @@
     if (is_property_set("gc.type"))
         gc_algorithm = get_property_value_int("gc.type");
 
-#if (defined _DEBUG) || ! (defined NDEBUG)
-    char *build_mode = " (debug)";
-#else
-    char *build_mode = " (release)";
-#endif
-    INFO("gc 4.1" << build_mode);
+    // version
+    INFO(gc_version_string());
     INFO("GC type = " << gc_algorithm);
 
     if (get_property_value_boolean("gc.ignore_finalizers", false)) {
@@ -174,13 +181,29 @@
 }
 
 #ifdef _WIN32
-static inline void *reserve_mem(long size) {
+static inline void *reserve_mem(size_t size) {
     return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);
 }
 static const void* RESERVE_FAILURE = 0;
 #else
-static inline void *reserve_mem(long size) {
+static inline void *reserve_mem(size_t size) {
+#ifdef POINTER64
+    /* We have plenty of address space, so protect the inaccessible parts of
+     * the heap to catch some stray pointers early. */
+    size_t four_gig = 4 * 1024 * (size_t) 1024 * 1024;
+    size_t padding = 4 * 1024 * (size_t) 1024 * 1024;
+    void *addr = mmap(0, padding + four_gig, PROT_READ | PROT_WRITE,
+            MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    assert(addr != MAP_FAILED);
+    UNUSED int err = mprotect((Ptr)addr, padding, PROT_NONE);
+    assert(!err);
+    err = mprotect((Ptr)addr + padding + max_heap_size,
+                    four_gig - max_heap_size, PROT_NONE);
+    assert(!err);
+    return (Ptr)addr + padding;
+#else
     return mmap(0, max_heap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+#endif
 }
 static const void* RESERVE_FAILURE = MAP_FAILED;
 #endif
@@ -213,7 +236,7 @@
     if (heap_base == NULL) {
         heap_base = (unsigned char*) reserve_mem(max_heap_size);
         if (heap_base == RESERVE_FAILURE) {
-            long dec = 100 * 1024 * 1024;
+            size_t dec = 100 * 1024 * 1024;
             max_heap_size = max_heap_size / dec * dec;
 
             while(true) {
@@ -231,12 +254,13 @@
         ECHO("WARNING: min heap size reduced to " << mb(min_heap_size) << " Mb");
     }
 
-    heap_ceiling = heap_base + max_heap_size;
+    heap.ceiling = heap_base + min_heap_size - RESERVED_FOR_LAST_HASH;
 
     heap.base = heap_base;
     heap.size = min_heap_size;
-    heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
     heap.max_size = max_heap_size;
+    heap.roots_start = heap.roots_pos = heap.roots_end =
+        heap.base + heap.max_size - RESERVED_FOR_LAST_HASH;
 
 #ifdef _WIN32
     void *res;
@@ -253,6 +277,7 @@
 void gc_init() {
     INFO2("gc.init", "GC init called\n");
     init_mem();
+    init_slots();
     init_select_gc();
     gc_end = apr_time_now();
     timer_init();
@@ -311,8 +336,8 @@
     unsigned char *start = mark_bits + (heap.compaction_region_start() - heap_base) / sizeof(void*) / 8;
     unsigned char *end = mark_bits + (heap.compaction_region_end() - heap_base + sizeof(void*) * 8 - 1) / sizeof(void*) / 8;
     int page = 4096; // FIXME
-    mark_bits_allocated_start = (unsigned char*)((int)start & ~(page - 1));
-    mark_bits_allocated_end = (unsigned char*)(((int)end + page - 1) & ~(page - 1));
+    mark_bits_allocated_start = (unsigned char*)((POINTER_SIZE_INT)start & ~(page - 1));
+    mark_bits_allocated_end = (unsigned char*)(((POINTER_SIZE_INT)end + page - 1) & ~(page - 1));
 #ifdef _WIN32
     unsigned char *res = (unsigned char*) VirtualAlloc(mark_bits_allocated_start,
             mark_bits_allocated_end - mark_bits_allocated_start, MEM_COMMIT, PAGE_READWRITE);
@@ -334,7 +359,8 @@
 
 void heap_extend(size_t size) {
     size = (size + 65535) & ~65535;
-    if (size > max_heap_size) size = max_heap_size;
+    size_t max_size = heap.max_size - (heap.roots_end - heap.roots_start);
+    if (size > max_size) size = max_size;
     if (size <= heap.size) return;
 
 #ifdef _WIN32
@@ -345,7 +371,7 @@
     unsigned char *old_ceiling = heap.ceiling;
     heap.ceiling = heap.base + heap.size - RESERVED_FOR_LAST_HASH;
 
-    if (old_ceiling == heap.pos_limit) {
+    if (heap.pos_limit == old_ceiling) {
         heap.pos_limit = heap.ceiling;
     }
     chunk_size = round_down(heap.size / (10 * num_threads),128);

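The em64t branch of reserve_mem is easier to follow as a picture: it reserves padding + 4 GB of address space, then revokes access to everything outside the usable heap. Since a compressed reference decodes to heap_base plus a 32-bit offset, any offset that escapes the committed heap (up to 4 GB above it) and any negative index (down to 4 GB below it) hits a PROT_NONE page and faults immediately. Roughly, not to scale:

    addr            addr + padding           + max_heap_size         + 4 GB
      | PROT_NONE (4 GB guard) | heap, PROT_READ|WRITE | PROT_NONE guard |
                               ^-- value returned as the heap base
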
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/prepare.cpp Mon Oct  2 19:26:48 2006
@@ -124,9 +124,14 @@
         int el_offset;
         for(el_offset = -1; el_size; el_size >>= 1, el_offset++);
 
+        // FIXME: use data from VM
+#ifdef _EM64T_
+        int first_element = 16;
+#else
         int first_element = (el_offset == 3) ? 16 : 12;
+#endif
 
-        int flags = GC_VT_ARRAY
+        POINTER_SIZE_INT flags = GC_VT_ARRAY
             | (el_offset << GC_VT_ARRAY_ELEMENT_SHIFT)
             | (first_element << GC_VT_ARRAY_FIRST_SHIFT);
 
@@ -143,7 +148,7 @@
     GC_VTable_Info *info = build_slot_offset_array(ch, vt, type);
     info->size_and_ref_type = class_get_boxed_data_size(ch) | (int)type;
 
-    int flags = 0;
+    POINTER_SIZE_INT flags = 0;
     if (!ignore_finalizers && class_is_finalizable(ch)) {
         flags |= GC_VT_FINALIZIBLE;
     }
@@ -153,7 +158,7 @@
         flags |= GC_VT_HAS_SLOTS;
     }
 
-    int addr = (int) info;
+    POINTER_SIZE_INT addr = (POINTER_SIZE_INT) info;
     assert((addr & 7) == 0); // required alignment
 
     flags |= addr;

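The hardcoded first_element = 16 above combines with the array size formula in gc_types.h; a quick worked check, assuming GC_OBJECT_ALIGNMENT is 8 on em64t:

    // int[10] on em64t: el_offset == 2, first_element == 16
    unsigned size = (16 + (10 << 2) + (8 - 1)) & ~(8 - 1);   // == 56 bytes

On ia32 the same array keeps the 12-byte header and 4-byte alignment, giving (12 + 40 + 3) & ~3 == 52 bytes.
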
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/root_set_cache.h Mon Oct  2 19:26:48 2006
@@ -18,5 +18,5 @@
  * @author Ivan Volosyuk
  */
 
-typedef fast_list<Partial_Reveal_Object**,65536> roots_vector;
+typedef fast_list<Slot,65536> roots_vector;
 extern roots_vector root_set;

Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/selector.cpp Mon Oct  2 19:26:48 2006
@@ -22,25 +22,24 @@
 #include "collect.h"
 #include <math.h>
 
-void reserve_old_object_space(int size) {
-    size &= ~3;
+void reserve_old_object_space(POINTER_SIZE_SINT size) {
+    size &= ~(GC_OBJECT_ALIGNMENT - 1);
 
-    int free = heap.old_objects.end - heap.old_objects.pos;
     if (size < 0) {
         TRACE2("gc.select", "Reserve old object space: can't shrink old object space");
         return;
     }
 
     assert(heap.old_objects.end == heap.pos);
-    if (heap.old_objects.end + size > heap.ceiling) {
-        size = heap.ceiling - heap.old_objects.end;
+    if (heap.old_objects.end + size > heap.allocation_region_end()) {
+        size = heap.allocation_region_end() - heap.old_objects.end;
     }
 
     heap.old_objects.end += size;
     TRACE2("gc.select", "Reserved space = " << mb(heap.old_objects.end - heap.old_objects.pos));
 
     // balancing free areas.
-    pinned_areas.push_back(heap.ceiling);
+    pinned_areas.push_back(heap.allocation_region_end());
 
     // update heap.old_objects.pos_limit
     if (heap.old_objects.pos_limit == heap.pos) {
@@ -74,7 +73,7 @@
     heap.old_objects.end = heap.pos;
 
     // restore pinned areas.
-    pinned_areas.pop_back();
+    pinned_areas.pop_back(); // heap.allocation_region_end()
 }
 
 unsigned char *select_gc(int size) {
@@ -120,7 +119,7 @@
     TRACE2("gc.mem", "select_gc2 = " << res);
 
     if (res == 0 && heap.size != heap.max_size) {
-        assert(heap.pos_limit == heap.ceiling);
+        assert(heap.pos_limit == heap.allocation_region_end());
         heap_extend(round_up(heap.size + size, 65536));
         if (heap.pos + size <= heap.pos_limit) {
             res = heap.pos;
@@ -167,20 +166,25 @@
 bool need_compaction_next_gc() {
     if (heap.working_set_size == 0 || !gc_adaptive) {
         TRACE2("gc.adaptive", "static Smin analisis");
-        return heap.ceiling - heap.pos < heap.size * 0.7f;
+        return heap.allocation_region_end() - heap.pos < heap.size * 0.7f;
     } else {
-        float smin = Smin(heap.size - heap.working_set_size,
+        float smin = Smin(heap.roots_start - heap.base - RESERVED_FOR_HEAP_NULL - heap.working_set_size,
                 heap.Tcompact, heap.Tcopy, heap.dS_copy);
-        float free = (float) (heap.ceiling - heap.pos);
+        float free = (float) (heap.allocation_region_end() - heap.old_objects.pos);
+        INFO2("gc.smin", "smin = " << mb((size_t)smin)
+                << " (working set " << mb((size_t)heap.working_set_size)
+                << " Tfast " << (int)(heap.Tcopy / 1000.)
+                << " Tslow " << (int)(heap.Tcompact / 1000.)
+                << " dS " << mb((size_t)heap.dS_copy)
+                << "), free = " << mb((int)free));
         //INFO2("gc.logic", "Smin = " << (int) mb((int)smin) << "mb, free = " << mb((int)free) << " mb");
         return free < smin;
-            
     }
 }
 
 static void check_heap_extend() {
-    int free_space = heap.allocation_region_end() - heap.allocation_region_start();
-    int used_space = heap.size - free_space;
+    size_t free_space = heap.allocation_region_end() - heap.allocation_region_start();
+    size_t used_space = heap.size - free_space;
 
     if (free_space < used_space) {
         size_t new_heap_size = used_space * 8;
@@ -198,6 +202,7 @@
 }
 
 size_t correction;
+Ptr prev_alloc_start;
 
 static void update_evacuation_area() {
     POINTER_SIZE_SINT free = heap.allocation_region_end() - heap.allocation_region_start();
@@ -210,10 +215,16 @@
         return;
     }
 
+    POINTER_SIZE_SINT dS = heap.old_objects.pos - prev_alloc_start;
+    if (prev_alloc_start != 0) {
+        heap.dS_copy = (float)dS;
+    }
+    prev_alloc_start = heap.old_objects.pos;
+
     if (need_compaction_next_gc()) {
         //INFO2("gc.logic", "compaction triggered by Smin");
         heap.next_gc = GC_FULL;
-        heap.dS_copy = 0;
+        prev_alloc_start = 0;
         return;
     }
 
@@ -232,7 +243,6 @@
         return;
     }
     assert(incr > 0);
-    heap.dS_copy = (float)incr;
 
     /*INFO2("gc.logic", 
             "mb overflow = " << overflow / 1024 / 1024
@@ -283,29 +293,25 @@
 }
 
 void select_force_gc() {
+    vm_gc_lock_enum();
     if (gc_algorithm < 10) {
-        vm_gc_lock_enum();
         force_gc();
-        vm_gc_unlock_enum();
-        vm_hint_finalize();
     } else if ((gc_algorithm / 10) == 2) {
-        vm_gc_lock_enum();
         full_gc(0);
-        vm_gc_unlock_enum();
-        vm_hint_finalize();
     } else if ((gc_algorithm / 10) == 3) {
-        vm_gc_lock_enum();
+        heap.old_objects.prev_pos = heap.old_objects.pos;
         copy_gc(0);
-        vm_gc_unlock_enum();
-        vm_hint_finalize();
     }
+    vm_gc_unlock_enum();
+    vm_hint_finalize();
 }
 
 void init_select_gc() {
-    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit = heap.base;
+    heap.old_objects.end = heap.old_objects.pos = heap.old_objects.pos_limit
+        = heap.base + RESERVED_FOR_HEAP_NULL;
 
-    heap.pos = heap.base;
-    heap.pos_limit = heap.ceiling;
+    heap.pos = heap.base + RESERVED_FOR_HEAP_NULL;
+    heap.pos_limit = heap.allocation_region_end();
 
     heap.incr_abs = 0;
     heap.incr_rel = 0.2f;
@@ -315,15 +321,14 @@
     pinned_areas_pos = 1;
 
     if (gc_algorithm % 10 == 0) {
-        int reserve = heap.size / 5;
+        size_t reserve = heap.size / 5;
         reserve_old_object_space(reserve);
-        heap.predicted_pos = heap.base + reserve;
+        heap.predicted_pos = heap.base + reserve + RESERVED_FOR_HEAP_NULL;
     }
     if (gc_algorithm % 10 == 3) {
-        int reserve = heap.size / 3;
+        size_t reserve = heap.size / 3;
         reserve_old_object_space(reserve);
-        heap.predicted_pos = heap.base + reserve;
+        heap.predicted_pos = heap.base + reserve + RESERVED_FOR_HEAP_NULL;
     }
     heap.next_gc = GC_COPY;
-
 }

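The dS_copy bookkeeping above changed meaning: instead of recording the planned evacuation increment, update_evacuation_area now measures how far heap.old_objects.pos actually advanced since the previous copying collection, and clears the baseline whenever a full GC is scheduled so the next sample is discarded. A sketch of that logic in isolation, using the names from the hunk above:

    // prev_alloc_start == 0 means "no valid baseline" (it is reset when
    // a compaction is triggered), so that sample is skipped.
    static void sample_dS_copy() {
        POINTER_SIZE_SINT dS = heap.old_objects.pos - prev_alloc_start;
        if (prev_alloc_start != 0)
            heap.dS_copy = (float) dS;  // bytes promoted since last copy GC
        prev_alloc_start = heap.old_objects.pos;
    }
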
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h?view=diff&rev=452293&r1=452292&r2=452293
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/gc/src/slide_compact.h Mon Oct  2 19:26:48 2006
@@ -18,17 +18,10 @@
  * @author Ivan Volosyuk
  */
 
-struct InteriorPointer {
-    Partial_Reveal_Object *obj;
-    int offset;
-    Partial_Reveal_Object **interior_ref;
-};
-
 extern fast_list<Partial_Reveal_Object*, 65536> objects;
-extern fast_list<InteriorPointer,256> interior_pointers;
 
 
-inline bool is_left_object(Partial_Reveal_Object *refobj, Partial_Reveal_Object **ref) {
-    return (void*)refobj <= (void*) ref;
+inline bool is_left_object(Partial_Reveal_Object *refobj, Slot slot) {
+    return (void*)refobj <= slot.ptr();
 }
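
is_left_object keeps its old meaning under the Slot rewrite: it compares the target object's address with the address of the reference field itself (slot.ptr()), which is presumably how the sliding compactor decides whether the target has already been slid left, so the slot can be fixed up now, or whether the update must wait. A hypothetical caller (forwarded() and postpone() are made-up names for illustration):

    if (is_left_object(target, slot)) {
        slot.write(forwarded(target));  // target already compacted
    } else {
        postpone(slot);                 // patch after target moves
    }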