You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@harmony.apache.org by ge...@apache.org on 2005/10/05 04:20:10 UTC
svn commit: r294974 [14/25] - in
/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm: ./ jchevm/
jchevm/doc/ jchevm/etc/ jchevm/include/ jchevm/java/ jchevm/java/org/
jchevm/java/org/dellroad/ jchevm/java/org/dellroad/jc/
jchevm/java/org/dellroad...
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/gc_scan.c
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/gc_scan.c?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/gc_scan.c (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/gc_scan.c Tue Oct 4 19:19:16 2005
@@ -0,0 +1,989 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: gc_scan.c,v 1.17 2005/03/20 23:06:10 archiecobbs Exp $
+ */
+
+#include "libjc.h"
+
+/* Internal functions */
+static int _jc_gc_trace(_jc_env *env, _jc_trace_info *trace);
+static int _jc_gc_mark_object(_jc_trace_info *trace, _jc_object *obj);
+static jint _jc_gc_push_refs(_jc_env *env, _jc_uni_mem *uni,
+ _jc_scan_frame **framep, _jc_object **refs, int nrefs);
+static int _jc_get_explicit_refs(_jc_env *env, _jc_type **types,
+ int ntypes, _jc_object **list);
+
+/*
+ * Collect garbage.
+ *
+ * This is a "stop the world" mark-sweep collection algorithm.
+ *
+ * When "urgent" is true, soft references are not followed during the
+ * trace (see trace->follow_soft below), which allows them to be
+ * cleared and their referents reclaimed.
+ *
+ * Because we allocate instances of java.lang.Class within the corresponding
+ * class loader's memory area instead of the heap, for the purposes of
+ * marking memory in use we treat the class loader memory area as a single
+ * blob which is either marked or not marked. The "marked bit" is contained
+ * in the corresponding _jc_class_loader structure.
+ *
+ * Returns JNI_OK on success, JNI_ERR on failure.
+ * If unsuccessful, an exception is stored.
+ */
+jint
+_jc_gc(_jc_env *env, jboolean urgent)
+{
+	_jc_jvm *const vm = env->vm;
+	_jc_heap *const heap = &vm->heap;
+	jboolean clipped_stack = JNI_FALSE;
+	_jc_object **root_set = NULL;
+	_jc_trace_info *trace = NULL;
+	struct timeval start_time;
+	_jc_class_loader *loader;
+	char *last_small_page;
+	int root_set_length;
+	_jc_heap_sweep sweep;
+	_jc_object *obj;
+	jint status = JNI_OK;
+	_jc_env *thread;
+	jint gc_cycles;
+	int bsi;
+	int i;
+
+	/*
+	 * Sanity check: if an object is recyclable and has its lockword as
+	 * the first word in a heap block, then when the block is marked free
+	 * it must still look like a recyclable object was there.
+	 */
+	_JC_ASSERT((_JC_HEAP_BLOCK_FREE
+	    & (_JC_LW_LIVE_BIT|_JC_LW_KEEP_BIT|_JC_LW_FINALIZE_BIT)) == 0);
+
+	/* Can't do GC during initial bootstrapping */
+	if (vm->initialization != NULL) {
+		_JC_EX_STORE(env, OutOfMemoryError, "gc during bootstrap");
+		return JNI_ERR;
+	}
+
+	/* Record current GC cycle count */
+	gc_cycles = vm->gc_cycles;
+
+	/* Stop the world and free any leftover thread stacks */
+	_JC_MUTEX_LOCK(env, vm->mutex);
+	_jc_stop_the_world(env);
+	_jc_free_thread_stacks(vm);
+	_JC_MUTEX_UNLOCK(env, vm->mutex);
+
+	/*
+	 * If another thread just did a GC cycle, we don't need to do one;
+	 * the cycle count recorded above detects this race.
+	 */
+	if (vm->gc_cycles != gc_cycles)
+		goto done;
+
+	/* Bump GC cycle counter and visited bit for stack-allocated objects */
+	vm->gc_cycles++;
+	vm->gc_stack_visited ^= _JC_LW_VISITED_BIT;
+
+	/* Verbosity */
+	if ((vm->verbose_flags & (1 << _JC_VERBOSE_GC)) != 0) {
+		VERBOSE(GC, vm, "starting garbage collection #%d",
+		    vm->gc_cycles);
+		gettimeofday(&start_time, NULL);
+	}
+
+#if 0
+	_jc_heap_check(vm);
+#endif
+
+	/* Initialize trace state (one small-object counter per block size) */
+	if ((trace = _JC_STACK_ALLOC(env, sizeof(*trace)
+	    + heap->num_sizes * sizeof(*trace->num_small_objects))) == NULL)
+		goto fail;
+	memset(trace, 0, sizeof(*trace)
+	    + heap->num_sizes * sizeof(*trace->num_small_objects));
+	trace->heap = &vm->heap;
+	trace->follow_soft = !urgent;
+	trace->gc_stack_visited = vm->gc_stack_visited;
+
+	/* Reset all small page hints and "use first" page lists */
+	for (bsi = 0; bsi < heap->num_sizes; bsi++) {
+		heap->sizes[bsi].pages = NULL;
+		heap->sizes[bsi].hint = NULL;
+	}
+
+	/* Unmark all class loaders */
+	LIST_FOREACH(loader, &vm->class_loaders, link)
+		loader->gc_mark = 0;
+
+	/* Clip the Java stack for the current thread */
+	clipped_stack = _jc_stack_clip(env);
+
+	/* Compute the root set */
+	_JC_MUTEX_LOCK(env, vm->mutex);
+	root_set_length = _jc_root_walk(env, &root_set);
+	_JC_MUTEX_UNLOCK(env, vm->mutex);
+	if (root_set_length == -1)
+		goto fail;
+
+	/* Set all object bits LK -> 00 and count finalizable objects */
+	_jc_heap_sweep_init(heap, &sweep);
+	while ((obj = _jc_heap_sweep_next(&sweep, JNI_FALSE)) != NULL) {
+		obj->lockword &= ~(_JC_LW_LIVE_BIT|_JC_LW_KEEP_BIT);
+		if (_JC_LW_TEST(obj->lockword, FINALIZE))
+			trace->num_finalizable++;
+	}
+
+	/* Trace live objects starting with the root set, setting LK -> 11 */
+	trace->mark_bits = _JC_LW_LIVE_BIT | _JC_LW_KEEP_BIT;
+	trace->bottom_frame.posn = 0;
+	trace->bottom_frame.prev = NULL;
+	trace->bottom_frame.next = NULL;
+	trace->bottom_frame.lists[0].start = root_set;
+	trace->bottom_frame.lists[0].end = root_set + root_set_length;
+	if (_jc_gc_trace(env, trace) != JNI_OK)
+		goto fail;
+
+	/*
+	 * If any objects are finalizable but not reachable, we must
+	 * mark them and all other objects reachable from them as keepable.
+	 * That is, they are reachable (via finalization) but not live.
+	 *
+	 * Note: _jc_gc_mark_object() decremented num_finalizable for every
+	 * finalizable object that proved reachable, so the remaining count
+	 * is exactly the number of finalizable-but-unreachable objects.
+	 */
+	_JC_ASSERT(trace->num_finalizable >= 0);
+	if (trace->num_finalizable > 0) {
+		const int num_finalizable = trace->num_finalizable;
+		_jc_object **refs;
+
+		/* Ensure the finalizer thread runs */
+		trace->wakeup_finalizer = JNI_TRUE;
+
+		/* Reset trace info and re-use root set ref list */
+		trace->mark_bits = _JC_LW_KEEP_BIT;	/* note: not LIVE */
+		trace->bottom_frame.posn = 0;
+		trace->bottom_frame.prev = NULL;
+		trace->bottom_frame.next = NULL;
+		trace->bottom_frame.lists[0].start = root_set;
+		trace->bottom_frame.lists[0].end = root_set
+		    + trace->num_finalizable;
+
+		/* Allocate another ref list if needed */
+		if (trace->num_finalizable > root_set_length) {
+			const int num_more_refs
+			    = trace->num_finalizable - root_set_length;
+			_jc_object **more_refs;
+
+			/* Use entire root set ref list */
+			trace->bottom_frame.lists[0].end = root_set
+			    + root_set_length;
+
+			/* Allocate another list for the remainder */
+			if ((more_refs = _JC_STACK_ALLOC(env,
+			    num_more_refs * sizeof(*more_refs))) == NULL)
+				goto fail;
+			trace->bottom_frame.lists[1].start = more_refs;
+			trace->bottom_frame.lists[1].end
+			    = more_refs + num_more_refs;
+			trace->bottom_frame.posn++;
+		}
+
+		/*
+		 * Scan heap for finalizable but not reachable objects.
+		 * Fill up the first (or first two) ref lists.
+		 */
+		_jc_heap_sweep_init(heap, &sweep);
+		refs = root_set;
+		while ((obj = _jc_heap_sweep_next(&sweep, JNI_FALSE)) != NULL) {
+			if ((obj->lockword
+			    & (_JC_LW_LIVE_BIT|_JC_LW_FINALIZE_BIT))
+			    == _JC_LW_FINALIZE_BIT) {
+				if (refs == trace->bottom_frame.lists[0].end) {
+					refs = trace->bottom_frame
+					    .lists[1].start;
+				}
+				*refs++ = obj;
+			}
+		}
+		_JC_ASSERT(refs == trace->bottom_frame.lists[0].end
+		    || refs == trace->bottom_frame.lists[1].end);
+
+		/*
+		 * Trace finalizable-reachable objects, setting K -> 1.
+		 * Note: we should not encounter any new stack allocated
+		 * objects during this scan.
+		 */
+		if (_jc_gc_trace(env, trace) != JNI_OK)
+			goto fail;
+
+		/* Repair count of finalizable objects */
+		_JC_ASSERT(trace->num_finalizable == 0);
+		trace->num_finalizable = num_finalizable;
+	}
+
+	/* Now recycle unreachable blocks and pages */
+	_jc_heap_sweep_init(heap, &sweep);
+	last_small_page = NULL;
+	while ((obj = _jc_heap_sweep_next(&sweep, JNI_TRUE)) != NULL) {
+		const _jc_word lockword = obj->lockword;
+
+		/* Sanity check: LIVE implies KEEP */
+		_JC_ASSERT(_JC_LW_TEST(lockword, KEEP)
+		    || !_JC_LW_TEST(lockword, LIVE));
+
+		/*
+		 * Keep keepable objects and free the rest. But for any
+		 * keepable weak (and maybe soft) references that point to
+		 * objects which are no longer live, clear the reference.
+		 */
+		switch (lockword & (_JC_LW_KEEP_BIT|_JC_LW_SPECIAL_BIT)) {
+		case _JC_LW_SPECIAL_BIT:
+		case 0:
+			goto free_it;
+		case _JC_LW_KEEP_BIT:
+			goto keep_it;
+		case _JC_LW_SPECIAL_BIT | _JC_LW_KEEP_BIT:
+		    {
+			_jc_object **referent;
+
+			/* Rule out non-Reference objects */
+			if (!_jc_subclass_of(obj, vm->boot.types.Reference))
+				goto keep_it;
+
+			/* Rule out referents already cleared or reachable */
+			referent = _JC_VMFIELD(vm,
+			    obj, Reference, referent, _jc_object *);
+			if (*referent == NULL
+			    || _JC_LW_TEST((*referent)->lockword, LIVE))
+				goto keep_it;
+
+			/*
+			 * Sanity check: if we're following soft references,
+			 * then a soft reference's referent must be live.
+			 */
+			_JC_ASSERT(!trace->follow_soft
+			    || !_jc_subclass_of(obj,
+			      vm->boot.types.SoftReference));
+
+			/*
+			 * Rule out phantom references if the referent
+			 * object is not recyclable yet.
+			 */
+			if (_jc_subclass_of(obj,
+			      vm->boot.types.PhantomReference)
+			    && ((*referent)->lockword
+			      & (_JC_LW_FINALIZE_BIT|_JC_LW_KEEP_BIT)) != 0)
+				goto keep_it;
+
+			/* Clear the reference */
+			trace->num_refs_cleared++;
+			*referent = NULL;
+
+			/* Wakeup finalizer if reference needs enqueuing */
+			if (*_JC_VMFIELD(vm, obj,
+			    Reference, queue, _jc_object *) != NULL)
+				trace->wakeup_finalizer = JNI_TRUE;
+			break;
+		    }
+		}
+
+keep_it:
+		/* Update stats and mark page in use */
+		if (sweep.size != NULL) {
+			trace->num_small_objects[sweep.bsi]++;
+			if (last_small_page != sweep.page) {
+				last_small_page = sweep.page;
+				trace->num_small_pages++;
+			}
+			sweep.blocks_live = 1;
+		} else {
+			trace->num_large_objects++;
+			trace->num_large_pages += sweep.npages;
+		}
+
+		/* Leave object in the heap */
+		continue;
+
+free_it:
+		/* Sanity check */
+		_JC_ASSERT((lockword
+		    & (_JC_LW_LIVE_BIT|_JC_LW_FINALIZE_BIT)) == 0);
+
+		/* Update stats */
+		trace->num_recycled_objects++;
+
+		/* Recycle fat lock */
+		if (_JC_LW_TEST(lockword, FAT)) {
+			trace->num_fat_locks_recycled++;
+			_jc_free_lock(vm, obj);
+		}
+
+		/* Mark small page block or large page range free */
+		if (sweep.size != NULL)
+			*((_jc_word *)sweep.block) = _JC_HEAP_BLOCK_FREE;
+		else {
+			for (i = 0; i < sweep.npages; i++) {
+				*_JC_PAGE_ADDR(sweep.page, i)
+				    = _JC_HEAP_PAGE_FREE;
+			}
+		}
+	}
+
+loader_check:
+	/*
+	 * Unload unloadable class loaders. Note: destroying a loader
+	 * modifies the list we are iterating over, so we restart the
+	 * scan from the top after each unload.
+	 */
+	LIST_FOREACH(loader, &vm->class_loaders, link) {
+		if (loader->gc_mark)
+			continue;
+		_JC_ASSERT(loader != vm->boot.loader);
+		VERBOSE(GC, vm, "unloading class loader %s@%p (%d classes)",
+		    loader->instance->type->name, loader->instance,
+		    loader->defined_types.size);
+		_JC_MUTEX_LOCK(env, vm->mutex);
+		_jc_destroy_loader(vm, &loader);
+		_JC_MUTEX_UNLOCK(env, vm->mutex);
+		goto loader_check;
+	}
+
+	/* Reset next free page pointer */
+	heap->next_page = 0;
+
+	/* Mark all threads as no longer memory critical (we hope) */
+	LIST_FOREACH(thread, &vm->threads.alive_list, link)
+		thread->out_of_memory = 0;
+
+	/* Wake up finalizer thread if there is any work for it to do */
+	if (trace->wakeup_finalizer)
+		_jc_thread_interrupt_instance(vm, *vm->finalizer_thread);
+
+	/* Verbosity */
+	if ((vm->verbose_flags & (1 << _JC_VERBOSE_GC)) != 0) {
+		struct timeval finish_time;
+		int num_small_objects;
+		int num_loader_pages;
+		int num_pages;
+		char buf[80];
+		int bsi;
+
+		/* Calculate time spent during GC */
+		gettimeofday(&finish_time, NULL);
+		finish_time.tv_sec -= start_time.tv_sec;
+		finish_time.tv_usec -= start_time.tv_usec;
+		if (finish_time.tv_usec < 0) {
+			finish_time.tv_sec--;
+			finish_time.tv_usec += 1000000;
+		}
+
+		/* Sum total number of small objects */
+		num_small_objects = 0;
+		for (bsi = 0; bsi < heap->num_sizes; bsi++)
+			num_small_objects += trace->num_small_objects[bsi];
+
+		/* Sum total number of pages in use */
+		num_pages = trace->num_small_pages + trace->num_large_pages;
+		num_loader_pages = vm->max_loader_pages
+		    - vm->avail_loader_pages;
+
+		/* Print summary info */
+		VERBOSE(GC, vm, "heap pages in use: %d/%d (%d%%)",
+		    num_pages, heap->num_pages,
+		    (num_pages * 100) / heap->num_pages);
+		VERBOSE(GC, vm, "class loader pages in use: %d/%d (%d%%)",
+		    num_loader_pages, vm->max_loader_pages,
+		    (num_loader_pages * 100) / vm->max_loader_pages);
+		VERBOSE(GC, vm, "number of small objects: %d in %d pages",
+		    num_small_objects, trace->num_small_pages);
+		VERBOSE(GC, vm, "number of large objects: %d in %d pages",
+		    trace->num_large_objects, trace->num_large_pages);
+		VERBOSE(GC, vm, "number of finalizable objects: %d",
+		    trace->num_finalizable);
+		VERBOSE(GC, vm, "number of references cleared: %d",
+		    trace->num_refs_cleared);
+		VERBOSE(GC, vm, "number of objects reclaimed: %d",
+		    trace->num_recycled_objects);
+		VERBOSE(GC, vm, "number of fat locks recycled: %d",
+		    trace->num_fat_locks_recycled);
+		VERBOSE(GC, vm, "distribution of small object sizes:");
+		for (bsi = 0; bsi < heap->num_sizes; bsi++) {
+			if (bsi % 4 == 0)
+				*buf = '\0';
+			snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+			    "%5d:%6d", heap->sizes[bsi].size,
+			    trace->num_small_objects[bsi]);
+			if ((bsi + 1) % 4 == 0 || bsi == heap->num_sizes - 1)
+				VERBOSE(GC, vm, "%s", buf);
+		}
+		/* NOTE(review): tv_sec is time_t; "%d" assumes it fits in
+		   an int — confirm on targets where time_t is long */
+		VERBOSE(GC, vm, "garbage collection completed in %d.%03d sec",
+		    finish_time.tv_sec, finish_time.tv_usec / 1000);
+	}
+
+#if 0
+	/*
+	 * This dumps the number of objects of each type on the stack.
+	 * Debug-only code; note the nested function is a GCC extension.
+	 */
+	{
+		_jc_splay_tree tree;
+		_jc_type_node **nodes;
+
+		_jc_splay_init(&tree, _jc_node_cmp, _JC_OFFSETOF(_jc_type_node, node));
+		_jc_heap_sweep_init(heap, &sweep);
+		while ((obj = _jc_heap_sweep_next(&sweep, JNI_FALSE)) != NULL) {
+			_jc_type_node *node;
+			_jc_type_node key;
+
+			key.type = obj->type;
+			if ((node = _jc_splay_find(&tree, &key)) == NULL) {
+				node = _jc_vm_zalloc(env, sizeof(*node));
+				node->type = obj->type;
+				node->thread = (void *)1;
+				_jc_splay_insert(&tree, node);
+			} else
+				node->thread = (void *)((int)node->thread + 1);
+		}
+		nodes = alloca(tree.size * sizeof(*nodes));
+		_jc_splay_list(&tree, (void **)nodes);
+		static int compare(const void *v1, const void *v2) {
+			const _jc_type_node *const n1 = *(const _jc_type_node **)v1;
+			const _jc_type_node *const n2 = *(const _jc_type_node **)v2;
+			return (int)n2->thread - (int)n1->thread;
+		}
+		qsort(nodes, tree.size, sizeof(*nodes), compare);
+		for (i = 0; i < tree.size; i++) {
+			_jc_type_node *node = nodes[i];
+
+			printf("%7d %s\n", (int)node->thread, node->type->name);
+			_jc_vm_free(&node);
+		}
+	}
+#endif
+
+done:
+#if 0
+	_jc_heap_check(vm);
+#endif
+
+	/* Unclip Java stack */
+	if (clipped_stack)
+		_jc_stack_unclip(env);
+
+	/* Free root set memory */
+	_jc_vm_free(&root_set);
+
+	/* Resume the world */
+	_JC_MUTEX_LOCK(env, vm->mutex);
+	_jc_resume_the_world(env);
+	_JC_MUTEX_UNLOCK(env, vm->mutex);
+
+	/* If we woke up the finalizer, give it a chance to run */
+	if (trace != NULL && trace->wakeup_finalizer)
+		sched_yield();
+
+	/* Done */
+	return status;
+
+fail:
+	/* Restore the heap to a normal state: re-mark everything live */
+	_jc_heap_sweep_init(heap, &sweep);
+	while ((obj = _jc_heap_sweep_next(&sweep, JNI_FALSE)) != NULL)
+		obj->lockword |= _JC_LW_LIVE_BIT|_JC_LW_KEEP_BIT;
+	/* XXX need to restore stack-allocated objects XXX */
+
+	/* Report failure */
+	VERBOSE(GC, vm, "garbage collection FAILED: %s",
+	    _jc_vmex_names[env->ex.num]);
+
+	/* Return failure */
+	status = JNI_ERR;
+	goto done;
+}
+
+/*
+ * Mark a single object encountered during tracing.
+ *
+ * Returns 1 if the object was already marked (caller may skip it),
+ * or 0 if it was freshly marked here.
+ */
+static inline int
+_jc_gc_mark_object(_jc_trace_info *trace, _jc_object *obj)
+{
+	const _jc_word lw = obj->lockword;
+
+	/* Sanity check */
+	_JC_ASSERT(_JC_LW_TEST(lw, ODD));
+
+	/* Stack-allocated objects are tracked via the VISITED bit */
+	if (!_JC_IN_HEAP(trace->heap, obj)) {
+
+		/* Already visited during this GC cycle? */
+		if ((lw & _JC_LW_VISITED_BIT)
+		    == trace->gc_stack_visited)
+			return 1;
+
+		/* Stack objects should only show up on the first pass */
+		_JC_ASSERT(trace->mark_bits
+		    == (_JC_LW_KEEP_BIT | _JC_LW_LIVE_BIT));
+
+		/* They must already carry LK = 11 */
+		_JC_ASSERT((lw & (_JC_LW_KEEP_BIT | _JC_LW_LIVE_BIT))
+		    == (_JC_LW_KEEP_BIT | _JC_LW_LIVE_BIT));
+
+		/* Flip the visited bit to match this cycle's polarity */
+		obj->lockword = lw ^ _JC_LW_VISITED_BIT;
+		return 0;
+	}
+
+	/* Heap object: a set KEEP bit means it's already marked */
+	if (_JC_LW_TEST(lw, KEEP))
+		return 1;
+
+	/* This finalizable object turned out to be reachable */
+	if (_JC_LW_TEST(lw, FINALIZE))
+		trace->num_finalizable--;
+
+	/* Apply the mark bits for the current tracing pass */
+	obj->lockword = lw | trace->mark_bits;
+	return 0;
+}
+
+/*
+ * Push a new list of references onto the scanning stack.
+ *
+ * If the current scan frame is full, advance to the next frame,
+ * allocating a fresh one from the uni-allocator if necessary.
+ * Returns JNI_OK on success or JNI_ERR if allocation fails.
+ */
+static inline jint
+_jc_gc_push_refs(_jc_env *env, _jc_uni_mem *uni, _jc_scan_frame **framep,
+	_jc_object **refs, int nrefs)
+{
+	const int last_slot
+	    = (sizeof((*framep)->lists) / sizeof(*(*framep)->lists)) - 1;
+	_jc_scan_frame *frame = *framep;
+
+	/* Current frame full? Then use the next (possibly new) frame */
+	if (frame->posn == last_slot) {
+		_jc_scan_frame *next = frame->next;
+
+		if (next == NULL) {
+			next = _jc_uni_alloc(env, uni, sizeof(*next));
+			if (next == NULL)
+				return JNI_ERR;
+			next->posn = -1;
+			next->prev = frame;
+			next->next = NULL;
+			frame->next = next;
+		} else
+			_JC_ASSERT(next->posn == -1);
+		frame = next;
+		*framep = frame;
+	}
+
+	/* Record the new reference list in the frame's next slot */
+	frame->posn++;
+	frame->lists[frame->posn].start = refs;
+	frame->lists[frame->posn].end = refs + nrefs;
+	return JNI_OK;
+}
+
+/*
+ * Scan all objects reachable from the supplied root set.
+ *
+ * Tracing is iterative rather than recursive: a stack of reference
+ * lists is kept in a chain of _jc_scan_frame structures, with extra
+ * frames allocated from a local uni-allocator as needed. Several
+ * object kinds get special treatment below: java.lang.Class objects
+ * (mark the class loader instead), Reference objects (skip the
+ * "referent" field), VMThrowable objects (mark classes appearing in
+ * saved stack traces), and ClassLoader objects (mark both associated
+ * loaders).
+ *
+ * Returns JNI_OK on success, JNI_ERR if allocation fails.
+ */
+static int
+_jc_gc_trace(_jc_env *env, _jc_trace_info *trace)
+{
+	_jc_jvm *const vm = env->vm;
+	/*
+	 * Index of the Reference.referent field counting backwards from
+	 * the object pointer; reference fields live at negative offsets
+	 * (see the "(_jc_object **)obj - nrefs" arithmetic below).
+	 */
+	const int referent_index
+	    = -vm->boot.fields.Reference.referent->offset / sizeof(void *);
+	_jc_scan_frame *frame = &trace->bottom_frame;
+	_jc_class_loader *loader = NULL;
+	/* second loader queued for "do_class_loader" (ClassLoader case) */
+	_jc_class_loader *other_loader = NULL;
+	_jc_uni_mem uni;
+
+	/* Initialize uni-allocator */
+	_JC_ASSERT(frame->next == NULL);
+	_jc_uni_alloc_init(&uni, 0, NULL);
+
+	/* Recursively scan references */
+	while (1) {
+		_jc_object *referent;
+		_jc_object *obj;
+		int nrefs;
+
+		/* Sanity check */
+		_JC_ASSERT(frame->next == NULL || frame->next->posn == -1);
+		_JC_ASSERT(frame != NULL && frame->posn >= 0);
+		_JC_ASSERT(frame->lists[frame->posn].start
+		    <= frame->lists[frame->posn].end);
+
+		/*
+		 * Check for end of reference list; pop to the previous
+		 * list (or frame), terminating when the stack is empty.
+		 */
+		if (frame->lists[frame->posn].start
+		    == frame->lists[frame->posn].end) {
+			if (frame->posn-- == 0 && (frame = frame->prev) == NULL)
+				break;
+			continue;
+		}
+
+		/*
+		 * Extract the next object pointer in the reference list
+		 * sitting on the top of the scan stack. Ignore null's.
+		 */
+		if ((obj = *frame->lists[frame->posn].start++) == NULL)
+			continue;
+
+		/*
+		 * Handle the common case of a normal (non-special) object.
+		 * We need to follow these references:
+		 *
+		 *	(a) explicit references, i.e., object reference fields
+		 *	(b) implicit reference to the object's class loader
+		 */
+		if (!_JC_LW_TEST(obj->lockword, SPECIAL)) {
+
+			/* Mark object; if already marked, skip it */
+			if (_jc_gc_mark_object(trace, obj))
+				continue;
+
+do_normal_object:
+			/* Push object's explicit references onto the stack */
+			if ((obj->lockword & _JC_LW_REF_COUNT_MASK) != 0) {
+				nrefs = _jc_num_refs(obj);
+				if (_jc_gc_push_refs(env, &uni, &frame,
+				    (_jc_object **)obj - nrefs, nrefs)
+				    != JNI_OK)
+					goto fail;
+			}
+
+			/* Now handle object's implicit Class reference */
+			if (!(loader = obj->type->loader)->gc_mark)
+				goto do_class_loader;
+			continue;
+		}
+
+		/*
+		 * Special case: java.lang.Class objects. They are allocated
+		 * from class loader memory rather than the heap, so we don't
+		 * mark them, but we do mark their associated class loaders.
+		 *
+		 * The Class class itself is loaded by the bootstrap loader.
+		 */
+		if (obj->type == vm->boot.types.Class) {
+			_jc_type *type;
+
+			/* Sanity check */
+			_JC_ASSERT(!_JC_IN_HEAP(&vm->heap, obj));
+
+			/* Get class associated with this Class object */
+			type = _jc_get_vm_pointer(obj,
+			    vm->boot.fields.Class.vmdata);
+
+			/* Handle class' class loader */
+			if (!(loader = type->loader)->gc_mark)
+				goto do_class_loader;
+			continue;
+		}
+
+		/* Non-Class special object: first, mark it normally */
+		if (_jc_gc_mark_object(trace, obj))
+			continue;
+
+		/*
+		 * Special case: soft/weak/phantom references.
+		 * We have to handle the 'referent' field specially.
+		 */
+		if (!_jc_subclass_of(obj, vm->boot.types.Reference))
+			goto not_reference_object;
+
+		/* Sanity check */
+		_JC_ASSERT(_jc_subclass_of(obj, vm->boot.types.WeakReference)
+		    || _jc_subclass_of(obj, vm->boot.types.SoftReference)
+		    || _jc_subclass_of(obj, vm->boot.types.PhantomReference));
+
+		/* If "referent" is null, treat object normally */
+		if ((referent = *_JC_VMFIELD(vm, obj,
+		    Reference, referent, _jc_object *)) == NULL)
+			goto do_normal_object;
+
+		/* If we're following soft references, treat them normally */
+		if (trace->follow_soft
+		    && _jc_subclass_of(obj, vm->boot.types.SoftReference))
+			goto do_normal_object;
+
+		/*
+		 * Follow all object references except "referent": push
+		 * the fields on either side of it as two separate lists.
+		 */
+		if (referent_index > 1) {
+			if (_jc_gc_push_refs(env, &uni, &frame,
+			    (_jc_object **)obj - (referent_index - 1),
+			    referent_index - 1) != JNI_OK)
+				goto fail;
+		}
+		if ((nrefs = _jc_num_refs(obj)) > referent_index) {
+			if (_jc_gc_push_refs(env, &uni, &frame,
+			    (_jc_object **)obj - nrefs,
+			    nrefs - referent_index) != JNI_OK)
+				goto fail;
+		}
+
+		/* Now handle object's class loader */
+		if (!(loader = obj->type->loader)->gc_mark)
+			goto do_class_loader;
+		continue;
+
+not_reference_object:
+		/*
+		 * Not a Class or Reference object. Push the object's
+		 * explicit references (reference fields) onto the stack.
+		 * Then all we have left are its implicit references.
+		 */
+		if ((obj->lockword & _JC_LW_REF_COUNT_MASK) != 0) {
+			nrefs = _jc_num_refs(obj);
+			if (_jc_gc_push_refs(env, &uni, &frame,
+			    (_jc_object **)obj - nrefs, nrefs) != JNI_OK)
+				goto fail;
+		}
+
+		/*
+		 * Special case: exceptions. They can contain the stack trace
+		 * as an array of _jc_saved_frame structures in VMThrowable.
+		 * We must mark the classes of methods in the stack trace.
+		 */
+		if (obj->type == vm->boot.types.VMThrowable) {
+			_jc_saved_frame *frames;
+			_jc_byte_array *bytes;
+			_jc_object **refs;
+			int num_frames;
+			int num_refs;
+			int i;
+
+			/* Get saved stack frames from 'vmdata' byte[] array */
+			bytes = *_JC_VMFIELD(vm, obj,
+			    VMThrowable, vmdata, _jc_byte_array *);
+			if (bytes == NULL)
+				goto do_exception_loader;
+			/* Frames start at the first aligned offset in elems */
+			frames = (_jc_saved_frame *)_JC_ROUNDUP2(
+			    (_jc_word)bytes->elems, _JC_FULL_ALIGNMENT);
+			num_frames = (bytes->length -
+			      ((_jc_word)frames - (_jc_word)bytes->elems))
+			    / sizeof(*frames);
+			if (num_frames == 0)
+				goto do_exception_loader;
+
+			/* Count unmarked class loaders referred to by stack */
+			for (num_refs = i = 0; i < num_frames; i++) {
+				_jc_type *const class = frames[i].method->class;
+
+				if (!class->loader->gc_mark
+				    && class->loader != vm->boot.loader)
+					num_refs++;
+			}
+			if (num_refs == 0)
+				goto do_exception_loader;
+
+			/* Allocate an array of references for stack classes */
+			if ((refs = _jc_uni_alloc(env, &uni,
+			    num_refs * sizeof(*refs))) == NULL)
+				goto fail;
+
+			/* Fill in array using stack trace classes */
+			for (num_refs = i = 0; i < num_frames; i++) {
+				_jc_type *const class = frames[i].method->class;
+
+				if (!class->loader->gc_mark
+				    && class->loader != vm->boot.loader)
+					refs[num_refs++] = class->instance;
+			}
+
+			/* Push stack trace references onto the scan stack */
+			if (_jc_gc_push_refs(env, &uni,
+			    &frame, refs, num_refs) != JNI_OK)
+				goto fail;
+
+do_exception_loader:
+			/* Do exception object's class loader */
+			if (!(loader = obj->type->loader)->gc_mark)
+				goto do_class_loader;
+			continue;
+		}
+
+		/*
+		 * Special case: ClassLoaders. We have to mark both the
+		 * implicitly associated class loader (if any) and also
+		 * the class loader associated with the ClassLoader object;
+		 * they may be different and neither be the boot loader.
+		 */
+		if (_jc_subclass_of(obj, vm->boot.types.ClassLoader)) {
+			_jc_class_loader *cl_loader;
+
+			/* Get ClassLoader loader and normal object loader */
+			cl_loader = _jc_get_vm_pointer(obj,
+			    vm->boot.fields.ClassLoader.vmdata);
+			loader = obj->type->loader;
+
+			/*
+			 * Do both class loaders; the second (if any) is
+			 * queued in "other_loader" and handled after the
+			 * first completes.
+			 */
+			if (loader->gc_mark) {
+				if (cl_loader == NULL || cl_loader->gc_mark)
+					continue;
+				loader = cl_loader;
+			} else if (cl_loader != NULL && !cl_loader->gc_mark)
+				other_loader = cl_loader;
+			goto do_class_loader;
+		}
+
+		/* Special object that we didn't recognize - impossible! */
+		_JC_ASSERT(JNI_FALSE);
+
+do_class_loader:
+	    {
+		_jc_type **loader_types;
+		int num_loader_types;
+		_jc_object **erefs;
+		int num_erefs;
+
+		/*
+		 * Mark a class loader "object", then push all references
+		 * (both explicit and implicit) from all of its Class instances
+		 * onto the scan stack.
+		 */
+		_JC_ASSERT(!loader->gc_mark);
+		loader->gc_mark = JNI_TRUE;
+
+		/*
+		 * Compute size of the list of this loader's defined types.
+		 * The boot loader is a special case: the primitive types
+		 * are not in the derived types tree, so we have to account
+		 * for them manually.
+		 */
+		num_loader_types = loader->defined_types.size;
+		if (loader == vm->boot.loader)
+			num_loader_types += _JC_TYPE_VOID - _JC_TYPE_BOOLEAN + 1;
+
+		/* Populate the list of this loader's defined types */
+		if ((loader_types = _jc_vm_alloc(env,
+		    num_loader_types * sizeof(*loader_types))) == NULL)
+			goto fail;
+		_jc_splay_list(&loader->defined_types, (void **)loader_types);
+		if (loader == vm->boot.loader) {
+			_jc_type **typep;
+			int i;
+
+			typep = loader_types + loader->defined_types.size;
+			for (i = _JC_TYPE_BOOLEAN; i <= _JC_TYPE_VOID; i++)
+				*typep++ = vm->boot.types.prim[i];
+			_JC_ASSERT(typep - loader_types == num_loader_types);
+		}
+
+		/*
+		 * Generate list of explicit references from all loaded
+		 * types (first call counts, second call fills the list).
+		 */
+		num_erefs = _jc_get_explicit_refs(env,
+		    loader_types, num_loader_types, NULL);
+		if ((erefs = _jc_uni_alloc(env, &uni,
+		    num_erefs * sizeof(*erefs))) == NULL) {
+			_jc_vm_free(&loader_types);
+			goto fail;
+		}
+		_jc_get_explicit_refs(env,
+		    loader_types, num_loader_types, erefs);
+
+		/* Push loader's explicit references onto the stack */
+		if (num_erefs > 0) {
+			if (_jc_gc_push_refs(env, &uni,
+			    &frame, erefs, num_erefs) != JNI_OK) {
+				_jc_vm_free(&loader_types);
+				goto fail;
+			}
+		}
+
+		/* Push loader's implicit references onto the stack */
+		if (loader->num_implicit_refs > 0) {
+			if (_jc_gc_push_refs(env, &uni, &frame,
+			    loader->implicit_refs, loader->num_implicit_refs)
+			    != JNI_OK) {
+				_jc_vm_free(&loader_types);
+				goto fail;
+			}
+		}
+
+		/* Free loader types list */
+		_jc_vm_free(&loader_types);
+
+		/* If there was another loader to handle, do it now */
+		if (other_loader != NULL) {
+			loader = other_loader;
+			other_loader = NULL;
+			goto do_class_loader;
+		}
+	    }
+	}
+
+	/* Done */
+	_jc_uni_alloc_free(&uni);
+	return JNI_OK;
+
+fail:
+	/* Failed */
+	_jc_uni_alloc_free(&uni);
+	return JNI_ERR;
+}
+
+/*
+ * Generate the list of all explicit references from Class objects
+ * associated with a class loader. Null references are skipped so the
+ * list stays as small as possible.
+ *
+ * If "list" is NULL, only counting is performed; callers use one call
+ * to size the list and a second call to fill it.
+ *
+ * Returns the length of the list.
+ */
+static int
+_jc_get_explicit_refs(_jc_env *env, _jc_type **types,
+	int ntypes, _jc_object **list)
+{
+	_jc_jvm *const vm = env->vm;
+	const int num_class_fields
+	    = vm->boot.types.Class->u.nonarray.num_virtual_refs;
+	int count = 0;
+	int ti;
+	int fi;
+
+	/* Walk each type, collecting non-null references */
+	for (ti = 0; ti < ntypes; ti++) {
+		_jc_type *const type = types[ti];
+
+		/*
+		 * Instance fields of the Class object: reference fields
+		 * are stored at negative word offsets from the instance.
+		 */
+		for (fi = -num_class_fields; fi < 0; fi++) {
+			_jc_object *const ref
+			    = ((_jc_object **)type->instance)[fi];
+
+			if (ref != NULL) {
+				if (list != NULL)
+					list[count] = ref;
+				count++;
+			}
+		}
+
+		/* Arrays don't have static fields */
+		if (_JC_FLG_TEST(type, ARRAY))
+			continue;
+
+		/*
+		 * Static fields of the class itself. We rely on the fact
+		 * that reference fields appear first in the 'class_fields'
+		 * structure, and static reference fields appear first in
+		 * a type's field list, so we can stop at the first field
+		 * that is not a static reference.
+		 */
+		for (fi = 0; fi < type->u.nonarray.num_fields; fi++) {
+			_jc_field *const field = type->u.nonarray.fields[fi];
+			_jc_object *ref;
+
+			if (!_JC_ACC_TEST(field, STATIC)
+			    || _jc_sig_types[(u_char)*field->signature]
+			      != _JC_TYPE_REFERENCE)
+				break;
+
+			ref = ((_jc_object **)type->u.nonarray.class_fields)[fi];
+			if (ref != NULL) {
+				if (list != NULL)
+					list[count] = ref;
+				count++;
+			}
+		}
+	}
+
+	/* Done */
+	return count;
+}
+
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/heap.c
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/heap.c?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/heap.c (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/heap.c Tue Oct 4 19:19:16 2005
@@ -0,0 +1,722 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: heap.c,v 1.9 2005/03/05 23:59:08 archiecobbs Exp $
+ */
+
+#include "libjc.h"
+
+/* Number of GC cycles to perform before giving up */
+#define _JC_HEAP_MAX_GC_CYCLES 3
+
+/* Internal functions */
+static int _jc_heap_gen_block_sizes(_jc_jvm *vm);
+static int _jc_heap_roundup_block_size(_jc_jvm *vm, int size, int *nbp);
+
+/*
+ * Initialize a VM's heap.
+ *
+ * If unsuccessful an exception is stored.
+ */
+jint
+_jc_heap_init(_jc_env *env, _jc_jvm *vm)
+{
+ _jc_heap *const heap = &vm->heap;
+
+ /* Round up heap size to page size */
+ heap->size = _JC_ROUNDUP2(heap->size, _JC_PAGE_SIZE);
+
+ /* Allocate heap */
+ if ((heap->mem = _jc_vm_alloc(env, heap->size)) == NULL) {
+ _JC_EX_STORE(env, OutOfMemoryError,
+ "can't allocate %lu byte heap", (unsigned long)heap->size);
+ goto fail;
+ }
+
+ /* Find an array of (page aligned) pages in there */
+ heap->pages = (void *)_JC_ROUNDUP2((_jc_word)heap->mem, _JC_PAGE_SIZE);
+ heap->max_pages = (heap->size - ((char *)heap->pages
+ - (char *)heap->mem)) / _JC_PAGE_SIZE;
+ heap->num_pages = 0;
+
+ /*
+ * Generate the list of available block sizes. This is a two-pass
+ * protocol: the first call (heap->sizes still NULL) only counts
+ * the sizes, then after the array is allocated the second call
+ * actually fills in the descriptors.
+ */
+ heap->num_sizes = _jc_heap_gen_block_sizes(vm);
+ if ((heap->sizes = _jc_vm_alloc(env,
+ heap->num_sizes * sizeof(*heap->sizes))) == NULL)
+ goto fail;
+ _jc_heap_gen_block_sizes(vm);
+
+ /*
+ * Sanity check values.
+ *
+ * NOTE(review): the check below tests _JC_HEAP_MAX(NEXT) while the
+ * message reports _JC_HEAP_MAX(NPAGES) - 1; confirm which field's
+ * limit actually bounds the page count.
+ */
+ if (heap->max_pages >= _JC_HEAP_MAX(NEXT)) {
+ _JC_EX_STORE(env, OutOfMemoryError,
+ "heap is too large: it can't have more than %d pages",
+ _JC_HEAP_MAX(NPAGES) - 1);
+ goto fail;
+ }
+ if (heap->num_sizes > _JC_HEAP_MAX(BSI)) {
+ _JC_EX_STORE(env, OutOfMemoryError,
+ "heap granularity is too small: heap can't have more"
+ " than %d different block sizes", _JC_HEAP_MAX(BSI) - 1);
+ goto fail;
+ }
+
+ /* Done */
+ return JNI_OK;
+
+fail:
+ /* Clean up after failure; leave the heap structure zeroed */
+ _jc_vm_free(&heap->mem);
+ _jc_vm_free(&heap->sizes);
+ memset(heap, 0, sizeof(*heap));
+ return JNI_ERR;
+}
+
+/*
+ * Free a VM's heap.
+ */
+void
+_jc_heap_destroy(_jc_jvm *vm)
+{
+ _jc_heap *const heap = &vm->heap;
+
+ /* Release heap memory and size table, then zero the descriptor
+ so a subsequent _jc_heap_init() starts from a clean state */
+ _jc_vm_free(&heap->mem);
+ _jc_vm_free(&heap->sizes);
+ memset(heap, 0, sizeof(*heap));
+}
+
+/*
+ * Allocate one or more pages from the heap.
+ */
+void *
+_jc_heap_alloc_pages(_jc_env *env, int npages)
+{
+ _jc_jvm *const vm = env->vm;
+ _jc_heap *const heap = &vm->heap;
+ int reserved = _JC_HEAP_RESERVED_PAGES;
+ int num_cycles = 0;       /* GC cycles attempted so far */
+ _jc_word *ptr;
+ int i;
+ int j;
+
+ /* Sanity check: page count must fit in the NPAGES bit field */
+ _JC_ASSERT(npages < _JC_HEAP_MAX(NPAGES));
+
+try_again:
+ /* Start looking at where we left off last */
+ i = heap->next_page;
+
+ /* Leave some special reserved memory for posting OutOfMemoryError's */
+ if (env->out_of_memory)
+ reserved = 0;
+
+check_avail:
+ /* Check for out of memory (keeping 'reserved' pages in reserve) */
+ if (i + npages > heap->num_pages - reserved) {
+
+ /*
+ * Initialize pages the first time through the heap: the heap
+ * is grown lazily by _JC_HEAP_INIT_PAGES increments, marking
+ * each new page's info word as free, under the VM mutex.
+ */
+ if (heap->num_pages != heap->max_pages) {
+ int increase;
+ int i;
+
+ _JC_MUTEX_LOCK(env, vm->mutex);
+ increase = _JC_HEAP_INIT_PAGES(heap);
+ if (heap->num_pages + increase > heap->max_pages)
+ increase = heap->max_pages - heap->num_pages;
+ for (i = 0; i < increase; i++) {
+ *_JC_PAGE_ADDR(heap->pages,
+ heap->num_pages + i) = 0;
+ }
+ heap->num_pages += increase;
+ _JC_MUTEX_UNLOCK(env, vm->mutex);
+ goto check_avail;
+ }
+
+ /* Dip into reserve memory when really out of memory */
+ if (num_cycles == _JC_HEAP_MAX_GC_CYCLES) {
+ env->out_of_memory = 1;
+ _jc_post_exception(env, _JC_OutOfMemoryError);
+ return NULL;
+ }
+
+ /* Do a GC cycle */
+ if (_jc_gc(env, num_cycles > 0) != JNI_OK) {
+ _jc_post_exception_info(env);
+ return NULL;
+ }
+ num_cycles++;
+
+ /* After second try, yield so the finalizer thread can run */
+ if (num_cycles > 1)
+ sched_yield();
+
+ /* Try again */
+ goto try_again;
+ }
+
+ /* Point at first available page */
+ ptr = _JC_PAGE_ADDR(heap->pages, i);
+
+ /*
+ * Try to allocate 'npages' contiguous pages, claiming each page
+ * lock-free with compare-and-swap on its info word.
+ */
+ for (j = 0; j < npages; j++) {
+ volatile _jc_word *const page_info = _JC_PAGE_ADDR(ptr, j);
+ _jc_word word;
+
+ /* Try to quickly grab the next page */
+ if (_jc_compare_and_swap(page_info, _JC_HEAP_PAGE_FREE,
+ (_JC_HEAP_PAGE_ALLOC << _JC_HEAP_PTYPE_SHIFT)
+ | ((npages - j) << _JC_HEAP_NPAGES_SHIFT)))
+ continue;
+
+ /* Page was not free; skip over it and any subsequent pages.
+ A FREE result here means another thread raced us; retry
+ from the same index. */
+ word = *page_info;
+ switch (_JC_HEAP_EXTRACT(word, PTYPE)) {
+ case _JC_HEAP_PAGE_FREE:
+ break;
+ case _JC_HEAP_PAGE_SMALL:
+ i += j + 1;
+ break;
+ case _JC_HEAP_PAGE_LARGE:
+ case _JC_HEAP_PAGE_ALLOC:
+ i += j + _JC_HEAP_EXTRACT(word, NPAGES);
+ break;
+ }
+
+ /* Un-do our partial reservation */
+ while (j-- > 0)
+ *_JC_PAGE_ADDR(ptr, j) = _JC_HEAP_PAGE_FREE;
+
+ /* Try again */
+ goto check_avail;
+ }
+
+ /*
+ * Reset the next free page pointer. Note there is a race
+ * condition here which may lead to some free pages remaining
+ * unallocatable until the next garbage collection.
+ */
+ heap->next_page = i + j;
+
+ /* Done */
+ return ptr;
+}
+
+/*
+ * Allocate a small block of memory with size index 'bsi' from the heap.
+ *
+ * The caller is responsible for making the memory look like
+ * an object and maintaining an (indirect) reference to it.
+ */
+void *
+_jc_heap_alloc_small_block(_jc_env *env, int bsi)
+{
+ _jc_jvm *const vm = env->vm;
+ _jc_heap *const heap = &vm->heap;
+ _jc_heap_size *const bs = &heap->sizes[bsi];
+ volatile _jc_word *ptr;
+ jboolean gotit;
+ _jc_word *next;
+ int i;
+
+ /* Sanity check: valid size index, properly aligned in-page hint */
+ _JC_ASSERT(bsi >= 0 && bsi < heap->num_sizes);
+ _JC_ASSERT(bs->hint == NULL
+ || (((_jc_word)bs->hint & (_JC_PAGE_SIZE - 1)) % bs->size)
+ == _JC_HEAP_BLOCK_OFFSET);
+ _JC_ASSERT(_JC_HEAP_SAME_PAGE(bs->hint,
+ (char *)bs->hint + bs->size - 1));
+
+try_again:
+ /* Do we have a hint for the next free block to try? */
+ if ((ptr = bs->hint) == NULL)
+ goto need_hint;
+
+ /* Try to grab hinted at block (lock-free compare-and-swap) */
+ gotit = _jc_compare_and_swap(ptr,
+ _JC_HEAP_BLOCK_FREE, _JC_HEAP_BLOCK_ALLOC);
+
+ /* Update hint to point to the next block in the page (if any) */
+ next = (_jc_word *)((char *)ptr + bs->size);
+ bs->hint = _JC_HEAP_SAME_PAGE(ptr, (char *)next + bs->size - 1) ?
+ next : NULL;
+
+ /* Return the block we got if we got it */
+ if (gotit)
+ return (void *)ptr;
+
+ /* Try again */
+ goto try_again;
+
+need_hint:
+ /* Does this blocksize have any pages on its "use first" list? */
+ if ((ptr = bs->pages) == NULL)
+ goto get_page;
+
+ /* Sanity check the page at the head of the list */
+ _JC_ASSERT(((_jc_word)ptr & (_JC_PAGE_SIZE - 1)) == 0);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(*ptr, PTYPE) == _JC_HEAP_PAGE_SMALL);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(*ptr, BSI) == bsi);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(*ptr, NEXT) < heap->num_pages
+ || _JC_HEAP_EXTRACT(*ptr, NEXT) == _JC_HEAP_MAX(NEXT) - 1);
+
+ /* Get the page following the first page in the list, if any;
+ _JC_HEAP_MAX(NEXT) - 1 is the "end of list" sentinel */
+ next = (i = _JC_HEAP_EXTRACT(*ptr, NEXT)) == _JC_HEAP_MAX(NEXT) - 1 ?
+ NULL : _JC_PAGE_ADDR(heap->pages, i);
+ _JC_ASSERT(next == NULL || _JC_IN_HEAP(heap, next));
+
+ /*
+ * Pop page off the page list; if we lose the race, another thread
+ * popped it off first. At worst, page will remain as is until
+ * the next GC cycle.
+ */
+ if (_jc_compare_and_swap((_jc_word *)&bs->pages,
+ (_jc_word)ptr, (_jc_word)next))
+ bs->hint = (_jc_word *)((char *)ptr + _JC_HEAP_BLOCK_OFFSET);
+ goto try_again;
+
+get_page:
+ /* Allocate a new small page for this blocksize */
+ if ((ptr = _jc_heap_alloc_pages(env, 1)) == NULL)
+ return NULL;
+ _JC_ASSERT(_JC_HEAP_EXTRACT(*ptr, PTYPE) == _JC_HEAP_PAGE_ALLOC);
+
+ /* Initialize page and allocate first block for ourselves */
+ ptr = (_jc_word *)((char *)ptr + _JC_HEAP_BLOCK_OFFSET);
+ *ptr = _JC_HEAP_BLOCK_ALLOC;
+
+ /* Mark all subsequent blocks as free */
+ for (i = bs->num_blocks - 1; i > 0; i--) {
+ ptr = (_jc_word *)((char *)ptr + bs->size);
+ *ptr = _JC_HEAP_BLOCK_FREE;
+ }
+
+ /* Now mark the page as small and available */
+ ptr = (_jc_word *)((_jc_word)ptr & ~(_JC_PAGE_SIZE - 1));
+ *ptr = (_JC_HEAP_PAGE_SMALL << _JC_HEAP_PTYPE_SHIFT)
+ | (bsi << _JC_HEAP_BSI_SHIFT)
+ | _JC_HEAP_NEXT_MASK; /* invalid value not used */
+
+ /* Point the hint at the second block in the page */
+ ptr = (_jc_word *)((char *)ptr + _JC_HEAP_BLOCK_OFFSET);
+ bs->hint = (_jc_word *)((char *)ptr + bs->size);
+
+ /* Return pointer to the first block */
+ return (void *)ptr;
+}
+
+/*
+ * Generate the list of block sizes.
+ *
+ * Return the number of sizes.
+ */
+static int
+_jc_heap_gen_block_sizes(_jc_jvm *vm)
+{
+ _jc_heap *const heap = &vm->heap;
+ int nblocks;
+ int size;
+ int i;
+
+ /*
+ * Two-pass helper: when heap->sizes is still NULL this only counts
+ * how many block sizes there are; once the array is allocated a
+ * second call fills in the descriptors.
+ */
+ for (i = 0, size = sizeof(_jc_object); ; i++) {
+ _jc_heap_size *const bs = &heap->sizes[i];
+ int new_size;
+
+ /*
+ * NOTE(review): 'bs' is formed from heap->sizes even when it
+ * is NULL (counting pass). It is never dereferenced in that
+ * case, but forming the pointer is technically undefined --
+ * consider computing it inside the 'if' below.
+ */
+
+ /* Get rounded up size; -1 means sizes no longer fit a page */
+ if ((size = _jc_heap_roundup_block_size(vm,
+ size, &nblocks)) == -1)
+ break;
+
+ /* Initialize this block size descriptor (second pass only) */
+ if (heap->sizes != NULL) {
+ memset(bs, 0, sizeof(*bs));
+ bs->size = size;
+ bs->num_blocks = nblocks;
+ }
+
+ /* Compute the next bigger size, growing geometrically per the
+ configured granularity but by at least one alignment unit */
+ new_size = (size * (200 - heap->granularity)) / 100;
+ if (new_size - size < _JC_FULL_ALIGNMENT)
+ new_size = size + _JC_FULL_ALIGNMENT;
+ size = new_size;
+ }
+ return i;
+}
+
+/*
+ * Round up a block size to the largest possible valid size such
+ * that we still get the same number of blocks out of a single page.
+ */
+static int
+_jc_heap_roundup_block_size(_jc_jvm *vm, int size, int *nbp)
+{
+ const int psize = _JC_HOWMANY(_JC_PAGE_SIZE, _JC_FULL_ALIGNMENT);
+ const int hdrsize = _JC_HOWMANY(sizeof(_jc_word), _JC_FULL_ALIGNMENT);
+ int nblocks;
+
+ /* Do math in multiples of _JC_FULL_ALIGNMENT */
+ size = _JC_HOWMANY(size, _JC_FULL_ALIGNMENT);
+
+ /* How many blocks can we get out of one page (after the header)? */
+ nblocks = (psize - hdrsize) / size;
+
+ /* Check for overflow: a size yielding <= 1 block per page means
+ the caller should switch to whole-page allocation */
+ if (nblocks <= 1)
+ return -1;
+
+ /* Increase block size until 'nblocks' blocks will no longer fit */
+ while (hdrsize + (nblocks * (size + 1)) <= psize)
+ size++;
+
+ /* Done; report block count and return size in bytes */
+ *nbp = nblocks;
+ return size * _JC_FULL_ALIGNMENT;
+}
+
+/*
+ * Map a size into the appropriate block size index.
+ *
+ * Returns the block size index, or -N if the size is big enough
+ * to require one or more whole pages where N is the number of pages.
+ */
+int
+_jc_heap_block_size(_jc_jvm *vm, size_t size)
+{
+ _jc_heap *const heap = &vm->heap;
+ int bsi;
+ int lim;
+
+ /* Sanity check */
+ _JC_ASSERT(size > 0);
+
+ /* Check whether size requires large page(s); if so return the
+ (negated) number of whole pages needed, including the page
+ header offset */
+ if (size > heap->sizes[heap->num_sizes - 1].size) {
+ return -_JC_HOWMANY(size + _JC_HEAP_BLOCK_OFFSET,
+ _JC_PAGE_SIZE);
+ }
+
+ /* Determine which block size to use with binary search: find the
+ smallest block size >= 'size' */
+ for (bsi = 0, lim = heap->num_sizes; lim != 0; lim >>= 1) {
+ const int j = bsi + (lim >> 1);
+
+ if (size > heap->sizes[j].size) {
+ bsi = j + 1;
+ lim--;
+ } else if (j == 0 || size > heap->sizes[j - 1].size) {
+ bsi = j;
+ break;
+ }
+ }
+
+ /* Return block size index */
+ return bsi;
+}
+
+#ifndef NDEBUG
+
+/* Internal functions */
+static void _jc_heap_check_object(_jc_jvm *vm, _jc_object *obj, int bsi);
+static void _jc_heap_check_block(_jc_jvm *vm, _jc_word *block,
+ char *page, int bsi);
+static void _jc_heap_check_alloc(_jc_jvm *vm, _jc_object *obj);
+
+/*
+ * Sanity check the heap. This should be called with the world halted.
+ */
+void
+_jc_heap_check(_jc_jvm *vm)
+{
+ _jc_class_loader *loader;
+ _jc_heap *const heap = &vm->heap;
+ int i;
+
+ /* World must be halted (no concurrent mutation of the heap) */
+ _JC_ASSERT(vm->world_stopped);
+
+ /* Check each page */
+ for (i = 0; i < heap->num_pages; i++) {
+ char *const page = (char *)heap->pages + i * _JC_PAGE_SIZE;
+ _jc_word word = *((_jc_word *)page);
+
+ /* Check page depending on type */
+ switch (_JC_HEAP_EXTRACT(word, PTYPE)) {
+ case _JC_HEAP_PAGE_FREE:
+ break;
+ case _JC_HEAP_PAGE_SMALL:
+ {
+ const int bsi = _JC_HEAP_EXTRACT(word, BSI);
+ _jc_heap_size *const size = &heap->sizes[bsi];
+ char *const blocks = page + _JC_HEAP_BLOCK_OFFSET;
+ int num_free = 0;
+ int j;
+
+ /* Check heap block size */
+ _JC_ASSERT(bsi >= 0 && bsi < heap->num_sizes);
+
+ /* Check all allocated blocks in this page */
+ for (j = 0; j < size->num_blocks; j++) {
+ _jc_word *const block
+ = (_jc_word *)(blocks + j * size->size);
+
+ switch (*block) {
+ case _JC_HEAP_BLOCK_FREE:
+ num_free++;
+ break;
+ case _JC_HEAP_BLOCK_ALLOC:
+ /* half-claimed blocks must not survive
+ with the world stopped */
+ _JC_ASSERT(JNI_FALSE);
+ default:
+ _jc_heap_check_block(vm,
+ block, page, bsi);
+ break;
+ }
+ }
+
+ /* Impossible for all blocks to be free: a fully free
+ small page would have been reclaimed */
+ _JC_ASSERT(num_free < size->num_blocks);
+ break;
+ }
+ case _JC_HEAP_PAGE_LARGE:
+ {
+ const int npages = _JC_HEAP_EXTRACT(word, NPAGES);
+ _jc_word *const block
+ = (_jc_word *)(page + _JC_HEAP_BLOCK_OFFSET);
+
+ _JC_ASSERT(npages > 0 && i + npages <= heap->num_pages);
+ _jc_heap_check_block(vm, block, page, -npages);
+ i += npages - 1;       /* skip the continuation pages */
+ break;
+ }
+ case _JC_HEAP_PAGE_ALLOC:
+ /* pages must not stay in the transient ALLOC state */
+ _JC_ASSERT(JNI_FALSE);
+ }
+ }
+
+ /* Check small page "use first" lists */
+ for (i = 0; i < heap->num_sizes; i++) {
+ _jc_heap_size *const size = &heap->sizes[i];
+ _jc_word *page;
+ int j;
+
+ for (j = 0, page = (_jc_word *)size->pages; page != NULL; j++) {
+ _jc_word word = *page;
+ int next;
+
+ /* Sanity check loops (list can't be longer than heap) */
+ _JC_ASSERT(j <= heap->num_pages);
+
+ /* Sanity check page type */
+ _JC_ASSERT(_JC_HEAP_EXTRACT(word, PTYPE)
+ == _JC_HEAP_PAGE_SMALL);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(word, BSI) == i);
+
+ /* Go to next page on list (MAX(NEXT) - 1 = end) */
+ if ((next = _JC_HEAP_EXTRACT(word, NEXT))
+ == _JC_HEAP_MAX(NEXT) - 1)
+ page = NULL;
+ else
+ page = _JC_PAGE_ADDR(heap->pages, next);
+ }
+ }
+
+ /* Check objects pointed to by class loader memory */
+ LIST_FOREACH(loader, &vm->class_loaders, link) {
+ for (i = 0; i < loader->num_implicit_refs; i++) {
+ _jc_object *const obj = loader->implicit_refs[i];
+
+ _jc_heap_check_object(vm, obj, 1);
+ }
+ }
+}
+
+/*
+ * Sanity check object belongs where it is in the heap.
+ */
+static void
+_jc_heap_check_alloc(_jc_jvm *vm, _jc_object *obj)
+{
+ _jc_heap *const heap = &vm->heap;
+ _jc_type *const type = obj->type;
+ _jc_word *block_start;
+ _jc_word *obj_start;
+ int object_size;
+ int block_size;
+ _jc_word pginfo;
+ char *page;
+ int ptype;
+ int bsi;
+
+ /* Get object size (arrays: head + elements; else instance size) */
+ if (_JC_LW_TEST(obj->lockword, ARRAY)) {
+ const int elem_type = _JC_LW_EXTRACT(obj->lockword, TYPE);
+ _jc_array *const array = (_jc_array *)obj;
+
+ object_size = _jc_array_head_sizes[elem_type]
+ + array->length * _jc_type_sizes[elem_type];
+ } else
+ object_size = type->u.nonarray.instance_size;
+
+ /* Find object start: reference fields precede the object header */
+ obj_start = ((_jc_word *)obj) - _jc_num_refs(obj);
+ bsi = _jc_heap_block_size(vm, object_size);
+
+ /* Get page info for page containing start of object */
+ page = (char *)((_jc_word)obj_start & ~(_JC_PAGE_SIZE - 1));
+ pginfo = *((_jc_word *)page);
+
+ /* Check page type matches the size class the object needs */
+ ptype = _JC_HEAP_EXTRACT(pginfo, PTYPE);
+ if (bsi < 0) {
+ _JC_ASSERT(ptype == _JC_HEAP_PAGE_LARGE);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(pginfo, NPAGES) == -bsi);
+ block_size = -bsi * _JC_PAGE_SIZE - _JC_HEAP_BLOCK_OFFSET;
+ } else {
+ _JC_ASSERT(ptype == _JC_HEAP_PAGE_SMALL);
+ _JC_ASSERT(_JC_HEAP_EXTRACT(pginfo, BSI) == bsi);
+ block_size = heap->sizes[bsi].size;
+ }
+ _JC_ASSERT(object_size <= block_size);
+
+ /* Determine if there is a skip word (used when enough reference
+ fields precede the header to make scanning worth skipping) */
+ block_start = (block_size >= object_size + sizeof(_jc_word)
+ && _jc_num_refs(obj) >= _JC_SKIPWORD_MIN_REFS) ?
+ obj_start - 1 : obj_start;
+
+ /* Check alignment of object start within its page */
+ if (bsi < 0) {
+ _JC_ASSERT(((_jc_word)block_start & (_JC_PAGE_SIZE - 1))
+ == _JC_HEAP_BLOCK_OFFSET);
+ } else {
+ _JC_ASSERT((((_jc_word)block_start & (_JC_PAGE_SIZE - 1))
+ % block_size) == _JC_HEAP_BLOCK_OFFSET);
+ }
+}
+
+/*
+ * Sanity check one allocated heap block.
+ */
+static void
+_jc_heap_check_block(_jc_jvm *vm, _jc_word *block, char *page, int bsi)
+{
+ _jc_heap *const heap = &vm->heap;
+ _jc_heap_size *size;
+ _jc_object *obj;
+ _jc_word word;
+ int block_size;
+ int obj_size;
+ int skip;
+
+ /* Sanity check block contains valid object */
+ word = *block;
+ _JC_ASSERT(word != _JC_HEAP_BLOCK_FREE && word != _JC_HEAP_BLOCK_ALLOC);
+
+ /* Check for possible skip word and find object header; without a
+ skip word we scan forward past the (even-valued) reference words
+ until the odd lockword of the header is found */
+ if (_JC_HEAP_EXTRACT(word, BTYPE) == _JC_HEAP_BLOCK_SKIP) {
+ skip = _JC_HEAP_EXTRACT(word, SKIP);
+ _JC_ASSERT(skip >= 1 + _JC_SKIPWORD_MIN_REFS);
+ obj = (_jc_object *)(block + skip);
+ } else {
+ skip = 0;
+ while (!_JC_LW_TEST(*block, ODD))
+ block++;
+ obj = (_jc_object *)block;
+ }
+
+ /* Get size of this block (negative bsi = whole-page block) */
+ if (bsi < 0) {
+ size = NULL;
+ block_size = -bsi * _JC_PAGE_SIZE - _JC_HEAP_BLOCK_OFFSET;
+ } else {
+ size = &heap->sizes[bsi];
+ block_size = size->size;
+ }
+
+ /* Sanity check object belongs in this type of block */
+ if (!_JC_LW_TEST(obj->lockword, ARRAY)) {
+ obj_size = obj->type->u.nonarray.instance_size;
+ _JC_ASSERT(obj->type->u.nonarray.block_size_index == bsi);
+ } else {
+ const int elem_type = _JC_LW_EXTRACT(obj->lockword, TYPE);
+ _jc_array *const array = (_jc_array *)obj;
+
+ obj_size = _jc_array_head_sizes[elem_type]
+ + array->length * _jc_type_sizes[elem_type];
+ }
+ _JC_ASSERT(block_size >= (skip ? sizeof(_jc_word) : 0) + obj_size);
+ /* Block must also be the *smallest* size class that fits */
+ if (bsi < 0) {
+ _JC_ASSERT(obj_size + _JC_HEAP_BLOCK_OFFSET
+ >= (-bsi - 1) * _JC_PAGE_SIZE);
+ } else if (bsi > 0)
+ _JC_ASSERT(obj_size > heap->sizes[bsi - 1].size);
+
+ /* Sanity check object itself (recursing one level into refs) */
+ _jc_heap_check_object(vm, obj, 1);
+}
+
+/*
+ * Sanity check one object.
+ */
+static void
+_jc_heap_check_object(_jc_jvm *vm, _jc_object *obj, int recurse)
+{
+ _jc_heap *const heap = &vm->heap;
+ _jc_type *const type = obj->type;
+ int i;
+
+ /* Check allocation: heap objects get a placement check; objects
+ outside the heap must not need finalization or be References */
+ if (_JC_IN_HEAP(heap, obj)) {
+ _JC_ASSERT(type != vm->boot.types.Class);
+ _jc_heap_check_alloc(vm, obj);
+ } else {
+ _JC_ASSERT(!_JC_LW_TEST(type->initial_lockword, FINALIZE));
+ _JC_ASSERT(!_jc_subclass_of(obj, vm->boot.types.Reference));
+ }
+
+ /* Check lockword: must be odd, and its ARRAY/TYPE bits must be
+ consistent with the object's type */
+ _JC_ASSERT(_JC_LW_TEST(obj->lockword, ODD));
+ _JC_ASSERT(_JC_LW_TEST(obj->lockword, ARRAY)
+ == _JC_FLG_TEST(type, ARRAY));
+ switch (_JC_LW_EXTRACT(obj->lockword, TYPE)) {
+ case _JC_TYPE_BOOLEAN:
+ case _JC_TYPE_BYTE:
+ case _JC_TYPE_CHAR:
+ case _JC_TYPE_SHORT:
+ case _JC_TYPE_INT:
+ case _JC_TYPE_LONG:
+ case _JC_TYPE_FLOAT:
+ case _JC_TYPE_DOUBLE:
+ /* primitive element type implies an array object */
+ _JC_ASSERT(_JC_LW_TEST(obj->lockword, ARRAY));
+ break;
+ case _JC_TYPE_REFERENCE:
+ break;
+ default:
+ _JC_ASSERT(JNI_FALSE);
+ }
+ if (_JC_LW_TEST(obj->lockword, ARRAY)) {
+ _JC_ASSERT((type->u.array.element_type->flags & _JC_TYPE_MASK)
+ == _JC_LW_EXTRACT(obj->lockword, TYPE));
+ }
+
+ /* Recurse (once) on reference fields, which live at negative
+ word offsets from the object pointer */
+ if (!recurse)
+ return;
+ for (i = -_jc_num_refs(obj); i < 0; i++) {
+ _jc_object *const ref = ((_jc_object **)obj)[i];
+
+ if (ref != NULL)
+ _jc_heap_check_object(vm, ref, 0);
+ }
+}
+
+#endif /* !NDEBUG */
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/init.c
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/init.c?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/init.c (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/init.c Tue Oct 4 19:19:16 2005
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: init.c,v 1.1.1.1 2004/02/20 05:15:35 archiecobbs Exp $
+ */
+
+#include "libjc.h"
+
+/* Internal variables */
+static jint _jc_init_result = JNI_ERR;
+
+/* Internal functions */
+static void _jc_do_init(void);
+
+/*
+ * One-time global initialization.
+ */
+jint
+_jc_init(void)
+{
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+
+ /* Run _jc_do_init() exactly once process-wide; its outcome is
+ recorded in _jc_init_result and returned on every call */
+ pthread_once(&once, _jc_do_init);
+ return _jc_init_result;
+}
+
+static void
+_jc_do_init(void)
+{
+ /* Initialize threading and signal handling; _jc_init_result stays
+ JNI_ERR unless both steps succeed */
+ if (_jc_thread_init() != JNI_OK)
+ return;
+ if (_jc_init_signals() != JNI_OK)
+ return;
+ _jc_init_result = JNI_OK;
+}
+
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/initialize.c
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/initialize.c?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/initialize.c (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/initialize.c Tue Oct 4 19:19:16 2005
@@ -0,0 +1,310 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: initialize.c,v 1.7 2005/03/12 04:24:17 archiecobbs Exp $
+ */
+
+#include "libjc.h"
+
+/* Internal functions */
+static jint _jc_initialize_class(_jc_env *env, _jc_type *type);
+static jint _jc_initialize_fields(_jc_env *env, _jc_type *type);
+
+/*
+ * Initialize a type.
+ */
+jint
+_jc_initialize_type(_jc_env *env, _jc_type *type)
+{
+ _jc_jvm *const vm = env->vm;
+ jint status;
+
+ /* Already initialized? (initialization implies the earlier
+ verify/prepare/resolve stages all completed) */
+ if (_JC_FLG_TEST(type, INITIALIZED)) {
+ _JC_ASSERT(_JC_FLG_TEST(type, VERIFIED));
+ _JC_ASSERT(_JC_FLG_TEST(type, PREPARED));
+ _JC_ASSERT(_JC_FLG_TEST(type, RESOLVED));
+ return JNI_OK;
+ }
+
+ /* Sanity check: array types are never initialized here */
+ _JC_ASSERT(!_JC_FLG_TEST(type, ARRAY));
+
+ /* Defer initialization when doing initial bootstrapping, since
+ no Java code may execute yet */
+ if (vm->initialization != NULL && !vm->initialization->may_execute)
+ return JNI_OK;
+
+ /* Resolve the type first */
+ if (!_JC_FLG_TEST(type, RESOLVED)
+ && (status = _jc_resolve_type(env, type)) != JNI_OK)
+ return status;
+
+ /* Initialize class */
+ if ((status = _jc_initialize_class(env, type)) != JNI_OK)
+ return status;
+
+ /* Done */
+ return JNI_OK;
+}
+
+/*
+ * Initialize a type per JVM spec, 2nd edition, sec. 2.17.5.
+ *
+ * During bootstrap initialization, a type's Class instance may
+ * not exist yet, in which case we skip the locking stuff.
+ */
+static jint
+_jc_initialize_class(_jc_env *env, _jc_type *type)
+{
+ _jc_jvm *const vm = env->vm;
+ _jc_object *const obj = type->instance;
+ jboolean locked = JNI_FALSE;  /* do we hold obj's monitor? */
+ _jc_method *method;
+ jint status;
+
+ /* Sanity check */
+ _JC_ASSERT(!_JC_FLG_TEST(type, ARRAY));
+
+ /*
+ * The numbered steps below follow the class initialization
+ * procedure of JVMS (2nd ed.) section 2.17.5. 'obj' may be NULL
+ * during bootstrap, in which case all locking is skipped.
+ */
+
+ /* Step 1: acquire the Class object's monitor */
+ if (obj != NULL) {
+ if ((status = _jc_lock_object(env, obj)) != JNI_OK)
+ return status;
+ locked = JNI_TRUE;
+ }
+
+ /* Step 2: wait while another thread is initializing this type */
+ while (type->u.nonarray.initializing_thread != NULL
+ && type->u.nonarray.initializing_thread != env) {
+ if ((status = _jc_invoke_virtual(env,
+ vm->boot.methods.Object.wait, obj)) != JNI_OK)
+ goto fail;
+ }
+
+ /* Step 3: recursive initialization by this thread is a no-op */
+ if (type->u.nonarray.initializing_thread == env)
+ goto done;
+
+ /* Step 4: someone else finished the job while we waited */
+ if (_JC_FLG_TEST(type, INITIALIZED))
+ goto done;
+
+ /* Step 5: a previous attempt failed; report NoClassDefFoundError */
+ if (_JC_FLG_TEST(type, INIT_ERROR)) {
+ if (locked) {
+ locked = JNI_FALSE;
+ if ((status = _jc_unlock_object(env, obj)) != JNI_OK)
+ goto fail;
+ }
+ _jc_post_exception_msg(env, _JC_NoClassDefFoundError,
+ "exception during `%s' class initialization", type->name);
+ status = JNI_ERR;
+ goto fail;
+ }
+
+ /* Step 6: mark ourselves as the initializing thread and unlock */
+ type->u.nonarray.initializing_thread = env;
+ if (locked) {
+ locked = JNI_FALSE;
+ if ((status = _jc_unlock_object(env, obj)) != JNI_OK)
+ goto fail;
+ }
+
+ /* Step 7: initialize the superclass first */
+ if (type->superclass != NULL
+ && !_JC_FLG_TEST(type->superclass, INITIALIZED)
+ && (status = _jc_initialize_type(env, type->superclass)) != JNI_OK)
+ goto step11;
+
+ /* Verbosity */
+ if (type->loader == vm->boot.loader) {
+ VERBOSE(INIT, vm, "initializing `%s' (in bootstrap loader)",
+ type->name);
+ } else {
+ VERBOSE(INIT, vm, "initializing `%s' (in %s@%p)",
+ type->name, type->loader->instance->type->name,
+ type->loader->instance);
+ }
+
+ /* Initialize lockword, first without using superclass */
+ if (!_JC_ACC_TEST(type, INTERFACE)
+ && type != vm->boot.types.Object
+ && type != vm->boot.types.Class)
+ _jc_initialize_lockword(env, type, NULL);
+
+ /* Step 8: set ConstantValue fields, then run <clinit> (if any) */
+ if ((status = _jc_initialize_fields(env, type)) != JNI_OK)
+ goto fail;
+ if ((method = _jc_get_declared_method(env, type,
+ "<clinit>", "()V", _JC_ACC_STATIC, _JC_ACC_STATIC)) != NULL
+ && (status = _jc_invoke_static(env, method)) != JNI_OK)
+ goto step10;
+
+ /* Step 9: success; mark initialized under the monitor */
+ if (obj != NULL) {
+ if ((status = _jc_lock_object(env, obj)) != JNI_OK)
+ return status;
+ locked = JNI_TRUE;
+ }
+ type->flags |= _JC_TYPE_INITIALIZED;
+ type->u.nonarray.initializing_thread = NULL;
+ goto done;
+
+step10:
+ /* Step 10: wrap a non-Error exception from <clinit> in
+ ExceptionInInitializerError */
+ _JC_ASSERT(env->head.pending != NULL);
+ if (!_jc_subclass_of(env->head.pending, vm->boot.types.Error)) {
+ _jc_word param;
+ jobject eref;
+
+ if ((eref = _jc_new_local_native_ref(env,
+ _jc_retrieve_exception(env, NULL))) == NULL) {
+ status = JNI_ERR;
+ goto fail;
+ }
+ /* pass the causing exception as the error's parameter
+ (fixed: '&param' was mangled to '&para;m' in transit) */
+ param = (_jc_word)*eref;
+ _jc_post_exception_params(env,
+ _JC_ExceptionInInitializerError, &param);
+ _jc_free_local_native_ref(&eref);
+ }
+
+step11:
+ /* Step 11: record the failure and wake up any waiting threads */
+ if (obj != NULL) {
+ if ((status = _jc_lock_object(env, obj)) != JNI_OK)
+ return status;
+ locked = JNI_TRUE;
+ }
+ type->flags |= _JC_TYPE_INIT_ERROR;
+ type->u.nonarray.initializing_thread = NULL;
+ if (obj != NULL
+ && (status = _jc_invoke_virtual(env,
+ vm->boot.methods.Object.notifyAll, obj)) != JNI_OK)
+ goto fail;
+ status = JNI_ERR;
+ goto fail;
+
+done:
+ /* Initialize lockword again, this time using superclass */
+ if (!_JC_ACC_TEST(type, INTERFACE)
+ && type != vm->boot.types.Object
+ && type != vm->boot.types.Class)
+ _jc_initialize_lockword(env, type, type->superclass);
+
+ /* Unlock and return */
+ if (locked) {
+ locked = JNI_FALSE;
+ if ((status = _jc_unlock_object(env, obj)) != JNI_OK)
+ return status;
+ }
+
+ /* Done */
+ return JNI_OK;
+
+fail:
+ /* Clean up after failure: release the monitor if still held */
+ _JC_ASSERT(status != JNI_OK);
+ if (locked) {
+ jint status2;
+
+ if ((status2 = _jc_unlock_object(env, obj)) != JNI_OK)
+ status = status2;
+ }
+ return status;
+}
+
+/*
+ * Initialize any static fields that have a "ConstantValue"
+ * attribute providing an initial value.
+ */
+static jint
+_jc_initialize_fields(_jc_env *env, _jc_type *type)
+{
+ int i;
+
+ /* Sanity check */
+ _JC_ASSERT(!_JC_FLG_TEST(type, ARRAY));
+
+ /* Initialize static fields */
+ for (i = 0; i < type->u.nonarray.num_fields; i++) {
+ _jc_field *const field = type->u.nonarray.fields[i];
+ u_char ptype;
+ void *value;
+
+ /* Ignore non-static and uninitialized fields */
+ if (!_JC_ACC_TEST(field, STATIC)
+ || field->initial_value == NULL)
+ continue;
+
+ /* Get pointer to the field we want to initialize */
+ value = ((char *)type->u.nonarray.class_fields + field->offset);
+
+ /* Set the field's value, decoding per the field's signature */
+ switch ((ptype = _jc_sig_types[(u_char)*field->signature])) {
+ case _JC_TYPE_BOOLEAN:
+ case _JC_TYPE_BYTE:
+ case _JC_TYPE_CHAR:
+ case _JC_TYPE_SHORT:
+ case _JC_TYPE_INT:
+ case _JC_TYPE_LONG:
+ memcpy(value, field->initial_value,
+ _jc_type_sizes[ptype]);
+ break;
+ case _JC_TYPE_FLOAT:
+ {
+ const u_char *const b = field->initial_value;
+
+ /* interpreted classes store the raw bytes; compiled
+ classes build the native float from the class-file
+ bytes via _JC_FCONST */
+ if (_JC_ACC_TEST(type, INTERP)) {
+ memcpy(value, b, sizeof(jfloat));
+ break;
+ }
+ *((jfloat *)value) = _JC_FCONST(b[0], b[1], b[2], b[3]);
+ break;
+ }
+ case _JC_TYPE_DOUBLE:
+ {
+ const u_char *const b = field->initial_value;
+
+ if (_JC_ACC_TEST(type, INTERP)) {
+ memcpy(value, b, sizeof(jdouble));
+ break;
+ }
+ *((jdouble *)value) = _JC_DCONST(b[0],
+ b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+ break;
+ }
+ case _JC_TYPE_REFERENCE:
+ {
+ const char *const utf8 = field->initial_value;
+ _jc_object *string;
+
+ /* only String constants are possible here */
+ _JC_ASSERT(field->type == env->vm->boot.types.String);
+ if ((string = _jc_new_intern_string(env,
+ utf8, strlen(utf8))) == NULL)
+ return JNI_ERR;
+ *((_jc_object **)value) = string;
+ break;
+ }
+ default:
+ _JC_ASSERT(JNI_FALSE);
+ break;
+ }
+ }
+
+ /* Done */
+ return JNI_OK;
+}
+
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/inline.h
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/inline.h?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/inline.h (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/inline.h Tue Oct 4 19:19:16 2005
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: inline.h,v 1.6 2005/02/27 04:52:59 archiecobbs Exp $
+ */
+
+/*
+ * Given a pointer to the start of a heap memory block, locate the
+ * object header within that block.
+ *
+ * A block begins with either a skip word (which encodes the offset
+ * of the header), the object header itself, or a run of leading
+ * reference words followed by the header. Header and skip words are
+ * always "odd" (low bit set); reference words are not.
+ */
+static inline _jc_object *
+_jc_find_object_head(volatile const void *block)
+{
+	const _jc_word first = *(volatile _jc_word *)block;
+	_jc_word *scan;
+
+	/* First word odd: either a skip word or the header itself */
+	if (_JC_LW_TEST(first, ODD)) {
+		_JC_ASSERT(first != _JC_HEAP_BLOCK_FREE
+		    && first != _JC_HEAP_BLOCK_ALLOC);
+
+		/* A skip word tells us exactly where the header lives */
+		if (_JC_HEAP_EXTRACT(first, BTYPE) == _JC_HEAP_BLOCK_SKIP) {
+			_JC_ASSERT(_JC_HEAP_EXTRACT(first, SKIP)
+			    >= 1 + _JC_SKIPWORD_MIN_REFS);
+			return (_jc_object *)((_jc_word *)block
+			    + _JC_HEAP_EXTRACT(first, SKIP));
+		}
+
+		/* The block starts directly with the object header */
+		return (_jc_object *)block;
+	}
+
+	/* Walk past the leading reference words to reach the header */
+	scan = (_jc_word *)block + 1;
+	while (!_JC_LW_TEST(*scan, ODD))
+		scan++;
+	return (_jc_object *)scan;
+}
+
+/*
+ * Set up a sweep over the heap that will visit every object.
+ *
+ * Zeroes the sweep state, then records the heap, the first page,
+ * and the address one byte past the last page.
+ */
+static inline void
+_jc_heap_sweep_init(_jc_heap *heap, _jc_heap_sweep *sweep)
+{
+	char *const base = (char *)heap->pages;
+
+	memset(sweep, 0, sizeof(*sweep));
+	sweep->heap = heap;
+	sweep->page = heap->pages;
+	sweep->end = base + heap->num_pages * _JC_PAGE_SIZE;
+}
+
+/*
+ * Advance to the next object in a heap sweep; returns NULL when the
+ * end of the heap has been reached.
+ *
+ * This is a goto-based state machine over the state in 'sweep'.
+ * Labels: 'next_block' consumes one block slot in the current small
+ * page; 'new_block' examines the current block; 'next_page'/'new_page'
+ * advance to and examine the next page.
+ *
+ * If 'recycle_small' is true, a fully swept small-object page is
+ * recycled as a side effect: an empty page is marked free, while a
+ * partially full page is pushed onto the "use me first" list for its
+ * block size.
+ *
+ * NOTE(review): the empty-page test reads sweep->blocks_live, which
+ * this function only ever resets to zero — presumably the caller
+ * increments it for each live object seen; confirm against callers.
+ */
+static inline _jc_object *
+_jc_heap_sweep_next(_jc_heap_sweep *sweep, jboolean recycle_small)
+{
+	_jc_word word;
+
+	/* Handle small page vs. large page */
+	if (sweep->size != NULL) {
+
+next_block:
+		/* If we are out of blocks, go to the next page */
+		_JC_ASSERT(sweep->blocks_left > 0);
+		if (--sweep->blocks_left == 0) {
+			int next_index;
+
+			/* Check if we should recycle this page */
+			if (!recycle_small)
+				goto next_page;
+
+			/*
+			 * If page is empty, mark it free. Otherwise,
+			 * put it on the "use me first" list for the
+			 * corresponding block size.
+			 */
+			if (sweep->blocks_live == 0) {
+				*((_jc_word *)sweep->page) = _JC_HEAP_PAGE_FREE;
+				goto next_page;
+			}
+
+			/* Get index of next page in size's list */
+			next_index = (sweep->size->pages != NULL) ?
+			    _JC_PAGE_INDEX(sweep->heap, sweep->size->pages) :
+			    _JC_HEAP_MAX(NEXT) - 1;
+
+			/* Insert this page at the head of the list */
+			*((_jc_word *)sweep->page)
+			    = (_JC_HEAP_PAGE_SMALL << _JC_HEAP_PTYPE_SHIFT)
+			    | (sweep->bsi << _JC_HEAP_BSI_SHIFT)
+			    | (next_index << _JC_HEAP_NEXT_SHIFT);
+			sweep->size->pages = (_jc_word *)sweep->page;
+
+next_page:
+			/* Advance to the next page */
+			sweep->page += _JC_PAGE_SIZE;
+			goto new_page;
+		}
+
+		/* Advance to the next block in the page */
+		sweep->block += sweep->block_size;
+
+new_block:
+		/* Skip unoccupied blocks */
+		word = *((volatile _jc_word *)sweep->block);
+		if (word == _JC_HEAP_BLOCK_FREE || word == _JC_HEAP_BLOCK_ALLOC)
+			goto next_block;
+
+		/* Done */
+		return _jc_find_object_head(sweep->block);
+	} else
+		/* Large object: skip over all of its pages at once */
+		sweep->page += sweep->npages * _JC_PAGE_SIZE;
+
+new_page:
+	/* Check for end of heap */
+	_JC_ASSERT((char *)sweep->page <= sweep->end);
+	if ((char *)sweep->page == sweep->end)
+		return NULL;
+
+	/* Get descriptor word for this page */
+	word = *((volatile _jc_word *)sweep->page);
+
+	/* Handle page based on type */
+	switch (_JC_HEAP_EXTRACT(word, PTYPE)) {
+	case _JC_HEAP_PAGE_FREE:
+	case _JC_HEAP_PAGE_ALLOC:
+		sweep->page += _JC_PAGE_SIZE;
+		goto new_page;
+	case _JC_HEAP_PAGE_SMALL:
+		/* Load per-size sweep state, then examine first block */
+		sweep->bsi = _JC_HEAP_EXTRACT(word, BSI);
+		_JC_ASSERT(sweep->bsi >= 0
+		    && sweep->bsi < sweep->heap->num_sizes);
+		sweep->size = &sweep->heap->sizes[sweep->bsi];
+		sweep->block_size = sweep->size->size;
+		sweep->blocks_left = sweep->size->num_blocks;
+		sweep->blocks_live = 0;
+		sweep->block = sweep->page + _JC_HEAP_BLOCK_OFFSET;
+		goto new_block;
+	case _JC_HEAP_PAGE_LARGE:
+		/* One object spanning npages; sweep->size == NULL marks it */
+		sweep->npages = _JC_HEAP_EXTRACT(word, NPAGES);
+		_JC_ASSERT(sweep->npages >= 1);
+		sweep->bsi = -1;
+		sweep->size = NULL;
+		sweep->block = sweep->page + _JC_HEAP_BLOCK_OFFSET;
+		return _jc_find_object_head(sweep->block);
+	default:
+		_JC_ASSERT(0);
+		return NULL;
+	}
+}
+
+/*
+ * Return the number of references contained in an object.
+ *
+ * The lockword caches small reference counts; the saturated value
+ * (_JC_LW_MAX(REF_COUNT) - 1) means the true count must instead be
+ * derived from the object's type (or, for reference arrays, its
+ * length).
+ */
+static inline int
+_jc_num_refs(_jc_object *const obj)
+{
+	const int cached = _JC_LW_EXTRACT(obj->lockword, REF_COUNT);
+	_jc_type *type;
+
+	/* Common case: the lockword's cached count is exact */
+	if (cached != _JC_LW_MAX(REF_COUNT) - 1)
+		return cached;
+
+	/* Cached count saturated; recover the real count from the type */
+	type = obj->type;
+	if (_JC_FLG_TEST(type, ARRAY)) {
+		_JC_ASSERT((type->u.array.element_type->flags
+		    & _JC_TYPE_MASK) == _JC_TYPE_REFERENCE);
+		return ((_jc_object_array *)obj)->length;
+	}
+	return type->u.nonarray.num_virtual_refs;
+}
+
+/*
+ * Extract a native pointer stored in an object field of type long.
+ * If the field is null then NULL is returned.
+ */
+static inline void *
+_jc_get_vm_pointer(_jc_object *obj, _jc_field *field)
+{
+	char *slot;
+
+	/* Sanity check */
+	_JC_ASSERT(obj != NULL && field != NULL);
+	_JC_ASSERT(_jc_subclass_of(obj, field->class));
+	_JC_ASSERT(_jc_sig_types[(u_char)*field->signature] == _JC_TYPE_LONG);
+
+	/* Read the pointer out of the field's storage */
+	slot = (char *)obj + field->offset;
+	return *(void **)slot;
+}
+
+/*
+ * Store a native pointer into an object field of type long.
+ */
+static inline void
+_jc_set_vm_pointer(_jc_object *obj, _jc_field *field, void *ptr)
+{
+	char *slot;
+
+	/* Sanity check */
+	_JC_ASSERT(obj != NULL && field != NULL);
+	_JC_ASSERT(_jc_subclass_of(obj, field->class));
+	_JC_ASSERT(_jc_sig_types[(u_char)*field->signature] == _JC_TYPE_LONG);
+
+	/* Write the pointer into the field's storage */
+	slot = (char *)obj + field->offset;
+	*(void **)slot = ptr;
+}
+
+/*
+ * Atomically compare-and-swap a native pointer stored in an object
+ * field of type long.
+ *
+ * Returns JNI_TRUE if the field contained 'oldptr' and was updated
+ * to 'newptr', otherwise JNI_FALSE.
+ */
+static inline jboolean
+_jc_vm_pointer_cas(_jc_object *obj, _jc_field *field,
+	void *oldptr, void *newptr)
+{
+	volatile _jc_word *word;
+
+	/* Sanity check */
+	_JC_ASSERT(obj != NULL && field != NULL);
+	_JC_ASSERT(_jc_subclass_of(obj, field->class));
+	_JC_ASSERT(_jc_sig_types[(u_char)*field->signature] == _JC_TYPE_LONG);
+
+	/*
+	 * Compute the field's address only after the sanity checks:
+	 * the previous version did this in the declaration's
+	 * initializer, reading field->offset and doing pointer
+	 * arithmetic on 'obj' before the NULL-pointer asserts could
+	 * fire (pointer arithmetic on NULL is undefined behavior).
+	 */
+	word = (volatile _jc_word *)((char *)obj + field->offset);
+
+	/* Atomically update field */
+	return _jc_compare_and_swap(word, (_jc_word)oldptr, (_jc_word)newptr);
+}
+
Added: incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/instance.c
URL: http://svn.apache.org/viewcvs/incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/instance.c?rev=294974&view=auto
==============================================================================
--- incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/instance.c (added)
+++ incubator/harmony/enhanced/trunk/sandbox/contribs/jchevm/jchevm/libjc/instance.c Tue Oct 4 19:19:16 2005
@@ -0,0 +1,139 @@
+
+/*
+ * Copyright 2005 The Apache Software Foundation or its licensors,
+ * as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * $Id: instance.c,v 1.4 2005/03/12 04:24:17 archiecobbs Exp $
+ */
+
+#include "libjc.h"
+
+/*
+ * Determine if an instance of type 'from' can be assigned to
+ * a variable of type 'to'.
+ *
+ * Returns:
+ *	1	Yes
+ *	0	No
+ *	-1	Exception posted
+ */
+int
+_jc_assignable_from(_jc_env *env, _jc_type *from, _jc_type *to)
+{
+	_jc_type *const *entry;
+
+	/* Resolve types */
+	if (!_JC_FLG_TEST(from, RESOLVED)
+	    && _jc_resolve_type(env, from) != JNI_OK)
+		return -1;
+	if (!_JC_FLG_TEST(to, RESOLVED)
+	    && _jc_resolve_type(env, to) != JNI_OK)
+		return -1;
+
+	/* Quick check for a common case (?) XXX */
+	if (from == to)
+		return 1;
+
+	/* Handle the case where 'from' type is an array type */
+	if (_JC_FLG_TEST(from, ARRAY)) {
+		/*
+		 * 'from' has strictly more dimensions than array type
+		 * 'to': assignable only if 'to''s base type is one of
+		 * Object, Cloneable, or Serializable. Note the goto
+		 * jumps into the body of the following 'if' (legal,
+		 * if unusual, C) to share that three-way test.
+		 */
+		if (_JC_FLG_TEST(to, ARRAY)
+		    && from->u.array.dimensions > to->u.array.dimensions) {
+			to = to->u.array.base_type;
+			_JC_ASSERT(!_JC_FLG_TEST(to, ARRAY));
+			goto check_array;
+		}
+		/* Arrays are assignable to exactly these non-array types */
+		if (!_JC_FLG_TEST(to, ARRAY)) {
+check_array:	return to == env->vm->boot.types.Object
+		    || to == env->vm->boot.types.Cloneable
+		    || to == env->vm->boot.types.Serializable;
+		}
+		/* Both arrays, same-or-fewer dims: compare base types */
+		to = to->u.array.base_type;
+		from = from->u.array.base_type;
+	}
+
+	/* If 'to' is an array type, then we know already 'from' is not */
+	if (_JC_FLG_TEST(to, ARRAY))
+		return 0;
+
+	/* Check both base types are same primitive, or both reference */
+	if ((from->flags & _JC_TYPE_MASK) != (to->flags & _JC_TYPE_MASK))
+		return 0;
+
+	/* If both are primitive, they are the same type by the above test */
+	if ((from->flags & _JC_TYPE_MASK) != _JC_TYPE_REFERENCE)
+		return 1;
+
+	/*
+	 * Resolve 'from' type so its hashtable is valid. Not redundant
+	 * with the resolution at the top: 'from' may have been replaced
+	 * above by an array base type that was never itself resolved.
+	 * NOTE(review): 'to' is not re-resolved here; presumably its
+	 * hash is valid without resolution — confirm.
+	 */
+	if (!_JC_FLG_TEST(from, RESOLVED)
+	    && _jc_resolve_type(env, from) != JNI_OK)
+		return -1;
+
+	/* Search instanceof hash table */
+	if ((entry = from->u.nonarray.instanceof_hash_table[
+	    (int)to->u.nonarray.hash & (_JC_INSTANCEOF_HASHSIZE - 1)]) == NULL)
+		return 0;
+	while (*entry != NULL) {
+		if (*entry++ == to)
+			return 1;
+	}
+
+	/* Not found - not an instance of */
+	return 0;
+}
+
+/*
+ * Determine if 'obj' is an instance of type 'type'.
+ *
+ * Returns:
+ *	1	Yes
+ *	0	No (a null reference is an instance of no type)
+ *	-1	Exception posted
+ */
+int
+_jc_instance_of(_jc_env *env, _jc_object *obj, _jc_type *type)
+{
+	/* Null is never an instance; otherwise defer to assignability */
+	return (obj == NULL) ?
+	    JNI_FALSE : _jc_assignable_from(env, obj->type, type);
+}
+
+/*
+ * Determine if 'obj' is an instance of 'type' or any subclass thereof.
+ *
+ * This assumes that 'obj' is not NULL. If type is an interface
+ * or array type then false is always returned (only the superclass
+ * chain is searched).
+ */
+jboolean
+_jc_subclass_of(_jc_object *obj, _jc_type *type)
+{
+	_jc_type *t;
+
+	/* Sanity check */
+	_JC_ASSERT(obj != NULL);
+
+	/* Walk up the superclass chain looking for 'type' */
+	t = obj->type;
+	while (t != NULL) {
+		if (t == type)
+			return JNI_TRUE;
+		t = t->superclass;
+	}
+
+	/* Not found */
+	return JNI_FALSE;
+}
+