Posted to commits@harmony.apache.org by wj...@apache.org on 2006/11/09 05:52:17 UTC
svn commit: r472771 [7/11] - in /incubator/harmony/enhanced/drlvm/trunk/vm:
gc_cc/src/ gcv4/src/ include/ include/open/ interpreter/src/
jitrino/src/jet/ jitrino/src/vm/drl/ port/src/lil/ia32/pim/
port/src/lil/pim/ vmcore/include/ vmcore/src/class_supp...
Modified: incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Prepare.cpp
URL: http://svn.apache.org/viewvc/incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Prepare.cpp?view=diff&rev=472771&r1=472770&r2=472771
==============================================================================
--- incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Prepare.cpp (original)
+++ incubator/harmony/enhanced/drlvm/trunk/vm/vmcore/src/class_support/Prepare.cpp Wed Nov 8 20:52:12 2006
@@ -39,7 +39,7 @@
#include "interpreter.h"
#include "lil.h"
#include "lil_code_generator.h"
-#include "vm_log.h"
+#include "vm_stats.h"
#ifdef _DEBUG
#include "jni.h"
@@ -48,23 +48,13 @@
#include "dump.h"
-static void class_initialize_if_no_side_effects(Class *clss);
-
-static Boolean should_class_not_be_verified(Class* UNREF clss)
-{
- return FALSE;
-}
-
-
-
-
// For Java currently, fields are not packed: an int16 occupies a full 32 bit word.
// "do_field_compaction" is true, e.g., for packed ("sequential") layout.
// If the "clss" pointer is non-NULL, the type must be that of an instance field and any
// "padding" bytes for the field are added to the class's total number of field padding bytes.
// If "clss" is NULL, no padding information is gathered.
-static unsigned sizeof_field_type(Field *field, bool do_field_compaction, Class *clss)
+static unsigned sizeof_field_type(Field *field, bool do_field_compaction)
{
unsigned sz = 0;
unsigned pad_bytes = 0;
@@ -112,21 +102,16 @@
ABORT("Invalid type descriptor");
}
- if (clss != NULL) {
- clss->num_field_padding_bytes += pad_bytes;
- }
return sz;
-} //sizeof_field_type
+} // sizeof_field_type
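// Illustrative sketch, not from this patch (helper name is made up): the idea
// behind sizeof_field_type, assuming standard JVM field descriptors. With
// "do_field_compaction" off, sub-word primitives are widened to a full 32-bit
// word, matching the comment above about int16 fields occupying a whole word.
static unsigned descriptor_storage_size(char desc, bool do_field_compaction)
{
    unsigned natural;
    switch (desc) {
    case 'Z': case 'B': natural = 1; break;                       // boolean, byte
    case 'C': case 'S': natural = 2; break;                       // char, short
    case 'I': case 'F': natural = 4; break;                       // int, float
    case 'J': case 'D': natural = 8; break;                       // long, double
    case 'L': case '[': natural = (unsigned)sizeof(void*); break; // reference (uncompressed)
    default:            return 0;                                 // invalid descriptor
    }
    if (!do_field_compaction && natural < 4)
        natural = 4;   // widen sub-word primitives to a full 32-bit word
    return natural;
}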
-
-
-// Given a clss that is an primitive array return the size
-// of an element in an instance of the class.
-// Beware the the class is not fully formed at this time.
+// Given a class that is a primitive array returns the size
+// of an element in an instance of the class
+// NOTE: the class is not fully formed at this time
unsigned sizeof_primitive_array_element(Class *p_class)
{
- const String *elt_type = p_class->name;
+ const String *elt_type = p_class->get_name();
char elt = elt_type->bytes[1];
unsigned int sz;
switch (elt) {
@@ -169,18 +154,18 @@
//
// Is this array class a one dimensional array (vector) with a primitive component type.
//
-bool
+inline bool
is_vector_of_primitives(Class* p_class)
{
// I parse the following character of the class name
// to see if it is an array of arrays.
- if(p_class->name->bytes[1] == '[') // An array of array
+ if(p_class->get_name()->bytes[1] == '[') // An array of array
return false;
- if(p_class->name->bytes[1] == 'L') // An array of objects
+ if(p_class->get_name()->bytes[1] == 'L') // An array of objects
return false;
- if(p_class->is_array_of_primitives == 0) // base type is not primitive
+ if(!p_class->is_array_of_primitives()) // base type is not primitive
return false;
- if(p_class->is_array)
+ if(p_class->is_array())
return true;
ABORT("Should never be called unless p_class is an array");
return true;
@@ -188,12 +173,13 @@
-void assign_instance_field_offset(Class *clss, Field *field, bool do_field_compaction)
+void Class::assign_offset_to_instance_field(Field *field, bool do_field_compaction)
{
- if (!field->is_static() && !field->is_offset_computed()) {
- int sz = sizeof_field_type(field, do_field_compaction, clss);
- int offset = clss->unpadded_instance_data_size;
- // 20020927 We must continue to align fields on natural boundaries: e.g., Java ints on a 4 byte boundary.
+ if(!field->is_static() && !field->is_offset_computed()) {
+ int sz = sizeof_field_type(field, do_field_compaction);
+ int offset = m_unpadded_instance_data_size;
+ // We must continue to align fields on natural boundaries:
+ // e.g., Java ints on a 4 byte boundary.
// This is required for IPF and can improve IA32 performance.
int inc = sz;
int delta = offset % sz;
@@ -201,46 +187,46 @@
int pad_bytes = (sz - delta);
offset += pad_bytes;
inc += pad_bytes;
- clss->num_field_padding_bytes += pad_bytes;
+ m_num_field_padding_bytes += pad_bytes;
}
- field->_offset = offset;
- clss->unpadded_instance_data_size += inc;
+ field->set_offset(offset);
+ m_unpadded_instance_data_size += inc;
char c_type = *(field->get_descriptor()->bytes);
if ((c_type == '[') || (c_type == 'L')) {
- clss->n_instance_refs += 1;
+ m_num_instance_refs += 1;
}
-
- field->set_offset_computed(true);
}
-} //assign_instance_field_offset
+} // Class::assign_offset_to_instance_field
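// Illustrative sketch, not from this patch (helper name is made up): the
// natural-alignment rule used above as a standalone step. A field of size sz
// is placed at the next offset that is a multiple of sz, and the skipped bytes
// are counted as field padding.
static unsigned align_field_offset(unsigned offset, unsigned sz, unsigned* pad_bytes_out)
{
    unsigned delta = offset % sz;
    unsigned pad = (delta != 0) ? (sz - delta) : 0;
    *pad_bytes_out = pad;          // accumulated into m_num_field_padding_bytes above
    return offset + pad;
}
// Example: with an unpadded instance size of 10, a 4-byte int lands at offset
// 12 (2 pad bytes), and the unpadded size then grows to 16.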
// "field_ptrs" is an array of pointers to the class's fields.
-void assign_offsets_to_instance_fields(Class *clss, Field **field_ptrs, bool do_field_compaction)
+void Class::assign_offsets_to_instance_fields(Field** field_ptrs, bool do_field_compaction)
{
int i, sz;
- if (clss->n_fields == 0) {
- return;
- }
+ if(m_num_fields == 0) return;
- // Try to align the first field on a 4 byte boundary. It might not be if -compact_fields was specified on the command line.
- // See whether there are any short instance fields towards the end of the field array (since that is where -sort_fields puts them)
- // and try to fill in some bytes before the "first" field.
- if (Class::sort_fields && Class::compact_fields) {
- if ((clss->unpadded_instance_data_size % 4) != 0) {
- int delta = (clss->unpadded_instance_data_size % 4);
+ // Try to align the first field on a 4 byte boundary. It might not be if
+ // -compact_fields was specified on the command line. See whether there are
+ // any short instance fields towards the end of the field array (since that
+ // is where -sort_fields puts them) and try to fill in some bytes before
+ // the "first" field.
+ if(VM_Global_State::loader_env->sort_fields
+ && VM_Global_State::loader_env->compact_fields)
+ {
+ if((m_unpadded_instance_data_size % 4) != 0) {
+ int delta = (m_unpadded_instance_data_size % 4);
int pad_bytes = (4 - delta); // the number of bytes remaining to fill in
- int last_field = (clss->n_fields - 1);
- while (pad_bytes > 0) {
+ int last_field = (m_num_fields - 1);
+ while(pad_bytes > 0) {
// Find a field to allocate
int field_to_allocate = -1;
- for (i = last_field; i >= clss->n_static_fields; i--) {
+ for(i = last_field; i >= get_number_of_static_fields(); i--) {
Field* field = field_ptrs[i];
- if (!field->is_static() && !field->is_offset_computed()) {
- sz = sizeof_field_type(field, do_field_compaction, clss);
- if (sz > pad_bytes) {
- break; // field is too big
+ if(!field->is_static() && !field->is_offset_computed()) {
+ sz = sizeof_field_type(field, do_field_compaction);
+ if(sz > pad_bytes) {
+ break; // field is too big
}
field_to_allocate = i;
break;
@@ -248,15 +234,16 @@
}
// Now allocate that field, if one was found
if (field_to_allocate == -1) {
- // No field could be found to fill in. "pad_bytes" is the number of padding bytes to insert.
- clss->unpadded_instance_data_size += pad_bytes;
- clss->num_field_padding_bytes += pad_bytes;
+ // No field could be found to fill in.
+ // "pad_bytes" is the number of padding bytes to insert.
+ m_unpadded_instance_data_size += pad_bytes;
+ m_num_field_padding_bytes += pad_bytes;
break;
} else {
last_field = (i - 1);
Field* victim_field = field_ptrs[field_to_allocate];
- assign_instance_field_offset(clss, victim_field, do_field_compaction);
- delta = (clss->unpadded_instance_data_size % 4);
+ assign_offset_to_instance_field(victim_field, do_field_compaction);
+ delta = (m_unpadded_instance_data_size % 4);
pad_bytes = ((delta > 0)? (4 - delta) : 0);
}
}
@@ -264,23 +251,23 @@
}
// Place the remaining instance fields.
- for (i = clss->n_static_fields; i < clss->n_fields; i++) {
- assign_instance_field_offset(clss, field_ptrs[i], do_field_compaction);
+ for(i = get_number_of_static_fields(); i < m_num_fields; i++) {
+ assign_offset_to_instance_field(field_ptrs[i], do_field_compaction);
}
-} //assign_offsets_to_instance_fields
+} // Class::assign_offsets_to_instance_fields
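// Illustrative sketch, not from this patch: the gap-filling strategy above in
// isolation. With -compact_fields the running size can end mid-word; the loop
// looks for a not-yet-placed short field (sorted towards the end of the array
// by -sort_fields) that fits in the bytes remaining before the next 4-byte
// boundary, and only inserts plain padding when nothing fits.
//
// Example: unpadded size 6 leaves 2 bytes to the boundary; a pending 2-byte
// char can be placed at offset 6 with no padding, whereas a pending 4-byte int
// would not fit, so 2 pad bytes would be inserted instead.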
// "field_ptrs" is an array of pointers to the class's fields.
-void assign_offsets_to_static_fields(Class *clss, Field **field_ptrs, bool do_field_compaction)
+void Class::assign_offsets_to_static_fields(Field** field_ptrs, bool do_field_compaction)
{
- for (int i=0; i<clss->n_static_fields; i++) {
+ for(int i = 0; i < get_number_of_static_fields(); i++) {
Field* field = field_ptrs[i];
assert(field->is_static());
// static (i.e., class) data field
// is this needed for interface static constants?
int field_size;
- field_size = sizeof_field_type(field, do_field_compaction, /*clss*/ NULL); // NULL clss since this is a static field
+ field_size = sizeof_field_type(field, do_field_compaction);
// Align the static field if necessary.
#ifdef POINTER64
@@ -288,17 +275,16 @@
#else // not POINTER64
if (field->get_descriptor()->bytes[0] == 'D') {
#endif // not POINTER64
- if ((clss->static_data_size%8)!=0) {
- clss->static_data_size += 4;
- assert((clss->static_data_size%8)==0);
+ if((m_static_data_size%8)!=0) {
+ m_static_data_size += 4;
+ assert((m_static_data_size%8)==0);
}
}
- field->_offset = clss->static_data_size;
- field->set_offset_computed(true);
- clss->static_data_size += field_size;
+ field->set_offset(m_static_data_size);
+ m_static_data_size += field_size;
}
-} //assign_offsets_to_static_fields
+} // Class::assign_offsets_to_static_fields
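// Illustrative sketch, not from this patch (helper name is made up): the
// 8-byte alignment step above in isolation. Static data offsets normally
// advance in 4-byte units on 32-bit builds, so a double may need one extra
// pad word to start on an 8-byte boundary.
static unsigned align_to_8_bytes(unsigned static_data_size)
{
    if ((static_data_size % 8) != 0)
        static_data_size += 4;   // insert one 4-byte pad word
    return static_data_size;
}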
// Return the field's size before any padding: e.g., 1 for a Byte, 2 for a Char.
@@ -308,7 +294,7 @@
}
int sz;
- sz = sizeof_field_type(field, /*do_field_compaction*/ true, /*clss*/ NULL); // NULL clss since no padding is wanted
+ sz = sizeof_field_type(field, /*do_field_compaction*/ true);
return sz;
} //field_size
@@ -359,45 +345,48 @@
}
} //qsort
-void assign_offsets_to_class_fields(Class *clss)
+
+void Class::assign_offsets_to_fields()
{
- assert(clss->state != ST_InstanceSizeComputed);
- bool do_field_compaction = Class::compact_fields;
- bool do_field_sorting = Class::sort_fields;
+ assert(m_state != ST_InstanceSizeComputed);
+ bool do_field_compaction = VM_Global_State::loader_env->compact_fields;
+ bool do_field_sorting = VM_Global_State::loader_env->sort_fields;
// Create a temporary array of pointers to the class's fields. We do this to support sorting the fields
// by size if the command line option "-sort_fields" is given, and because elements of the clss->fields array
// cannot be rearranged without copying their entire Field structure.
- Field **field_ptrs = new Field*[clss->n_fields];
- for (int i=0; i<clss->n_fields; i++) {
- field_ptrs[i] = &(clss->fields[i]);
+ Field** field_ptrs = new Field*[m_num_fields];
+ for(int i = 0; i < m_num_fields; i++) {
+ field_ptrs[i] = &m_fields[i];
}
- if (clss->state != ST_InstanceSizeComputed) {
+ assert(m_state < ST_InstanceSizeComputed);
+ if(m_state != ST_InstanceSizeComputed) {
// Sort the instance fields by size before allocating their offsets. But not if doing sequential layout!
// Note: we must sort the instance fields separately from the static fields since for some classes the offsets
// of statics can only be determined after the offsets of instance fields are found.
- if (do_field_sorting && (clss->n_fields > 0)) {
- qsort(field_ptrs, clss->n_static_fields, (clss->n_fields - 1),
- clss, /*doing_instance_flds:*/ true);
+ if(do_field_sorting && (m_num_fields > 0)) {
+ qsort(field_ptrs, m_num_static_fields,
+ (m_num_fields - 1), this,
+ /*doing_instance_flds:*/ true);
}
// We have to assign offsets to a type's instance fields first because
// a static field of that type needs the instance size if it's a value type.
- assign_offsets_to_instance_fields(clss, field_ptrs, do_field_compaction);
+ assign_offsets_to_instance_fields(field_ptrs, do_field_compaction);
#ifdef DEBUG_FIELD_SORTING
if (do_field_sorting) {
- printf("\nInstance fields for %s, size=%d\n", clss->name->bytes, clss->unpadded_instance_data_size);
- if (clss->super_class != NULL) {
- printf(" super_class: %s\n", clss->super_class->name->bytes);
- }
- for (int i=0; i<clss->n_fields; i++) {
- Field *field = field_ptrs[i];
- if (!field->is_static()) {
- const String *typeDesc = field->get_descriptor();
- int sz = field_size(field, clss, /*doing_instance_flds:*/ true);
- printf(" %40s %c %4d %4d\n", field->get_name()->bytes, typeDesc->bytes[0], sz, field->_offset);
+ printf("\nInstance fields for %s, size=%d\n", m_name->bytes, m_unpadded_instance_data_size);
+ if(m_super_class != NULL) {
+ printf(" super_class: %s\n", m_super_class->get_name()->bytes);
+ }
+ for(int i = 0; i < m_num_fields; i++) {
+ Field* field = field_ptrs[i];
+ if(!field->is_static()) {
+ const String* typeDesc = field->get_descriptor();
+ int sz = field_size(field, this, /*doing_instance_flds:*/ true);
+ printf(" %40s %c %4d %4d\n", field->get_name()->bytes, typeDesc->bytes[0], sz, field->_offset);
fflush(stdout);
}
}
@@ -405,24 +394,24 @@
#endif // DEBUG_FIELD_SORTING
// Set class to ST_InstanceSizeComputed state.
- clss->state = ST_InstanceSizeComputed;
+ m_state = ST_InstanceSizeComputed;
}
// Sort the static fields by size before allocating their offsets.
- if (do_field_sorting && (clss->n_static_fields > 0)) {
- qsort(field_ptrs, 0, (clss->n_static_fields - 1),
- clss, /*doing_instance_flds:*/ false);
+ if(do_field_sorting && (m_num_static_fields > 0)) {
+ qsort(field_ptrs, 0, m_num_static_fields - 1,
+ this, /*doing_instance_flds:*/ false);
}
- assign_offsets_to_static_fields(clss, field_ptrs, do_field_compaction);
+ assign_offsets_to_static_fields(field_ptrs, do_field_compaction);
#ifdef DEBUG_FIELD_SORTING
if (do_field_sorting) {
- printf("Static fields for %s, size=%d\n", clss->name->bytes, clss->static_data_size);
- for (int i=0; i<clss->n_fields; i++) {
- Field *field = field_ptrs[i];
- if (field->is_static()) {
+ printf("Static fields for %s, size=%d\n", m_name->bytes, static_data_size);
+ for(int i = 0; i < m_num_fields; i++) {
+ Field* field = field_ptrs[i];
+ if(field->is_static()) {
const String *typeDesc = field->get_descriptor();
- int sz = field_size(field, clss, /*doing_instance_flds:*/ false);
+ int sz = field_size(field, this, /*doing_instance_flds:*/ false);
printf(" %40s %c %4d %4d\n", field->get_name()->bytes, typeDesc->bytes[0], sz, field->_offset);
fflush(stdout);
}
@@ -430,24 +419,25 @@
}
#endif // DEBUG_FIELD_SORTING
delete[] field_ptrs;
-} //assign_offsets_to_class_fields
+} // Class::assign_offsets_to_fields
// Required for reflection. See class_prepare STEP20 for further explanation.
bool assign_values_to_class_static_final_fields(Class *clss)
{
ASSERT_RAISE_AREA;
- bool do_field_compaction = Class::compact_fields;
+ assert(!hythread_is_suspend_enabled());
+ bool do_field_compaction = VM_Global_State::loader_env->compact_fields;
- for (int i=0; i<clss->n_fields; i++) {
- Field& field = clss->fields[i];
- if (field.is_static()) {
- Java_Type field_type = field.get_java_type();
- void *field_addr = field.get_address();
+ for(int i = 0; i < clss->get_number_of_fields(); i++) {
+ Field* field = clss->get_field(i);
+ if(field->is_static()) {
+ Java_Type field_type = field->get_java_type();
+ void* field_addr = field->get_address();
// If static field is constant it should be initialized by its constant value,...
- if (field.get_const_value_index()) {
- Const_Java_Value cvalue = field.get_const_value();
+ if(field->get_const_value_index()) {
+ Const_Java_Value cvalue = field->get_const_value();
switch(field_type) {
case '[':
case 'L':
@@ -464,8 +454,7 @@
static const String* jlstring_desc_string =
VM_Global_State::loader_env->
string_pool.lookup("Ljava/lang/String;");
- if (field.get_descriptor() == jlstring_desc_string) {
- tmn_suspend_disable();
+ if (field->get_descriptor() == jlstring_desc_string) {
// ------------------------------------------------------------vv
Java_java_lang_String *str = vm_instantiate_cp_string_resolved(cvalue.string);
@@ -473,19 +462,18 @@
assert(exn_raised());
tmn_suspend_enable();
TRACE2("classloader.prepare", "failed instantiate final field : "
- << clss->name->bytes << "." << field.get_name()->bytes);
+ << clss->get_name()->bytes << "." << field->get_name()->bytes);
return false;
}
STORE_GLOBAL_REFERENCE((COMPRESSED_REFERENCE *)field_addr, str);
// ------------------------------------------------------------^^
- tmn_suspend_enable();
} else {
ABORT("Unexpected type descriptor");
}
break;
}
default:
- int field_size = sizeof_field_type(&field, do_field_compaction, /*clss*/ NULL); // NULL clss since field is static
+ int field_size = sizeof_field_type(field, do_field_compaction);
memmove(field_addr, (void*)&cvalue, field_size);
}
@@ -504,123 +492,73 @@
} //assign_values_to_class_static_final_fields
-void build_gc_class_ref_map(Global_Env* env, Class *clss)
-{
- // Add 1 to the size of the information since it includes a zero delimiter.
- // Please note where this get deleted when we unload a class!!!!?
- // It should be done by a call to the gc side of the interface.
- unsigned *local_gc_information = new unsigned[clss->n_instance_refs + 1];
- unsigned int current_index = 0;
-
- // Copy the superclasses gc_information into this refs_offset_map incrementing current_index as needed.
- if (clss->super_class) { // We might be in initialization.
- assert (clss->n_instance_refs >= clss->super_class->n_instance_refs);
-
- // Ask the GC to fill the local_gc_information with the super class offsets.
- current_index = clss->super_class->n_instance_refs;
- } else {
- assert (current_index == 0);
-
- //extern bool bootstrapped;
- //// gloss over bootstrap inconsistency
- //if (bootstrapped == true) {
- if(env->InBootstrap()) {
- assert(clss->n_instance_refs == 0);
- }
- }
- assert (current_index <= clss->n_instance_refs);
-
- for (int i = 0; i<clss->n_fields; i++) {
- Field& field = clss->fields[i];
- if (field.is_static()) {
- // static (i.e., class) data field
- // Since refs_offset only deals with instance fields
- // this can be skipped. We don't change current offset for statics.
- } else {
- // instance data field
- //
- char c_type = *(field.get_descriptor()->bytes);
- if ((c_type == '[') || (c_type == 'L')) {
- assert (field.get_offset() != 0); // Only the vtable can have this offset.
- local_gc_information[current_index] = field.get_offset();
- current_index = current_index + 1;
- }
- }
- }
- assert (current_index == (clss->n_instance_refs));
- local_gc_information[current_index] = 0;
- // delimit with 0 since and offsetwill never be zero, that is where the vtable is we are OK.
- delete[] local_gc_information;
- // gc_information is not created, and populated and zero deliminted.
- // Pass this to the GC since it responsible for the format of the
- // information saved in clss->gc_information.
-} //build_gc_class_ref_map
-
//
// create_intfc_table
//
Intfc_Table *create_intfc_table(Class* clss, unsigned n_entries)
{
unsigned size = INTFC_TABLE_OVERHEAD + (n_entries * sizeof(Intfc_Table_Entry));
- Intfc_Table *table = (Intfc_Table*) clss->class_loader->Alloc(size);
+ Intfc_Table *table = (Intfc_Table*) clss->get_class_loader()->Alloc(size);
memset(table,0,size);
return table;
}
-void build_class_interface_table_descriptors(Class *clss)
+void Class::build_interface_table_descriptors()
{
- // Compute the number of methods that are in the interface part of the vtable. Also, create the array of
- // interfaces (clss->intfc_table_descriptors[]) this class directly or indirectly implements. Note that
- // _n_intfc_table_entries was initialized earlier. This is an upperbound because we eliminate duplicate
- // entries in the table below.
+ // Compute the number of methods that are in the interface part of
+ // the vtable. Also, create the array of interfaces (intfc_table_descriptors[])
+ // this class directly or indirectly implements. Note that
+ // m_num_intfc_table_entries was initialized earlier. This is an upperbound
+ // because we eliminate duplicate entries in the table below.
unsigned i, k;
- for (i = 0; i < clss->n_superinterfaces; i++) {
- Class *intfc = clss->superinterfaces[i].clss;
- clss->n_intfc_table_entries += intfc->n_intfc_table_entries;
+ for (i = 0; i < m_num_superinterfaces; i++) {
+ Class* intfc = get_superinterface(i);
+ m_num_intfc_table_entries += intfc->m_num_intfc_table_entries;
}
// allocate the class's intfc_table_descriptors[] array
- if (clss->n_intfc_table_entries != 0) {
- clss->intfc_table_descriptors = (Class **) clss->class_loader->Alloc(clss->n_intfc_table_entries * sizeof(Class *));
- } else {
- clss->intfc_table_descriptors = NULL;
+ if(m_num_intfc_table_entries != 0) {
+ m_intfc_table_descriptors = (Class**)m_class_loader
+ ->Alloc(m_num_intfc_table_entries * sizeof(Class*));
}
- for (k = 0; k < clss->n_intfc_table_entries; k++) {
- clss->intfc_table_descriptors[k] = NULL;
+ for(k = 0; k < m_num_intfc_table_entries; k++) {
+ m_intfc_table_descriptors[k] = NULL;
}
// fill in intfc_table_descriptors with the descriptors from the superclass and the superinterfaces
unsigned intfc_table_entry = 0;
- if (clss->super_class != NULL) {
- for (unsigned i = 0; i < clss->super_class->n_intfc_table_entries; i++) {
- clss->intfc_table_descriptors[intfc_table_entry] = clss->super_class->intfc_table_descriptors[i];
+ if(has_super_class()) {
+ for(unsigned i = 0; i < get_super_class()->m_num_intfc_table_entries; i++) {
+ m_intfc_table_descriptors[intfc_table_entry] =
+ get_super_class()->m_intfc_table_descriptors[i];
intfc_table_entry++;
}
}
- for (k = 0; k < clss->n_superinterfaces; k++) {
- Class *intfc = clss->superinterfaces[k].clss;
- for (i = 0; i < intfc->n_intfc_table_entries; i++) {
- clss->intfc_table_descriptors[intfc_table_entry] = intfc->intfc_table_descriptors[i];
+ for(k = 0; k < m_num_superinterfaces; k++) {
+ Class* intfc = get_superinterface(k);
+ for(i = 0; i < intfc->m_num_intfc_table_entries; i++) {
+ m_intfc_table_descriptors[intfc_table_entry] =
+ intfc->m_intfc_table_descriptors[i];
intfc_table_entry++;
}
}
// if this class is an interface, add it to the interface table
- if (class_is_interface(clss)) {
- clss->intfc_table_descriptors[intfc_table_entry] = clss;
+ if(is_interface()) {
+ m_intfc_table_descriptors[intfc_table_entry] = this;
intfc_table_entry++;
}
// sort the interfaces in intfc_table_descriptors, eliminating duplicate entries
unsigned last_min_id = 0;
- for (i = 0; i < clss->n_intfc_table_entries; i++) {
+ for (i = 0; i < m_num_intfc_table_entries; i++) {
//
// select the next interface C with smallest id and insert
// into i'th position; delete entry of C if C is the same
// as i-1'th entry
//
- Class *intfc = clss->intfc_table_descriptors[i];
- unsigned min_index = i; // index of intfc with min id
- unsigned min_id = intfc->id; // id of intfc with min id
- for (unsigned k = i+1; k < clss->n_intfc_table_entries; k++) {
- unsigned id = clss->intfc_table_descriptors[k]->id;
+ Class* intfc = m_intfc_table_descriptors[i];
+ unsigned min_index = i; // index of intfc with min id
+ unsigned min_id = intfc->get_id(); // id of intfc with min id
+ for(unsigned k = i + 1; k < m_num_intfc_table_entries; k++) {
+ unsigned id = m_intfc_table_descriptors[k]->get_id();
if (id < min_id) {
// new min
min_index = k;
@@ -628,23 +566,25 @@
continue;
}
}
- // if the id of the min is the same as the i-1'th entry's id, then we have a duplicate
- if (min_id == last_min_id) {
- // duplicate found -- insert the last entry in place of the duplicate's entry
- clss->intfc_table_descriptors[min_index] = clss->intfc_table_descriptors[clss->n_intfc_table_entries-1];
- clss->n_intfc_table_entries--;
+ // if the id of the min is the same as the i-1'th entry's id,
+ // then we have a duplicate
+ if(min_id == last_min_id) {
+ // duplicate found - insert the last entry in place of the duplicate's entry
+ m_intfc_table_descriptors[min_index] =
+ m_intfc_table_descriptors[m_num_intfc_table_entries-1];
+ m_num_intfc_table_entries--;
continue;
}
last_min_id = min_id;
- if (min_index == i) {
+ if(min_index == i) {
continue;
}
// swap i'th entry with min entry
- Class *min_intfc = clss->intfc_table_descriptors[min_index];
- clss->intfc_table_descriptors[min_index] = clss->intfc_table_descriptors[i];
- clss->intfc_table_descriptors[i] = min_intfc;
+ Class* min_intfc = m_intfc_table_descriptors[min_index];
+ m_intfc_table_descriptors[min_index] = m_intfc_table_descriptors[i];
+ m_intfc_table_descriptors[i] = min_intfc;
}
-} //build_class_interface_table_descriptors
+} // Class::build_interface_table_descriptors
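// Illustrative sketch, not from this patch (helper name is made up): the net
// effect of the loop above, shown on a plain array of interface ids - sort
// ascending and drop duplicates so that each directly or indirectly
// implemented interface appears exactly once in the table.
static unsigned sort_and_dedup_ids(unsigned* ids, unsigned n)
{
    // simple selection sort (n is small for typical classes)
    for (unsigned i = 0; i + 1 < n; i++) {
        unsigned min_index = i;
        for (unsigned k = i + 1; k < n; k++)
            if (ids[k] < ids[min_index]) min_index = k;
        unsigned tmp = ids[i]; ids[i] = ids[min_index]; ids[min_index] = tmp;
    }
    // compact out adjacent duplicates
    unsigned out = 0;
    for (unsigned i = 0; i < n; i++)
        if (out == 0 || ids[i] != ids[out - 1]) ids[out++] = ids[i];
    return out;   // de-duplicated length
}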
// Returns the method matching the Signature "sig" that is implemented directly or indirectly by "clss", or NULL if not found.
@@ -652,8 +592,8 @@
{
assert(clss);
Method *m = NULL;
- for(; ((clss != NULL) && (m == NULL)); clss = clss->super_class) {
- m = class_lookup_method(clss, name, desc);
+ for(; ((clss != NULL) && (m == NULL)); clss = clss->get_super_class()) {
+ m = clss->lookup_method(name, desc);
}
return m;
} //find_method_impl_by_class
@@ -661,25 +601,25 @@
// Add the new fake methods to class.
void inline add_new_fake_method( Class *clss, Class *example, unsigned *next)
{
- for (unsigned i = 0; i < clss->n_superinterfaces; i++) {
- Class *intf_clss = clss->superinterfaces[i].clss;
+ for (unsigned i = 0; i < clss->get_number_of_superinterfaces(); i++) {
+ Class *intf_clss = clss->get_superinterface(i);
add_new_fake_method(intf_clss, example, next);
- for (unsigned k = 0; k < intf_clss->n_methods; k++) {
- Method &intf_method = intf_clss->methods[k];
- if (intf_method.is_clinit()) {
+ for(unsigned k = 0; k < intf_clss->get_number_of_methods(); k++) {
+ Method* intf_method = intf_clss->get_method(k);
+ if(intf_method->is_clinit()) {
continue;
}
// See if the interface method "intf_method" is implemented by clss.
- const String* intf_name = intf_method.get_name();
- const String* intf_desc = intf_method.get_descriptor();
- Method *impl_method = find_method_impl_by_class(example, intf_name, intf_desc);
+ const String* intf_name = intf_method->get_name();
+ const String* intf_desc = intf_method->get_descriptor();
+ Method* impl_method = find_method_impl_by_class(example, intf_name, intf_desc);
if (impl_method == NULL) {
#ifdef DEBUG_FAKE_METHOD_ADDITION
printf("** Adding fake method to class %s for unimplemented method %s of interface %s.\n",
example->name->bytes, intf_name->bytes, intf_clss->name->bytes);
#endif
- Method *fake_method = &(example->methods[(*next)]);
+ Method* fake_method = example->get_method(*next);
(*next)++;
fake_method->_class = example;
@@ -689,7 +629,7 @@
fake_method->_state = Method::ST_NotCompiled;
fake_method->_access_flags = (ACC_PUBLIC | ACC_ABSTRACT);
// Setting its "_intf_method_for_fake_method" field marks the method as being fake.
- fake_method->_intf_method_for_fake_method = &intf_method;
+ fake_method->_intf_method_for_fake_method = intf_method;
// The rest of the method's fields were zero'd above
}
}
@@ -702,17 +642,17 @@
unsigned inline count_fake_interface_method( Class *clss, Class *example )
{
unsigned count = 0;
- for (unsigned i = 0; i < clss->n_superinterfaces; i++) {
- Class *intf_clss = clss->superinterfaces[i].clss;
+ for (unsigned i = 0; i < clss->get_number_of_superinterfaces(); i++) {
+ Class *intf_clss = clss->get_superinterface(i);
count += count_fake_interface_method(intf_clss, example);
- for (unsigned k = 0; k < intf_clss->n_methods; k++) {
- Method &intf_method = intf_clss->methods[k];
- if (intf_method.is_clinit()) {
+ for(unsigned k = 0; k < intf_clss->get_number_of_methods(); k++) {
+ Method* intf_method = intf_clss->get_method(k);
+ if(intf_method->is_clinit()) {
continue;
}
// See if the interface method "intf_method" is implemented by clss.
- const String *intf_name = intf_method.get_name();
- const String *intf_desc = intf_method.get_descriptor();
+ const String *intf_name = intf_method->get_name();
+ const String *intf_desc = intf_method->get_descriptor();
Method *impl_method = find_method_impl_by_class(example, intf_name, intf_desc);
if (impl_method == NULL) {
count++;
@@ -722,16 +662,13 @@
return count;
} // count_fake_interface_method
-// Add any required "fake" methods to a class. These are interface methods inherited by an abstract class that
-// are not implemented by that class or any superclass. Such methods will never be called, but they are added
-// so they have the correct offset in the virtual method part of the vtable (i.e., the offset of the "real" method
-// in the vtable for a concrete class).
-void add_any_fake_methods(Class *clss)
+
+void Class::add_any_fake_methods()
{
- assert(class_is_abstract(clss));
+ assert(is_abstract());
// First, count the fake methods. These are the interface methods that are not implemented by the class.
- unsigned num_fake_methods = count_fake_interface_method(clss, clss);
+ unsigned num_fake_methods = count_fake_interface_method(this, this);
// If no fake methods are needed, just return.
if (num_fake_methods == 0) {
@@ -742,119 +679,118 @@
#ifdef DEBUG_FAKE_METHOD_ADDITION
printf("\n** %u fake methods needed for class %s \n", num_fake_methods, clss->name->bytes);
#endif
- unsigned new_num_methods = (clss->n_methods + num_fake_methods);
- Method *new_meth_array = new Method[new_num_methods];
- if (clss->methods != NULL) {
- memcpy(new_meth_array, clss->methods, (clss->n_methods * sizeof(Method)));
+ unsigned new_num_methods = (m_num_methods + num_fake_methods);
+ Method* new_meth_array = new Method[new_num_methods];
+ if(m_methods != NULL) {
+ memcpy(new_meth_array, m_methods, (m_num_methods * sizeof(Method)));
}
- unsigned next_fake_method_idx = clss->n_methods;
+ unsigned next_fake_method_idx = m_num_methods;
memset(&(new_meth_array[next_fake_method_idx]), 0, (num_fake_methods * sizeof(Method)));
- // Regenerate the existing compile-me/delegate/unboxer stubs and redirect the class's static_initializer and default_constructor fields
- // since they refer to the old method block. We regenerate the stubs because any code to update the addresses in
- // the existing stubs would be very fragile, fake methods very rarely need to be added, and the stubs are small.
- for (unsigned i = 0; i < clss->n_methods; i++) { // Note that this is still the old number of methods.
- Method *m = &clss->methods[i];
- Method *m_copy = &new_meth_array[i];
- if (m_copy->get_method_sig()) {
+ // Regenerate the existing compile-me/delegate/unboxer stubs and redirect
+ // the class's static_initializer and default_constructor fields since
+ // they refer to the old method block. We regenerate the stubs
+ // because any code to update the addresses in the existing stubs would be
+ // very fragile, fake methods very rarely need to be added, and the stubs
+ // are small.
+ // Note that this is still the old number of methods.
+ for(unsigned i = 0; i < m_num_methods; i++) {
+ Method* m = &m_methods[i];
+ Method* m_copy = &new_meth_array[i];
+ if(m_copy->get_method_sig())
+ {
m_copy->get_method_sig()->method = m_copy;
}
- if (m->is_clinit()) {
- clss->static_initializer = m_copy;
+ if(m->is_clinit())
+ {
+ m_static_initializer = m_copy;
}
- if (m->get_name() == VM_Global_State::loader_env->Init_String && m->get_descriptor() == VM_Global_State::loader_env->VoidVoidDescriptor_String) {
- clss->default_constructor = m_copy;
+ if(m->get_name() == VM_Global_State::loader_env->Init_String
+ && m->get_descriptor() == VM_Global_State::loader_env->VoidVoidDescriptor_String)
+ {
+ m_default_constructor = m_copy;
}
}
// Free the old storage area for the class's methods, and have the class point to the new method storage area.
- if (clss->methods != NULL) {
- delete [] clss->methods;
+ if(m_methods != NULL) {
+ delete[] m_methods;
}
- clss->methods = new_meth_array;
+ m_methods = new_meth_array;
+ m_num_methods = (uint16)new_num_methods;
// Add the new fake methods.
- add_new_fake_method( clss, clss, &next_fake_method_idx );
+ add_new_fake_method( this, this, &next_fake_method_idx );
// some methods could be counted several times as "fake" methods (count_fake_interface_method())
// however they are added only once. So we adjust the number of added methods.
assert(next_fake_method_idx <= new_num_methods);
- clss->n_methods = (uint16)next_fake_method_idx;
+ m_num_methods = (uint16)next_fake_method_idx;
} //add_any_fake_methods
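// Illustrative sketch, not from this patch: the situation fake methods cover.
// Suppose (in Java terms)
//
//     interface I { void m(); }
//     abstract class A implements I { /* no m() here or in any superclass */ }
//     class B extends A { public void m() { ... } }
//
// A gets an abstract placeholder entry for m() so that m() occupies the same
// offset in A's vtable as the real implementation will in the vtable of a
// concrete subclass such as B. The placeholder itself is never actually
// called; abstract slots are later filled with an AbstractMethodError stub
// (see create_and_populate_interface_table below).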
-void assign_offsets_to_class_methods(Class *clss)
+
+void Class::assign_offsets_to_methods(Global_Env* env)
{
- // At this point we have an array of the interfaces implemented by this class. We also know the number of
- // methods in the interface part of the vtable. We now need to find the number of virtual methods that are in
- // the virtual method part of the vtable, before we can allocate _vtable and _vtable_descriptors.
-
- // First, if the class is abstract, add any required "fake" methods: these are abstract methods inherited
- // by an abstract class that are not implemented by that class or any superclass.
- if (class_is_abstract(clss) && !class_is_interface(clss)) {
- add_any_fake_methods(clss);
+ // At this point we have an array of the interfaces implemented by
+ // this class. We also know the number of methods in the interface part
+ // of the vtable. We now need to find the number of virtual methods
+ // that are in the virtual method part of the vtable, before we can
+ // allocate _vtable and _vtable_descriptors.
+
+ // First, if the class is abstract, add any required "fake" methods:
+ // these are abstract methods inherited by an abstract class that are
+ // not implemented by that class or any superclass.
+ if(is_abstract() && !is_interface()) {
+ add_any_fake_methods();
}
- Method **super_vtable_descriptors = NULL;
+ Method** super_vtable_descriptors = NULL;
unsigned n_super_virtual_method_entries = 0;
- if (clss->super_class != NULL) {
- super_vtable_descriptors = clss->super_class->vtable_descriptors;
- n_super_virtual_method_entries = clss->super_class->n_virtual_method_entries;
+ if(has_super_class()) {
+ super_vtable_descriptors = get_super_class()->m_vtable_descriptors;
+ n_super_virtual_method_entries =
+ get_super_class()->m_num_virtual_method_entries;
}
// Offset of the next entry in the vtable to use.
#ifdef POINTER64
- unsigned next_vtable_offset = clss->n_virtual_method_entries << 3;
+ unsigned next_vtable_offset = m_num_virtual_method_entries << 3;
#else
- unsigned next_vtable_offset = clss->n_virtual_method_entries << 2;
+ unsigned next_vtable_offset = m_num_virtual_method_entries << 2;
#endif
- if (!class_is_interface(clss)) {
+ if(!is_interface()) {
// Classes have an additional overhead for the class pointer and interface table.
next_vtable_offset += VTABLE_OVERHEAD;
}
unsigned i, j;
- for (i = 0; i < clss->n_methods; i++) {
- Method& method = clss->methods[i];
+ for(i = 0; i < m_num_methods; i++) {
+ Method& method = m_methods[i];
// check if the method hasn't already been initialized or even compiled
assert(method.get_code_addr() == NULL);
// initialize method's code address
method.set_code_addr((char*)compile_gen_compile_me(&method));
- if (method.is_static()) {
- // A static method
- if (method.is_clinit()) {
- method._offset = 0;
- method._index = 0;
- } else {
- // the class better not be an interface!
- // To do : make sure this is not an interface
- assert(!class_is_interface(clss));
- method._offset = clss->static_method_size;
-#ifdef POINTER64
- clss->static_method_size += 8;
-#else
- clss->static_method_size += 4;
-#endif
- }
- } else {
- // A virtual method. Look it up in virtual method tables of the super classes; if not found, then assign a new offset.
-
+ if(!method.is_static()) {
+ // A virtual method. Look it up in virtual method tables of the
+ // super classes; if not found, then assign a new offset.
+
// Ignore initializers.
if (method.is_init()) {
continue;
}
-#ifdef REMOVE_FINALIZE_FROM_VTABLES
- // skip over finalize() method, but remember it
if (method.is_finalize()) {
- clss->finalize_method = &method;
+ if(get_name() != env->JavaLangObject_String) {
+ m_has_finalizer = 1;
+ }
+#ifdef REMOVE_FINALIZE_FROM_VTABLES
+ // skip over finalize() method, but remember it
+ finalize_method = &method;
continue;
- }
#endif
+ }
unsigned off = 0;
unsigned index = 0;
if (super_vtable_descriptors != NULL) {
- bool same_runtime_package_as_super =
- (clss->package == clss->super_class->package);
-
const String *name = method.get_name();
const String *desc = method.get_descriptor();
for (j = 0; j < n_super_virtual_method_entries; j++) {
@@ -863,7 +799,7 @@
if(m->is_final()) {
if(m->is_private()
|| (m->is_package_private()
- && m->get_class()->package != method.get_class()->package))
+ && m->get_class()->get_package() != method.get_class()->get_package()))
{
// We allow to override private final and
// default (package private) final methods
@@ -872,17 +808,17 @@
// Note: for package private methods this statement
// is true only for classes from different packages
} else {
- REPORT_FAILED_CLASS_CLASS(clss->class_loader, clss,
+ REPORT_FAILED_CLASS_CLASS(get_class_loader(), this,
"java/lang/VerifyError",
"An attempt is made to override final method "
- << m->get_class()->name->bytes << "."
+ << m->get_class()->get_name()->bytes << "."
<< m->get_name()->bytes << m->get_descriptor()->bytes);
return;
}
}
// method doesn't override m if method has package access
// and is in a different runtime package than m.
- if(same_runtime_package_as_super
+ if(m_package == m->get_class()->get_package()
|| m->is_public()
|| m->is_protected()
|| m->is_private())
@@ -899,130 +835,126 @@
}
}
}
- if (off == 0 || class_is_interface(clss)) {
- // Didn't find a matching signature in any super class; add a new entry to this class' vtable.
- off = next_vtable_offset;
- index = clss->n_virtual_method_entries;
+ if (off == 0 || is_interface()) {
+ // Didn't find a matching signature in any super class;
+ // add a new entry to this class' vtable.
+ off = next_vtable_offset;
+ index = m_num_virtual_method_entries;
#ifdef POINTER64
- next_vtable_offset += 8;
+ next_vtable_offset += 8;
#else
- next_vtable_offset += 4;
+ next_vtable_offset += 4;
#endif
- clss->n_virtual_method_entries++;
+ m_num_virtual_method_entries++;
}
- method._offset = off;
- method._index = index;
+ method.set_position_in_vtable(index, off);
}
}
// Figure out which methods don't do anything
- for (i = 0; i < clss->n_methods; i++) {
- Method& method = clss->methods[i];
- method._set_nop();
- }
+ // ppervov: suspending this check, as it only detects empty constructors
+ //for (i = 0; i < n_methods; i++) {
+ // Method& method = methods[i];
+ // method._set_nop();
+ //}
// Decide whether it is possible to allocate instances of this class using a fast inline sequence containing
// no calls to other routines. This means no calls to raise exceptions or to invoke constructors. It will also be
// necessary that the allocation itself can be done without needing to call a separate function.
bool is_not_instantiable = ( // if true, will raise java_lang_InstantiationException
- (clss->default_constructor == NULL) ||
- (clss->is_primitive || clss->is_array || class_is_interface(clss) || class_is_abstract(clss)) ||
- (clss == VM_Global_State::loader_env->Void_Class));
- if (!is_not_instantiable && clss->default_constructor->is_nop()) {
- clss->is_fast_allocation_possible = TRUE;
+ (m_default_constructor == NULL)
+ || (is_primitive() || is_array() || is_interface() || is_abstract())
+ || (this == VM_Global_State::loader_env->Void_Class));
+ if(!is_not_instantiable && m_default_constructor->is_nop()) {
+ m_is_fast_allocation_possible = 1;
}
-} //assign_offsets_to_class_methods
+} // Class::assign_offsets_to_methods
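// Illustrative sketch, not from this patch (helper name is made up): the
// offset/index relation the code above relies on. Each virtual method takes
// one pointer-sized slot after a fixed header (VTABLE_OVERHEAD in this file),
// so offsets grow by 8 per method on 64-bit builds and by 4 on 32-bit builds.
static unsigned vtable_offset_for_index(unsigned index, unsigned vtable_overhead)
{
    return vtable_overhead + index * (unsigned)sizeof(void*);
}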
-bool initialize_static_fields_for_interface(Class *clss)
+bool Class::initialize_static_fields_for_interface()
{
ASSERT_RAISE_AREA;
tmn_suspend_disable();
+ m_state = ST_Prepared;
// Initialize static fields
- clss->state = ST_Prepared;
- unsigned i;
- for (i=0; i<clss->n_fields; i++) {
- Field& field = clss->fields[i];
- if (field.is_static() && field.get_const_value_index()) {
- char *field_addr = ((char *)clss->static_data_block) + field.get_offset();
- Const_Java_Value field_const_value = field.get_const_value();
- switch(field.get_java_type()) {
- case JAVA_TYPE_INT:
- *((int32 *)field_addr) = field_const_value.i;
- break;
- case JAVA_TYPE_SHORT:
- case JAVA_TYPE_CHAR:
- *((int16 *)field_addr) = (int16)field_const_value.i;
- break;
- case JAVA_TYPE_BYTE:
- case JAVA_TYPE_BOOLEAN:
- *((int8 *)field_addr) = (int8)field_const_value.i;
- break;
- case JAVA_TYPE_LONG:
- *(((int32 *)field_addr)) = field_const_value.l.lo_bytes;
- *(((int32 *)field_addr) + 1) = field_const_value.l.hi_bytes;
- break;
- case JAVA_TYPE_DOUBLE:
- *(((int32 *)field_addr)) = field_const_value.l.lo_bytes;
- *(((int32 *)field_addr) + 1) = field_const_value.l.hi_bytes;
- break;
- case JAVA_TYPE_FLOAT:
- *((float *)field_addr) = field_const_value.f;
- break;
- case JAVA_TYPE_CLASS:
- {
- // compress static reference fields.
- // It must be a String
- assert(strcmp(field.get_descriptor()->bytes, "Ljava/lang/String;") == 0);
- Java_java_lang_String *str
- = vm_instantiate_cp_string_resolved(field_const_value.string);
-
- if (!str) {
- assert(exn_raised());
- tmn_suspend_enable();
- TRACE2("classloader.prepare", "failed instantiate final field : "
- << clss->name->bytes << "." << field.get_name()->bytes);
- return false;
- }
- STORE_GLOBAL_REFERENCE((COMPRESSED_REFERENCE *)field_addr, str);
- break;
- }
- default:
- // This should never happen.
- ABORT("Unexpected java type");
- break;
- }
- }
+ if(!assign_values_to_class_static_final_fields(this)) {
+ tmn_suspend_enable();
+ return false;
}
- clss->n_virtual_method_entries = 0; // interfaces don't have vtables
- for (i=0; i<clss->n_methods; i++) {
- Method *method = &clss->methods[i];
- if (method->is_clinit()) {
- assert(clss->static_initializer == method);
+ tmn_suspend_enable();
+
+#ifndef NDEBUG
+ for(uint16 i = 0; i < m_num_methods; i++) {
+ if(m_methods[i].is_clinit()) {
+ assert(m_static_initializer == &(m_methods[i]));
}
}
- tmn_suspend_enable();
- TRACE2("classloader.prepare", "interface " << clss->name->bytes << " prepared");
- class_initialize_if_no_side_effects(clss);
+#endif
+ TRACE2("classloader.prepare", "interface " << m_name->bytes << " prepared");
return true;
-} //initialize_static_fields_for_interface
+} // Class::initialize_static_fields_for_interface
+
+
+void Class::create_vtable(unsigned n_vtable_entries)
+{
+ unsigned vtable_size = VTABLE_OVERHEAD + n_vtable_entries * sizeof(void *);
+
+ // Always allocate vtable data from vtable_data_pool
+ void* p_gc_hdr = allocate_vtable_data_from_pool(vtable_size);
+
+#ifdef VM_STATS
+ // For allocation statistics, include any rounding added to make each
+ // item aligned (current alignment is to the next 16 byte boundary).
+ unsigned num_bytes = (vtable_size + 15) & ~15;
+ // 20020923 Total number of allocations and total number of
+ // bytes for class-related data structures.
+ VM_Statistics::get_vm_stats().num_vtable_allocations++;
+ VM_Statistics::get_vm_stats().total_vtable_bytes += num_bytes;
+#endif
+ assert(p_gc_hdr);
+ memset(p_gc_hdr, 0, vtable_size);
+
+ VTable* vtable = (VTable*)p_gc_hdr;
+
+ if(has_super_class()) {
+ m_depth = get_super_class()->m_depth + 1;
+ memcpy(&vtable->superclasses,
+ &get_super_class()->m_vtable->superclasses,
+ sizeof(vtable->superclasses));
+ for(int i = 0; i < vm_max_fast_instanceof_depth(); i++) {
+ if(vtable->superclasses[i] == NULL) {
+ vtable->superclasses[i] = this;
+ break;
+ }
+ }
+ }
+ if(m_depth > 0
+ && m_depth < vm_max_fast_instanceof_depth()
+ && !is_array()
+ && !is_interface())
+ {
+ m_is_suitable_for_fast_instanceof = 1;
+ }
+ m_vtable = vtable;
+} // Class::create_vtable
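// Illustrative sketch, not from this patch: what the superclasses[] array set
// up above buys. A class at depth d stores itself at superclasses[d - 1], and
// every subclass inherits that entry when it copies its superclass's array,
// so for a suitable target class T an instanceof test reduces to roughly one
// load and one compare against the object's vtable:
//
//     obj_vtable->superclasses[T_depth - 1] == T
//
// which is why only non-array, non-interface classes whose depth is below
// vm_max_fast_instanceof_depth() set m_is_suitable_for_fast_instanceof.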
-void populate_vtable_descriptors_table_and_override_methods(Class *clss)
+void Class::populate_vtable_descriptors_table_and_override_methods()
{
- // Populate _vtable_descriptors first with _n_virtual_method_entries from super class
- if (clss->super_class != NULL) {
- for (unsigned i = 0; i < clss->super_class->n_virtual_method_entries; i++) {
- clss->vtable_descriptors[i] = clss->super_class->vtable_descriptors[i];
+ // Populate _vtable_descriptors first with _n_virtual_method_entries
+ // from super class
+ if(has_super_class()) {
+ for(unsigned i = 0; i < get_super_class()->m_num_virtual_method_entries; i++) {
+ m_vtable_descriptors[i] = get_super_class()->m_vtable_descriptors[i];
}
}
// NOW OVERRIDE with this class' methods
unsigned i;
- for (i = 0; i < clss->n_methods; i++) {
- Method *method = &clss->methods[i];
- if (method->is_clinit()) {
- assert(clss->static_initializer == method);
+ for(i = 0; i < m_num_methods; i++) {
+ Method* method = &(m_methods[i]);
+ if(method->is_clinit()) {
+ assert(m_static_initializer == method);
}
if(method->is_static()
|| method->is_init()
@@ -1031,60 +963,61 @@
#endif
)
continue;
- clss->vtable_descriptors[method->get_index()] = method;
+ m_vtable_descriptors[method->get_index()] = method;
}
// finally, the interface methods
- unsigned index = clss->n_virtual_method_entries;
- for (i = 0; i < clss->n_intfc_table_entries; i++) {
- Class *intfc = clss->intfc_table_descriptors[i];
- for (unsigned k = 0; k < intfc->n_methods; k++) {
- if (intfc->methods[k].is_clinit()) {
+ unsigned index = m_num_virtual_method_entries;
+ for(i = 0; i < m_num_intfc_table_entries; i++) {
+ Class* intfc = m_intfc_table_descriptors[i];
+ for(unsigned k = 0; k < intfc->get_number_of_methods(); k++) {
+ if(intfc->get_method(k)->is_clinit()) {
continue;
}
// Find method with matching signature and replace
- const String *sig_name = intfc->methods[k].get_name();
- const String *sig_desc = intfc->methods[k].get_descriptor();
- Method *method = NULL;
- for (unsigned j = 0; j < clss->n_virtual_method_entries; j++) {
- if (clss->vtable_descriptors[j]->get_name() == sig_name && clss->vtable_descriptors[j]->get_descriptor() == sig_desc) {
- method = clss->vtable_descriptors[j];
+ const String* sig_name = intfc->get_method(k)->get_name();
+ const String* sig_desc = intfc->get_method(k)->get_descriptor();
+ Method* method = NULL;
+ for(unsigned j = 0; j < m_num_virtual_method_entries; j++) {
+ if(m_vtable_descriptors[j]->get_name() == sig_name
+ && m_vtable_descriptors[j]->get_descriptor() == sig_desc)
+ {
+ method = m_vtable_descriptors[j];
break; // a match!
}
-
}
- if (method == NULL && !class_is_abstract(clss)) {
- // wgs: I think we should comment out this assert, because there're many cases VM/Classpath
- // will run apps built on previous JDK version, and without implementations of newly added methods
+ if(method == NULL && !is_abstract()) {
+ // There're many cases VM will run apps built on previous JDK
+ // version, and without implementations of newly added methods
// for specific interfaces, we allow them to continue to run
- TRACE2("classloader.prepare", "No implementation in class " << clss->name->bytes
- << " for method " << sig_name->bytes << " of interface " << intfc->name->bytes
+ TRACE2("classloader.prepare", "No implementation in class "
+ << get_name()->bytes << " for method "
+ << sig_name->bytes << " of interface "
+ << intfc->get_name()->bytes
<< ". \n\nCheck whether you used another set of class library.\n");
}
- clss->vtable_descriptors[index] = method;
+ m_vtable_descriptors[index] = method;
index++;
}
}
-} //populate_vtable_descriptors_table_and_override_methods
+} // Class::populate_vtable_descriptors_table_and_override_methods
-void point_class_vtable_entries_to_stubs(Class *clss)
+void Class::point_vtable_entries_to_stubs()
{
-
- for (unsigned i = 0; i < clss->n_virtual_method_entries; i++) {
- assert(clss->vtable_descriptors[i]);
- Method& method = *(clss->vtable_descriptors[i]);
- assert(!method.is_static());
- //if (!method.is_static() && !is_ignored_method) {
- if(!method.is_static()) {
- unsigned meth_idx = method.get_index();
- // 2003-03-17: Make this assert independent of POINTER64. There are already several
- // assumptions in the code that the width of each method pointer is the same as void* .
- assert((method.get_offset() - VTABLE_OVERHEAD) / sizeof(void *) == method.get_index());
- clss->vtable->methods[meth_idx] =
- (unsigned char *)method.get_code_addr();
- method.add_vtable_patch(&(clss->vtable->methods[meth_idx]));
- assert(method.is_fake_method() || interpreter_enabled() || method.get_code_addr());
+ for (unsigned i = 0; i < m_num_virtual_method_entries; i++) {
+ assert(m_vtable_descriptors[i]);
+ Method* method = m_vtable_descriptors[i];
+ assert(!method->is_static());
+ if(!method->is_static()) {
+ unsigned meth_idx = method->get_index();
+ // There are several assumptions in the code that the width
+ // of each method pointer is the same as void*.
+ assert((method->get_offset() - VTABLE_OVERHEAD)/sizeof(void*) == method->get_index());
+ m_vtable->methods[meth_idx] =
+ (unsigned char*)method->get_code_addr();
+ method->add_vtable_patch(&(m_vtable->methods[meth_idx]));
+ assert(method->is_fake_method() || interpreter_enabled() || method->get_code_addr());
}
}
}
@@ -1094,9 +1027,9 @@
// It's a runtime helper. So should be named as rth_prepare_throw_abstract_method_error
void prepare_throw_abstract_method_error(Class_Handle clss, Method_Handle method)
{
- char* buf = (char*)STD_ALLOCA(clss->name->len + method->get_name()->len
+ char* buf = (char*)STD_ALLOCA(clss->get_name()->len + method->get_name()->len
+ method->get_descriptor()->len + 2); // . + \0
- sprintf(buf, "%s.%s%s", clss->name->bytes,
+ sprintf(buf, "%s.%s%s", clss->get_name()->bytes,
method->get_name()->bytes, method->get_descriptor()->bytes);
tmn_suspend_enable();
@@ -1131,11 +1064,11 @@
// It's a runtime helper. So should be named as rth_prepare_throw_illegal_access_error
void prepare_throw_illegal_access_error(Class_Handle to, Method_Handle from)
{
- char* buf = (char*)STD_ALLOCA(from->get_class()->name->len
- + to->name->len + from->get_name()->len
+ char* buf = (char*)STD_ALLOCA(from->get_class()->get_name()->len
+ + to->get_name()->len + from->get_name()->len
+ from->get_descriptor()->len + 12); // from + to + . + \0
- sprintf(buf, "from %s to %s.%s%s", from->get_class()->name->bytes,
- to->name->bytes,
+ sprintf(buf, "from %s to %s.%s%s", from->get_class()->get_name()->bytes,
+ to->get_name()->bytes,
from->get_name()->bytes, from->get_descriptor()->bytes);
tmn_suspend_enable();
@@ -1167,48 +1100,49 @@
return addr;
}
-Intfc_Table *create_populate_class_interface_table(Class *clss)
+Intfc_Table* Class::create_and_populate_interface_table()
{
- Intfc_Table *intfc_table;
- if (clss->n_intfc_table_entries != 0) {
- unsigned vtable_offset = clss->n_virtual_method_entries;
+ Intfc_Table* intfc_table;
+ if(m_num_intfc_table_entries != 0) {
+ unsigned vtable_offset = m_num_virtual_method_entries;
// shouldn't it be called vtable_index?
- intfc_table = create_intfc_table(clss, clss->n_intfc_table_entries);
+ intfc_table = create_intfc_table(this, m_num_intfc_table_entries);
unsigned i;
- for (i = 0; i < clss->n_intfc_table_entries; i++) {
- Class *intfc = clss->intfc_table_descriptors[i];
- intfc_table->entry[i].intfc_id = intfc->id;
- intfc_table->entry[i].table = &clss->vtable->methods[vtable_offset];
- vtable_offset += intfc->n_methods;
- if(intfc->static_initializer) {
+ for (i = 0; i < m_num_intfc_table_entries; i++) {
+ Class* intfc = m_intfc_table_descriptors[i];
+ intfc_table->entry[i].intfc_id = intfc->get_id();
+ intfc_table->entry[i].table = &m_vtable->methods[vtable_offset];
+ vtable_offset += intfc->get_number_of_methods();
+ if(intfc->m_static_initializer) {
// Don't count static initializers of interfaces.
vtable_offset--;
}
}
// Set the vtable entries to point to the code address.
- unsigned meth_idx = clss->n_virtual_method_entries;
- for (i = 0; i < clss->n_intfc_table_entries; i++) {
- Class *intfc = clss->intfc_table_descriptors[i];
- for (unsigned k = 0; k < intfc->n_methods; k++) {
- if (intfc->methods[k].is_clinit()) {
+ unsigned meth_idx = m_num_virtual_method_entries;
+ for (i = 0; i < m_num_intfc_table_entries; i++) {
+ Class* intfc = m_intfc_table_descriptors[i];
+ for(unsigned k = 0; k < intfc->get_number_of_methods(); k++) {
+ if (intfc->get_method(k)->is_clinit()) {
continue;
}
- Method *method = clss->vtable_descriptors[meth_idx];
+ Method* method = m_vtable_descriptors[meth_idx];
if(method == NULL || method->is_abstract()) {
TRACE2("classloader.prepare.ame", "Inserting Throw_AbstractMethodError stub for method\n\t"
- << clss->name->bytes << "."
- << intfc->methods[k].get_name()->bytes << intfc->methods[k].get_descriptor()->bytes);
- clss->vtable->methods[meth_idx] =
- (unsigned char*)prepare_gen_throw_abstract_method_error(clss, &intfc->methods[k]);
+ << m_name->bytes << "."
+ << intfc->get_method(k)->get_name()->bytes
+ << intfc->get_method(k)->get_descriptor()->bytes);
+ m_vtable->methods[meth_idx] =
+ (unsigned char*)prepare_gen_throw_abstract_method_error(this, intfc->get_method(k));
} else if(method->is_public()) {
- clss->vtable->methods[meth_idx] =
+ m_vtable->methods[meth_idx] =
(unsigned char *)method->get_code_addr();
- method->add_vtable_patch(&(clss->vtable->methods[meth_idx]));
+ method->add_vtable_patch(&(m_vtable->methods[meth_idx]));
} else {
TRACE2("classloader.prepare.iae", "Inserting Throw_IllegalAccessError stub for method\n\t"
- << method->get_class()->name->bytes << "."
+ << method->get_class()->get_name()->bytes << "."
<< method->get_name()->bytes << method->get_descriptor()->bytes);
- clss->vtable->methods[meth_idx] =
+ m_vtable->methods[meth_idx] =
(unsigned char*)prepare_gen_throw_illegal_access_error(intfc, method);
}
meth_idx++;
@@ -1218,500 +1152,377 @@
intfc_table = NULL;
}
return intfc_table;
-} //create_populate_class_interface_table
+} // Class::create_and_populate_interface_table
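// Illustrative sketch, not from this patch (the struct and helper are made up
// for illustration): how a call site can consume the table built above. The
// entry layout mirrors what this file populates - an interface id plus a
// pointer into the class vtable's method slots - and since entries are sorted
// by id, the linear scan below could equally be a binary search.
struct SketchIntfcEntry { unsigned intfc_id; unsigned char** methods; };

static unsigned char** find_interface_slice(SketchIntfcEntry* entries,
                                            unsigned n_entries,
                                            unsigned intfc_id)
{
    for (unsigned i = 0; i < n_entries; i++)
        if (entries[i].intfc_id == intfc_id)
            return entries[i].methods;   // this interface's section of the vtable
    return NULL;                         // class does not implement the interface
}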
-void initialize_interface_class_data(Class *clss)
-{
- // this is an interface
- clss->instance_data_size = 0; // no instance data
- clss->unpadded_instance_data_size = 0;
- clss->allocated_size = 0;
- clss->n_instance_refs = 0;
- clss->n_virtual_method_entries = 0; // thus no virtual method entries
- clss->n_intfc_table_entries = 1; // need table entry for this interface
-} //initialize_interface_class_data
-
-
-void initialize_java_lang_object_class(Class *clss)
-{
- // java.lang.Object -- Java ROOT.
- clss->instance_data_size = 0; // set below use the unpadded_instace_data_size.
- clss->allocated_size = 0; // set below.
- clss->unpadded_instance_data_size = /*sizeof(ManagedObject)*/(unsigned)ManagedObject::get_size();
- clss->n_instance_refs = 0;
- clss->n_virtual_method_entries = clss->n_intfc_table_entries = 0;
-} //initialize_java_lang_object_class
-
-
-static void initialize_regular_class_data(Global_Env* env, Class *clss)
-{
- clss->instance_data_size = 0; // set below.
- clss->allocated_size = 0; // set below.
- // Roll over instance size, instance refs, static fields #, and num_field_padding_bytes from the super class.
- clss->unpadded_instance_data_size = clss->super_class->unpadded_instance_data_size;
- if (clss->name == env->JavaLangClass_String) {
- clss->unpadded_instance_data_size =
- ( (/*sizeof(ManagedObject)*/(unsigned)ManagedObject::get_size() + (GC_OBJECT_ALIGNMENT - 1)) / GC_OBJECT_ALIGNMENT) * GC_OBJECT_ALIGNMENT;
- }
- clss->n_instance_refs = clss->super_class->n_instance_refs;
- clss->num_field_padding_bytes = clss->super_class->num_field_padding_bytes;
- // Roll over all virtual methods and interface methods of super class.
- clss->n_virtual_method_entries = clss->super_class->n_virtual_method_entries;
- clss->n_intfc_table_entries = clss->super_class->n_intfc_table_entries;
-} //initialize_regular_class_data
-
-
-//
-// prepares a class:
-// (1) assign offsets
-// - offset of instance data fields
-// - virtual methods in vtable
-// - static data fields in static data block
-// - static methods in static method block
-//
-// (2) create class vtable
-// (3) create static field block
-// (4) create static method block
-//
-//
-//
-
-bool class_prepare(Global_Env* env, Class *clss)
+bool Class::prepare(Global_Env* env)
{
ASSERT_RAISE_AREA;
- // fast path
- switch(clss->state)
- {
- case ST_Prepared:
- case ST_Initializing:
- case ST_Initialized:
- case ST_Error:
- return true;
- default:
- break;
- }
-
- LMAutoUnlock autoUnlocker(clss->m_lock);
-
- //
//
// STEP 1 ::: SIMPLY RETURN IF already prepared, initialized, or currently initializing.
//
- //
- switch(clss->state)
- {
- case ST_Prepared:
- case ST_Initializing:
- case ST_Initialized:
- case ST_Error:
+ if(is_at_least_prepared() || in_error()) // try fast path
return true;
- default:
- break;
- }
- TRACE2("classloader.prepare", "BEGIN class prepare, class name = " << clss->name->bytes);
- assert(clss->is_verified);
+ LMAutoUnlock autoUnlocker(m_lock);
+
+ if(is_at_least_prepared() || in_error()) // try slow path
+ return true;
+
+ TRACE2("classloader.prepare", "BEGIN class prepare, class name = " << m_name->bytes);
+ assert(m_state == ST_BytecodesVerified);
- //
//
// STEP 2 ::: PREPARE SUPER-INTERFACES
//
- //
unsigned i;
- for (i=0; i<clss->n_superinterfaces; i++) {
- if(!class_is_interface(clss->superinterfaces[i].clss)) {
- REPORT_FAILED_CLASS_CLASS(clss->class_loader, clss,
- "java/lang/IncompatibleClassChangeError",
- clss->name->bytes << ": "
- << clss->superinterfaces[i].clss->name->bytes << " is not an interface");
- return false;
- }
- if(!class_prepare(env, clss->superinterfaces[i].clss)) {
- REPORT_FAILED_CLASS_CLASS(clss->class_loader, clss,
+ for(i = 0; i < m_num_superinterfaces; i++) {
+ assert(m_superinterfaces[i].clss->is_interface());
+ if(!m_superinterfaces[i].clss->prepare(env)) {
+ REPORT_FAILED_CLASS_CLASS(m_class_loader, this,
VM_Global_State::loader_env->JavaLangNoClassDefFoundError_String->bytes,
- clss->name->bytes << ": error preparing superinterface "
- << clss->superinterfaces[i].clss->name->bytes);
+ m_name->bytes << ": error preparing superinterface "
+ << m_superinterfaces[i].clss->get_name()->bytes);
return false;
}
}
//
+ // STEP 3 ::: PREPARE SUPERCLASS if needed
//
- // STEP 3 ::: PREPARE SUPERCLASS if needed; simply initialize if interface.
- //
- //
- if (class_is_interface(clss)) {
- initialize_interface_class_data(clss);
- } else if (clss->super_class != NULL) {
- // Regular class with super-class.
- if(!class_prepare(env, clss->super_class)) {
- REPORT_FAILED_CLASS_CLASS(clss->class_loader, clss,
+ if(!is_interface() && has_super_class())
+ {
+ // Regular class with super-class
+ if(!get_super_class()->prepare(env)) {
+ REPORT_FAILED_CLASS_CLASS(m_class_loader, this,
VM_Global_State::loader_env->JavaLangNoClassDefFoundError_String->bytes,
- clss->name->bytes << ": error preparing superclass "
- << clss->super_class->name->bytes);
+ m_name->bytes << ": error preparing superclass "
+ << get_super_class()->get_name()->bytes);
return false;
}
- // CLASS_VTABLE_REWORK - these will eventually be moved into the vtable but we don't have one yet.
- // Before we start adding properties make sure they are clear.
- assert(clss->class_properties == 0);
- if(clss->super_class->has_finalizer) {
- clss->has_finalizer = 1;
- }
- initialize_regular_class_data(env, clss);
- } else {
- initialize_java_lang_object_class(clss);
}
- clss->static_data_size = 0;
- clss->static_method_size = 0;
- //
- //
- // STEP 4 :::: ASSIGN OFFSETS to the class and instance data FIELDS.
- // This SETs class to ST_InstanceSizeComputed state.
- //
- //
- assign_offsets_to_class_fields(clss);
- assert(clss->state == ST_InstanceSizeComputed);
- //
- //
- // STEP 5 :::: Build GC REFERENCE OFFSET MAP
- //
- //
- build_gc_class_ref_map(env, clss);
+
//
+ // STEP 4 ::: setup selected class properties
//
- // STEP 6 :::: Calculate # of INTERFACES METHODS and build interface table DESCRIPTORS for C
+ if(!is_interface()) {
+ if(has_super_class()) {
+ if(get_super_class()->has_finalizer()) {
+ m_has_finalizer = 1;
+ }
+ // Copy over instance size, instance refs, static fields #,
+ // and num_field_padding_bytes from the super class.
+ if(m_name == env->JavaLangClass_String) {
+ // calculate unpadded instance data size
+ // for java/lang/Class separately
+ m_unpadded_instance_data_size =
+ (((unsigned)ManagedObject::get_size() + (GC_OBJECT_ALIGNMENT - 1))
+ / GC_OBJECT_ALIGNMENT)
+ * GC_OBJECT_ALIGNMENT;
+ } else {
+ m_unpadded_instance_data_size =
+ get_super_class()->m_unpadded_instance_data_size;
+ }
+ m_num_instance_refs = get_super_class()->m_num_instance_refs;
+ m_num_field_padding_bytes =
+ get_super_class()->m_num_field_padding_bytes;
+ // Copy over number of virtual methods
+ // and interface methods of super class
+ m_num_virtual_method_entries =
+ get_super_class()->m_num_virtual_method_entries;
+ m_num_intfc_table_entries = get_super_class()->m_num_intfc_table_entries;
+ } else {
+ // this is java/lang/Object
+ // FIXME: primitive classes also get here, but this assignment
+ // has no effect on them really
+ m_unpadded_instance_data_size = (unsigned)ManagedObject::get_size();
+ }
+ } else {
+ // this is an interface
+ m_num_intfc_table_entries = 1; // need table entry for this interface
+ }
+
//
+ // STEP 5 ::: ASSIGN OFFSETS to the class and instance data FIELDS.
+ // This SETs class to ST_InstanceSizeComputed state.
//
- build_class_interface_table_descriptors(clss);
+ assign_offsets_to_fields();
+ assert(m_state == ST_InstanceSizeComputed);
+
//
+ // STEP 6 ::: Calculate # of INTERFACES METHODS and build interface table DESCRIPTORS for C
//
- // STEP 7 :::: ASSIGN OFFSETS to the class and virtual METHODS
+ build_interface_table_descriptors();
+
//
+ // STEP 7 ::: ASSIGN OFFSETS to the class and virtual METHODS
//
- assign_offsets_to_class_methods(clss);
- if(clss->state == ST_Error) {
+ assign_offsets_to_methods(env);
+ if(m_state == ST_Error)
return false;
- }
- //
- //
- // STEP 8 :::: Create the static field and method blocks
+
//
+ // STEP 8 ::: Create the static field block
//
- clss->static_data_block = (char *) clss->class_loader->Alloc(clss->static_data_size);
- memset(clss->static_data_block, 0, clss->static_data_size);
+ m_static_data_block = (char*)m_class_loader->Alloc(m_static_data_size);
+ memset(m_static_data_block, 0, m_static_data_size);
#ifdef VM_STATS
- // 20020923 Total number of allocations and total number of bytes for class-related data structures.
+ // Total number of allocations and total number of bytes for class-related data structures.
// This includes any rounding added to make each item aligned (current alignment is to the next 16 byte boundary).
- unsigned num_bytes = (clss->static_data_size + 15) & ~15;
- Class::num_statics_allocations++;
- if (clss->static_data_size > 0) {
- Class::num_nonempty_statics_allocations++;
+ unsigned num_bytes = (m_static_data_size + 15) & ~15;
+ VM_Statistics::get_vm_stats().num_statics_allocations++;
+ if(m_static_data_size > 0) {
+ VM_Statistics::get_vm_stats().num_nonempty_statics_allocations++;
}
- Class::total_statics_bytes += num_bytes;
+ VM_Statistics::get_vm_stats().total_statics_bytes += num_bytes;
#endif
- assert(clss->static_data_block);
- assert(( ((POINTER_SIZE_INT)(clss->static_data_block)) % 8) == 0); // block must be on a 8 byte boundary
- memset(clss->static_data_block, 0, clss->static_data_size);
+ assert(m_static_data_block);
+ // block must be on a 8 byte boundary
+ assert((((POINTER_SIZE_INT)(m_static_data_block)) % 8) == 0);
- clss->static_method_block = (unsigned char**) new char[clss->static_method_size];
- memset(clss->static_method_block, 0, clss->static_method_size);
- //
//
- // STEP 9 :::: For INTERFACES intialize static fields and return.
+ // STEP 9 ::: For INTERFACES initialize static fields and return.
//
- //
- if (class_is_interface(clss)) {
- bool init_fields = initialize_static_fields_for_interface(clss);
- //if((env->java_io_Serializable_Class != NULL && clss->name != env->java_io_Serializable_Class->name))
+ if(is_interface()) {
+ bool init_fields = initialize_static_fields_for_interface();
if(!env->InBootstrap())
{
autoUnlocker.ForceUnlock();
assert(hythread_is_suspend_enabled());
if (init_fields) {
- jvmti_send_class_prepare_event(clss);
+ jvmti_send_class_prepare_event(this);
}
}
// DONE for interfaces
- TRACE2("classloader.prepare", "END class prepare, class name = " << clss->name->bytes);
+ TRACE2("classloader.prepare", "END class prepare, class name = "
+ << m_name->bytes);
return init_fields;
}
+
//
+ // STEP 10 ::: COMPUTE number of interface method entries.
//
- // STEP 10 :::: COMPUTE number of interface method entries.
- //
- //
- for (i = 0; i < clss->n_intfc_table_entries; i++) {
- Class *intfc = clss->intfc_table_descriptors[i];
- clss->n_intfc_method_entries += intfc->n_methods;
- if (intfc->static_initializer) {
+ for(i = 0; i < m_num_intfc_table_entries; i++) {
+ Class* intfc = m_intfc_table_descriptors[i];
+ m_num_intfc_method_entries += intfc->get_number_of_methods();
+ if(intfc->m_static_initializer) {
// Don't count static initializers of interfaces.
- clss->n_intfc_method_entries--;
+ m_num_intfc_method_entries--;
}
}
+
//
+ // STEP 11 ::: ALLOCATE the Vtable descriptors array
//
- // STEP 11 :::: ALLOCATE the Vtable descriptors array
- //
- //
- unsigned n_vtable_entries = clss->n_virtual_method_entries + clss->n_intfc_method_entries;
- if (n_vtable_entries != 0) {
- clss->vtable_descriptors = new Method*[n_vtable_entries];
+ unsigned n_vtable_entries =
+ m_num_virtual_method_entries + m_num_intfc_method_entries;
+ if(n_vtable_entries != 0) {
+ m_vtable_descriptors = new Method*[n_vtable_entries];
// ppervov: FIXME: should throw OOME
- } else {
- clss->vtable_descriptors = NULL;
}
+
//
+ // STEP 12 ::: POPULATE with interface descriptors and virtual method descriptors
+ // Also, OVERRIDE superclass' methods with this class's own
//
- // STEP 12 :::: POPULATE with interface descriptors and virtual method descriptors.
- // Also, OVERRIDE superclass' methods with those of this one's
- //
- populate_vtable_descriptors_table_and_override_methods(clss);
- //
- //
- // STEP 13 :::: CREATE VTABLE and set the Vtable entries to point to the
- // code address (a stub or jitted code)
+ populate_vtable_descriptors_table_and_override_methods();
+
//
+ // STEP 13 ::: CREATE VTABLE and set the Vtable entries to point to the
+ // code address (a stub or jitted code)
//
- clss->vtable = create_vtable(clss, n_vtable_entries);
- for (i = 0; i < n_vtable_entries; i++) {
+ create_vtable(n_vtable_entries);
+ assert(m_vtable);
+ for(i = 0; i < n_vtable_entries; i++) {
// need to populate with pointers to stubs or compiled code
- clss->vtable->methods[i] = NULL; // for now
+ m_vtable->methods[i] = NULL; // for now
}
- if (vm_vtable_pointers_are_compressed())
+
+ if(vm_vtable_pointers_are_compressed())
{
- clss->allocation_handle = (Allocation_Handle) ((POINTER_SIZE_INT)clss->vtable - vm_get_vtable_base());
+ m_allocation_handle =
+ (Allocation_Handle)((POINTER_SIZE_INT)m_vtable - vm_get_vtable_base());
}
else
{
- clss->allocation_handle = (Allocation_Handle) clss->vtable;
+ m_allocation_handle = (Allocation_Handle)m_vtable;
}
- clss->vtable->clss = clss;
+ m_vtable->clss = this;
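For reference, a minimal standalone sketch of the encode/decode arithmetic behind the compressed-vtable-pointer branch above; the struct, base value, and helper names below are illustrative stand-ins, not the VM's actual declarations.

    #include <stdint.h>
    #include <assert.h>

    struct FakeVTable { int dummy; };   // stand-in for the VM's VTable type

    // Encode: store the vtable as an offset from a common base so the handle
    // fits in fewer bits than a raw pointer.
    uint64_t encode_handle(FakeVTable* vt, uintptr_t vtable_base) {
        return (uint64_t)((uintptr_t)vt - vtable_base);
    }

    // Decode: add the base back to recover the raw vtable pointer.
    FakeVTable* decode_handle(uint64_t handle, uintptr_t vtable_base) {
        return (FakeVTable*)(vtable_base + (uintptr_t)handle);
    }

    int main() {
        static FakeVTable vt;
        uintptr_t base = (uintptr_t)&vt & ~(uintptr_t)0xFFFF;   // fake base for the demo
        uint64_t handle = encode_handle(&vt, base);
        assert(decode_handle(handle, base) == &vt);             // round-trips
        return 0;
    }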
// Set the vtable entries to point to the code address (a stub or jitted code)
- point_class_vtable_entries_to_stubs(clss);
+ point_vtable_entries_to_stubs();
//
+ // STEP 14 ::: CREATE and POPULATE the CLASS INTERFACE TABLE
//
- // STEP 14 :::: CREATE and POPULATE the CLASS INTERFACE TABLE
- //
- //
- clss->vtable->intfc_table = create_populate_class_interface_table(clss);
-
- //
- //
- // STEP 15 :::: HANDLE JAVA CLASSCLASS separately
+ m_vtable->intfc_table = create_and_populate_interface_table();
+
//
+ // STEP 15 ::: HANDLE JAVA CLASS CLASS separately
//
-
- // Make sure on one hasn't prematurely set these fields since all calculations
+ // Make sure no one has prematurely set these fields since all calculations
// up to this point should be based on clss->unpadded_instance_data_size.
- assert (clss->instance_data_size == 0);
- assert (clss->allocated_size == 0);
+ assert(m_instance_data_size == 0);
+ assert(m_allocated_size == 0);
// Add any needed padding including the OBJECT_HEADER which is used to hold
// things like gc forwarding pointers, mark bits, hashes and locks..
- clss->allocated_size =
- (((clss->unpadded_instance_data_size + (GC_OBJECT_ALIGNMENT - 1))
+ m_allocated_size =
+ (((m_unpadded_instance_data_size + (GC_OBJECT_ALIGNMENT - 1))
/ GC_OBJECT_ALIGNMENT) * GC_OBJECT_ALIGNMENT) + OBJECT_HEADER_SIZE;
// Move the size to the vtable.
- clss->vtable->allocated_size = clss->allocated_size;
- clss->instance_data_size = clss->allocated_size;
- TRACE2("class.size", "class " << clss << " allocated size "
- << clss->allocated_size);
+ m_vtable->allocated_size = m_allocated_size;
+ m_instance_data_size = m_allocated_size;
+ TRACE2("class.size", "class " << this << " allocated size "
+ << m_allocated_size);
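For reference, a tiny self-contained sketch of the same round-up-and-add-header arithmetic; the alignment and header sizes used here are illustrative numbers only, not the VM's actual GC_OBJECT_ALIGNMENT / OBJECT_HEADER_SIZE on any particular platform.

    #include <assert.h>

    // Round `unpadded` up to the next multiple of `align`, then add the object header.
    static unsigned padded_size(unsigned unpadded, unsigned align, unsigned header) {
        return ((unpadded + (align - 1)) / align) * align + header;
    }

    int main() {
        assert(padded_size(13, 8, 4) == 20);   // 13 rounds up to 16, plus a 4-byte header
        assert(padded_size(16, 8, 4) == 20);   // already aligned, only the header is added
        return 0;
    }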
//
- //
// STEP 16 :::: HANDLE PINNING and Class PROPERTIES if needed.
//
- //
-
- if (clss->super_class) {
- if (get_prop_pinned (clss->super_class->class_properties)) {
- // If the super class is pinned then this class is pinned.
- set_prop_pinned (clss);
- }
+ if(has_super_class()
+ && (get_super_class()->m_vtable->class_properties & CL_PROP_PINNED_MASK) != 0)
+ {
+ // If the super class is pinned then this class is pinned
+ m_vtable->class_properties |= CL_PROP_PINNED_MASK;
+ set_instance_data_size_constraint_bit();
}
+
// Set up the class_properties field.
- if (clss->is_array == 1) {
- clss->array_element_size = (vm_references_are_compressed() ? sizeof(COMPRESSED_REFERENCE) : sizeof(RAW_REFERENCE));
- set_prop_array (clss);
- if (is_vector_of_primitives (clss)) {
- clss->array_element_size = sizeof_primitive_array_element (clss);
- set_prop_non_ref_array (clss);
+ if(is_array()) {
+ m_array_element_size = (vm_references_are_compressed()
+ ? sizeof(COMPRESSED_REFERENCE) : sizeof(RAW_REFERENCE));
+ m_vtable->class_properties |= CL_PROP_ARRAY_MASK;
+ if(is_vector_of_primitives(this)) {
+ m_array_element_size = sizeof_primitive_array_element(this);
+ m_vtable->class_properties |= CL_PROP_NON_REF_ARRAY_MASK;
}
- clss->vtable->array_element_size = (unsigned short)clss->array_element_size;
- switch (clss->vtable->array_element_size)
+ m_vtable->array_element_size = (unsigned short)m_array_element_size;
+ switch(m_vtable->array_element_size)
{
case 1:
- clss->vtable->array_element_shift = 0;
+ m_vtable->array_element_shift = 0;
break;
case 2:
- clss->vtable->array_element_shift = 1;
+ m_vtable->array_element_shift = 1;
break;
case 4:
- clss->vtable->array_element_shift = 2;
+ m_vtable->array_element_shift = 2;
break;
case 8:
- clss->vtable->array_element_shift = 3;
+ m_vtable->array_element_shift = 3;
break;
default:
- clss->vtable->array_element_shift = 65535;
- ASSERT(0, "Unexpected array element size: " << clss->vtable->array_element_size);
+ m_vtable->array_element_shift = 65535;
+ ASSERT(0, "Unexpected array element size: " << m_vtable->array_element_size);
break;
}
- }else{
- clss->array_element_size = 0;
- }
-
- if (clss->has_finalizer) {
- set_prop_finalizable(clss);
}
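The size-to-shift mapping above lets array indexing use a shift instead of a multiply. A hedged sketch of that use follows; first_element_offset is a made-up constant here, not the VM's actual array layout.

    #include <assert.h>

    // Byte offset of element `idx` when each element is (1 << shift) bytes wide
    // and the first element starts `first_element_offset` bytes into the object.
    static unsigned element_offset(unsigned first_element_offset,
                                   unsigned shift, unsigned idx) {
        return first_element_offset + (idx << shift);
    }

    int main() {
        assert(element_offset(16, 2, 3) == 28);   // 4-byte elements: 16 + 3*4
        assert(element_offset(16, 3, 3) == 40);   // 8-byte elements: 16 + 3*8
        return 0;
    }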
#ifndef POINTER64
- if(!strcmp("[D", clss->name->bytes)) {
+ if(!strcmp("[D", m_name->bytes)) {
// In IA32, Arrays of Doubles need to be eight byte aligned to improve
// performance. In IPF all objects (arrays, class data structures, heap objects)
// get aligned on eight byte boundaries. So, this special code is not needed.
- clss->alignment = ((GC_OBJECT_ALIGNMENT<8)?8:GC_OBJECT_ALIGNMENT);;
-
- // align doubles on 8, clear alignment field and put
- // in 8.
- set_prop_alignment_mask (clss, 8);
+ m_alignment = ((GC_OBJECT_ALIGNMENT<8)?8:GC_OBJECT_ALIGNMENT);
+
+ // align doubles on 8, clear alignment field and put in 8.
+ m_vtable->class_properties |= 8;
// Set high bit in size so that gc knows there are constraints
+ set_instance_data_size_constraint_bit();
}
#endif
//
+ // STEP 17 ::: HANDLE ALIGNMENT and Class FINALIZER if needed.
//
- // STEP 17 :::: HANDLE ALIGNMENT and Class FINALIZER if needed.
- //
- //
- if (clss->alignment) {
- if (clss->alignment != GC_OBJECT_ALIGNMENT) {
+ if(m_alignment) {
+ if(m_alignment != GC_OBJECT_ALIGNMENT) {
// The GC will align on 4 byte boundaries by default on IA32....
#ifdef POINTER64
ASSERT(0, "Allignment is supposed to be appropriate");
#endif
// Make sure it is a legal mask.
- assert ((clss->alignment & CL_PROP_ALIGNMENT_MASK) <= CL_PROP_ALIGNMENT_MASK);
- set_prop_alignment_mask (clss, clss->alignment);
+ assert((m_alignment & CL_PROP_ALIGNMENT_MASK) <= CL_PROP_ALIGNMENT_MASK);
+ m_vtable->class_properties |= m_alignment;
+ set_instance_data_size_constraint_bit();
// make sure constraintbit was set.
- assert (get_instance_data_size(clss) != clss->instance_data_size);
+ assert(get_instance_data_size() != m_instance_data_size);
}
}
+ if(has_finalizer()) {
+ m_vtable->class_properties |= CL_PROP_FINALIZABLE_MASK;
+ set_instance_data_size_constraint_bit();
+ }
+
//
+ // STEP 18 ::: SET Class ALLOCATED SIZE to INSTANCE SIZE
//
- // STEP 18 :::: SET Class ALLOCATED SIZE to INSTANCE SIZE
- //
- //
-
// Finally set the allocated size field.
- clss->allocated_size = get_instance_data_size(clss);
+ m_allocated_size = get_instance_data_size();
-
- //
- //
- // STEP 18a: Determine if class should have special verification treatment.
- // This is needed to handle magic classes that are not verifiable but needed for, eg, reflection implementation
//
+ // STEP 18a: Determine if class should have special access check treatment.
//
- if (should_class_not_be_verified(clss))
- clss->is_not_verified = TRUE;
+ static const char* reflect = "java/lang/reflect/";
+ static const size_t reflect_len = strlen(reflect);
+ if(strncmp(m_name->bytes, reflect, reflect_len) == 0)
+ m_can_access_all = 1;
//
- //
// STEP 19 :::: SET class to ST_Prepared state.
//
+ gc_class_prepared(this, m_vtable);
+ assert(m_state == ST_InstanceSizeComputed);
+ m_state = ST_Prepared;
+ TRACE2("classloader.prepare","class " << m_name->bytes << " prepared");
+
+ //
+ // STEP 20 ::: ASSIGN VALUE to static final fields
+ //
+ // Generally speaking, final values are inlined, so we would not need to worry
+ // about the initialization of those static final fields. But when we use
+ // reflection mechanisms - Field.getXXX() - to access them, we get
+ // null values. Considering this, we must initialize those static
+ // final fields. Also related to this is the Binary Compatibility chapter
+ // of the JLS, section 13.4.8.
//
- gc_class_prepared(clss, clss->vtable);
- assert(clss->state == ST_InstanceSizeComputed);
- clss->state = ST_Prepared;
- TRACE2("classloader.prepare","class " << clss->name->bytes << " prepared");
- class_initialize_if_no_side_effects(clss);
-
- //
- // STEP 20 :::: ASSIGN VALUE to static final fields
- //
- // Generally speaking final value is inlined, so we needn't worry about the
- // initialization of to those static final fields. But when we use reflection
- // mechanisms-Field.getXXX()- to access them, we got null values. Consider this,
- // We must initialize those static final fields.
- // Also related to this is Binary Compatibility chapter of the JLS.
- // Section 13.4.8
- //
- if (!assign_values_to_class_static_final_fields(clss))
+ tmn_suspend_disable();
+ if(!assign_values_to_class_static_final_fields(this))
{
//OOME happened
- assert(hythread_is_suspend_enabled());
+ tmn_suspend_enable();
return false;
}
+ tmn_suspend_enable();
//
[... 78 lines stripped ...]