Posted to commits@openjpa.apache.org by al...@apache.org on 2009/04/05 23:29:44 UTC

svn commit: r762161 [2/7] - in /openjpa/trunk: openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/sql/ openjpa-jdbc/src/main/resources/org/apache/openjpa/jdbc/kernel/ openjpa-kernel/src/main/ja...

Modified: openjpa/trunk/openjpa-kernel/src/main/resources/org/apache/openjpa/kernel/localizer.properties
URL: http://svn.apache.org/viewvc/openjpa/trunk/openjpa-kernel/src/main/resources/org/apache/openjpa/kernel/localizer.properties?rev=762161&r1=762160&r2=762161&view=diff
==============================================================================
--- openjpa/trunk/openjpa-kernel/src/main/resources/org/apache/openjpa/kernel/localizer.properties (original)
+++ openjpa/trunk/openjpa-kernel/src/main/resources/org/apache/openjpa/kernel/localizer.properties Sun Apr  5 21:29:42 2009
@@ -1,411 +1,419 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.  
-
-cant-convert-result: There is no conversion between query projection type \
-	"{0}" and result type "{1}".
-unloaded-detached: Attempt to access an unloaded field of detached instance \
-	"{0}".
-meta-unknownid: Cannot manipulate identity of type "{0}": it''s identity type \
-	is unknown.
-new-abstract: Cannot create an instance of "{0}": abstract classes are not \
-    yet supported.
-bad-new-query: Attempt to construct a query from an extent or class.  You must \
-	pass a (possibly null) query string or template to the query factory \
-	method when creating the query. 
-update-restrict: Detected attempt to modify field "{0}" with value strategy \
-	"restrict".
-reentrant-flush: Detected reentrant flush.  Make sure your flush-time instance \
-	callback methods or event listeners do not invoke any operations that \
-	require the in-progress flush to complete.
-rolled-back: The transaction has been rolled back.  See the nested exceptions \
-	for details on the errors that occurred.
-bad-lock-level: This lock manager does not recognize lock level "{0}".
-pessimistic-mutate: You are attempting to directly mutate a persistent second \
-	class object (such as a collection or map field) that you obtained before \
-	the transaction began.  After beginning a datastore transaction, you \
-	must re-obtain any references to mutable second class objects from the \
-	owning persistent object.  Detected attempt to mutate value of field "{0}" \
-	in instance "{1}".  This instance may not be locked correctly.
-not-derefed: Encountered unknown dependent instance "{0}".  This error is \
-	often caused by either removing a dependent instance from one dependent \
-	field, but not other dependent fields that reference it, or by removing a \
-	dependent instance from its owning field, flushing \
-	(causing the unreferenced dependent instance to be deleted), and then \
-	trying to assign the deleted instance to another field before commit.  \
-	The instance cannot be un-deleted, resulting in an error.  Make sure not \
-	to leave dangling dependent references to objects, and to reassign \
-	dependent objects before flush so that OpenJPA does not think they are \
-	unreferenced and therefore safe to delete.
-init-null-pc: Attempt to initialize a state manager with a null \
-	persistence-capable instance for type "{0}".  This is often caused by \
-	attempting to load an instance of an abstract class, or \
-	neglecting to use a class indicator when the base persistence-capable \
-	class in an inheritance tree is abstract.
-init-sm-pc: Attempt to initialize a state manager with an instance that is \
-	already managed ("{0}").  You might be trying to persist this instance \
-	in two threads at the same time.
-bad-ds-oid: The type "{0}" declares datastore identity but the value \
-	passed to lookup of type "{1}" is not a OpenJPA id instance.
-null-oids: Some of the object ids passed to getObjectsById were null.
-marked-rollback: The transaction cannot be committed, because it was already \
-	marked for rollback only.  The transaction will be rolled back instead. \
-    The cause of the rollback-only status is reported in the embedded stack.
-refresh-flushed: You cannot refresh an instance that has been flushed to the \
-	data store.
-pc-loader-different: Attempt to cast instance "{0}" to PersistenceCapable failed. \
-    The object implemented org.apache.openjpa.enhance.PersistenceCapable, \
-    but the instance of that interface was loaded by two different ClassLoaders: \
-    "{1}" and "{2}".
-pc-cast: Attempt to cast instance "{0}" to PersistenceCapable failed.  Ensure \
-	that it has been enhanced.
-del-instance: The instance of type "{0}" with oid "{1}" no longer exists in \
-	the data store.  This may mean that you deleted the instance in a separate \
-	transaction, but this context still has a cached version.
-no-broker-class: The specified type "{0}" could not be loaded. Please ensure \
-	that the class exists in the project class path.
-bad-embed: Attempt to set an embedded value for unembeddable field "{0}". \
-	Please report this error to OpenJPA support.
-embed-ref: You are attempting to access an embedded object reference \
-	that was obtained before the last transaction status change. After \
-	transactions begin and end, all references to embedded objects become \
-	invalid; you have to re-acquire the reference from the owning persistent \
-	object.
-deleted: Operation attempted on a deleted instance.
-dirty: Illegal operation attempted on a dirty instance: dirty objects cannot \
-	be evicted or made transient or non-transactional.
-nested-exceps: This operation failed for some instances.  See the nested \
-	exceptions array for details.
-new: Illegal operation attempted on a newly persisted instance: new objects \
-	cannot be evicted or made transient or non-transactional.
-transient: Illegal operation attempted on a transient instance.
-not-active: Can only perform operation while a transaction is active.
-trans-active: The "{0}" transaction property cannot be set during an \
-	active transaction.
-active: This operation cannot be performed while a Transaction is active.
-closed: The context has been closed.  The stack trace at which the \
-	context was closed is held in the embedded exception.
-closed-notrace: The context has been closed.  The stack trace at which the \
-	context was closed is available if Runtime=TRACE logging is enabled.
-closed-factory: The factory has been closed.  The stack trace at \
-	which the factory was closed is held in the embedded exception.
-closed-factory-notrace: The factory has been closed.  The stack trace at \
-	which the factory was closed is available if Runtime=TRACE logging is \
-	enabled.
-non-trans-read: To perform reads on persistent data outside of a transaction, \
-	the "NontransactionalRead" property must be set on the Transaction.
-non-trans-write: To perform writes on persistent data outside of a \
-	transaction, the "NontransactionalWrite" property must be set to true.
-write-operation: To perform this operation, it must be written within a transaction, \
-	or your settings must allow nontransactional writes and must not detach \
-	all nontransactional reads.
-not-managed: The given instance "{0}" is not managed by this context.
-trans-not-managed: This broker is not configured to use managed transactions.
-bad-detached-op: You cannot perform operation {0} on detached object "{1}". \
-	This operation only applies to managed objects.
-persist-detached: Attempt to persist detached object "{0}".  If this is a new \
-  instance, make sure any version and/or auto-generated primary key fields are \
-  null/default when persisting.
-null-value: The field "{0}" of instance "{1}" contained a null value; \
-	the metadata for this field specifies that nulls are illegal.
-change-identity: Attempt to change a primary key field of an instance that \
-	already has a final object id.  Only new, unflushed instances whose id you \
-	have not retrieved can have their primary keys changed.
-managed-oid: You cannot assign managed object "{0}" to the primary key field \
-	of "{1}".  Embedded primary key values must be transient objects.
-changed-oid: You have modified the object id field of "{2}".  Its old value \
-	was "{0}", and its new value is "{1}".  Object id fields are immutable \
-	once the object id of the instance has been assigned. 
-cache-exists: An object of type "{0}" with oid "{1}" already exists in this \
-	context; another cannot be persisted.
-null-trans: Attempt to commit a null javax.transaction.Transaction.  Some \
-	application servers set the transaction to null if a rollback occurs.
-end-trans-error: An exception occurred while ending the transaction.  This \
-	exception will be re-thrown.
-not-bound: The file named "{0}" could not be found.
-naming-exception: A NamingException was thrown while obtaining the \
-	factory at "{0}" from JNDI.
-attach-deleted: The object "{0}" with id "{1}" has been deleted and \
-	cannot be attached.
-not-detachable: The class "{0}" does not declare the "detachable" metadata \
-	extension, so cannot be detached.
-not-copyable: Attempt to copy field "{0}" failed.  The field is \
-	not copyable.  This can occur with custom SCO types. Only standard or \
-    immutable SCO types can be attached and detached.
-no-detach-object-id: Cannot access the detached object id of class "{0}". \
-	Ensure that the class has the "detachable" metadata extension, and \
-	the the class has been re-enhanced.
-attach-version-del: Attempted to attach deleted instance type "{0}" with oid \
-	"{1}".  If the instance is new, the version field should be left to its \
-	default value.
-attach-wrongclass: Attempted to attach instance "{0}" of type "{1}", but this \
-	instance is already in the datastore as type "{2}".
-sjvm-acquired-lock: Acquired an exclusive lock "{0}" on oid "{1}".
-sjvm-released-lock: Released lock "{0}".
-conn-not-supported: This data store cannot return connections.
-incremental-flush-not-supported: This data store does not support incremental \
-	flushing.  If you got this error while performing a query, make sure that \
-	you have not set the FlushBeforeQueries option to true, either by \
-	setting that option in your configuration file, or by programmatically \
-	setting the current FetchConfiguration''s FlushBeforeQueries property \
-	to QueryFlushModes.FLUSH_TRUE.
-nontrans-read-not-supported: This data store does not support nontransactional \
-	reads.  Set the NontransactionalRead property to false.
-optimistic-not-supported: This datastore does not support optimistic \
-	transactions.  Set the Optimistic property to false.
-restore-unset: The RestoreValues option is off, so initial values are \
-	not available. Turn on RestoreValues to be able to obtain initial \
-	values.
-mutable-restore-unset: The RestoreValues option is not set to "all", \
-	initial values of mutable fields are not available. Set RestoreValues to \
-	"all" in your properties to be able to obtain initial mutable values.
-initial-unloaded: OpenJPA cannot provide the initial value for field "{0}" \
-	because the field was never loaded before being changed.
-dup-oid-assign: Attempt to assign id "{0}" to new instance "{1}" failed; \
-	there is already an object in the L1 cache with this id. \
-	You must delete this object (in a previous transaction or the current one) \
-	before reusing its id.  This error can also occur when a horizontally \
-	or vertically mapped classes uses auto-increment application identity and \
-	does not use a hierarchy of application identity classes.
-dup-load: Cannot load object with id "{0}".  Instance "{1}" with the same id \
-	already exists in the L1 cache.  This can occur when you \
-	assign an existing id to a new instance, and before flushing attempt to \
-	load the existing instance for that id.
-bad-id-value: The given value "{0}" cannot be converted into an identity \
-	for "{2}".  The value is the wrong type ({1}).
-factory-init: Starting OpenJPA {0}
-factory-properties: Properties: {0}
-inverse-consistency: An inverse inconsistency in the object model was \
-	detected while flushing the field "{0}" of the instance with id "{1}" \
-	in context "{2}".
-no-brokerfactory: You did not name the factory class with the required \
-	property openjpa.BrokerFactory.  Normally this property defaults \
-    appropriately; have you forgotten to include all the OpenJPA jars in your \
-    classpath?
-brokerfactory-excep: There was an error when invoking the static \
-	getInstance method on the named factory class "{0}".  See the \
-	nested exception for details.
-new-brokerfactory-excep: There was an error when invoking the static \
-	newInstance method on the named factory class "{0}".  See the \
-	nested exception for details.
-bad-brokerfactory: Could not invoke the static getInstance method on the \
-	named factory class "{0}".
-bad-new-brokerfactory: Could not invoke the static newInstance method on the \
-	named factory class "{0}".
-bad-brokerfactory-class: The named BrokerFactory "{0}" is not valid.
-instantiate-abstract: Cannot instantiate abstract class of type "{0}" with \
-	object id "{1}"; this may indicate that the inheritance discriminator \
-	for the class is not configured correctly.
-nontrans-proxied: You cannot make a property access object created with "new" \
-	nontransactional.
-no-field: Field "{0}" is not declared in "{1}", or is not managed.
-no-field-index: "{0}" is not the index of any managed field in "{1}".
-cant-cascade-persist: Encountered unmanaged object in persistent field \
-	"{0}" during flush.  However, this field does not \
-	allow cascade persist. Set the cascade attribute for this field to \
-    CascadeType.PERSIST or CascadeType.ALL (JPA annotations) or \
-    "persist" or "all" (JPA orm.xml), or enable cascade-persist globally, \
-    or manually persist the related field value prior to flushing. \
-	You cannot flush unmanaged objects or graphs that have persistent \
-    associations to unmanaged objects.
-cant-cascade-attach: Encountered new object in persistent field \
-	"{0}" during attach.  However, this field does not \
-	allow cascade attach. Set the cascade attribute for this field to \
-    CascadeType.MERGE or CascadeType.ALL (JPA annotations) or \
-    "merge" or "all" (JPA orm.xml). \
-    You cannot attach a reference to a new object without cascading.
-ref-to-deleted: Encountered deleted object "{0}" in persistent field \
-	"{1}" of managed object "{2}" during flush.
-no-version-field: Encountered object "{0}" without a version field during \
-	attach.  In order to attach an object, it must either be enhanced or must \
-	have a version field.
-inmem-agg-proj-var: Queries with aggregates or projections using variables \
-	currently cannot be executed in-memory.  Either set IgnoreCache to true, \
-	set the openjpa.FlushBeforeQueries property to true, or execute the query \
-	before changing any instances in the transaction.  The offending query was \
-	on type "{0}" with filter "{1}".
-merged-order-with-result: This query on candidate type "{0}" with filter "{1}" \
-	involves combining the results of multiple queries in memory. \
-	You have chosen to order the results on "{2}", but you have not selected \
-	this data in your setResult() clause.  Please include this ordering data \
-	in setResult() so that OpenJPA can extract it for in-memory ordering.
-bad-grouping: Your query on type "{0}" with filter "{1}" is invalid.  Your \
-    select and having clauses must only include aggregates or values that also \
-    appear in your grouping clause.
-query-nosupport: The "{0}" query type does not support this operation.
-query-unmapped: You cannot query unmapped type "{0}".
-range-too-big: The range of the query is too big. Start index: "{0}", end \
-	index: "{1}". The range must be less than Integer.MAX_VALUE.
-invalid-range: The query range from {0} to {1} is not valid.
-no-impls: Unable to execute a query on type "{0}".  This class or interface \
-	is not mapped, and does not have any mapped implementors.
-bad-param-name: The parameter name or position "{0}" passed to \
-	execute() is not valid.  All map keys must be a declared parameter \
-	name or a number matching the parameter position. Native queries only \
-	allow the use of positional parameters.
-force-in-mem: This query on type "{0}" must load the entire candidate class \
-	extent and evaluate the query in-memory.  This may be very slow.  The \
-	query must be executed in memory because OpenJPA is configured with \
-	IgnoreCache=false and FlushBeforeQueries=false and \
-	there are dirty instances that may affect the query''s outcome in the \
-	cache.
-cant-exec-inmem: Queries of this type ("{0}") cannot be executed in-memory. \
-	Either set IgnoreCache to true, set the openjpa.FlushBeforeQueries \
-	property to true, or execute the query before changing any instances in \
-	the transaction.
-executing-query: Executing query: {0}
-executing-query-with-params: Executing query: [{0}] with parameters: {1}
-not-unique: The query on candidate type "{0}" with filter "{1}" was \
-	configured to have a unique result, but more than one instance matched \
-	the query.
-no-result: The query on candidate type "{0}" with filter "{1}" was \
-	configured to have a unique result, but no instance matched \
-	the query.
-serialized: Queries that have been serialized do not support this operation.
-read-only: Attempt to modify a read-only query object.
-no-class: A candidate Class must be specified before executing a query.
-no-modify-resultclass: A query that declares a result class cannot be used \
-	to perform bulk updates.
-no-modify-unique: A query that declares unique results cannot be used \
-	to perform bulk updates.
-no-modify-range: A query that declares a result range cannot be used \
-	to perform bulk updates.
-unbound-param: Cannot execute query; the declared parameter "{0}" was \
-	not given a value.
-unbound-params: Cannot execute query; declared parameters "{0}" were not given \
-	values.  You must supply a value for each of the following parameters, \
-	in the given order: {1}
-extra-params: More parameters were passed to execute() than were declared: \
-	{1} parameters were specified for query execution, but only {0} \
-	parameters were declared in the query.
-null-primitive-param: Parameter "{0}" expects a value of primitive "{1}" \ 
-	but was given a null value.
-param-value-mismatch: Parameter "{0}" expects a value of "{1}" but was given \
-	a value of "{2}" of "{3}".
-merged-aggregate: This query on candidate type "{0}" with filter "{1}" \
-	involves combining the results of multiple sub-queries.  However, because \
-	this query is for aggregate data, OpenJPA cannot combine the sub-query \
-	aggregates into correct final values.
-bad-dec: The {1} declaration "{0}" is \
-	not valid. Variables and imports must be delimited with ";".  Parameters \
-	and orderings must be delimited with ",".  Imports require the "import" \
-	keyword, and orderings require the "ascending" or "descending" keyword.
-mod-bigdecimal: You cannot use the modulo operator (%) on numbers of type \
-	BigDecimal.
-cant-convert: Cannot convert object "{0}" of type "{1}" into an instance of \
-	"{2}".
-bad-method-class: You set the method name of this openjpa.MethodQL query to \
-	"{1}", but class "{0}" is not a valid class name.  Make sure to fully \
-	qualify the class name or to import its package into this query if the \
-	class is not in the query candidate class'' package. 
-method-not-static: Method "{0}" named in the MethodQL query must be static.
-method-return-type-invalid: Method "{0}" named in the MethodQL query must \
-    have a return type that is assignable from ResultObjectProvider. Return \
-    type is: {1}.
-no-method: You must set the query filter to the name of the method to execute \
-	for this MethodQL query instance.
-method-error: There was an error invoking method "{0}" with arguments "{1}".
-bad-param-type: The type "{0}" as used in the parameter declarations \
-	could not be found in the imports.
-cant-set: Result type "{0}" does not have any public fields or setter methods \
-	for the projection or aggregate result element "{1}", nor does it have a \
-	generic put(Object,Object) method that can be used, nor does it have a \
-	public constructor that takes the types {2}.
-pack-err: There was an error packing the projection and/or aggregate results \
-	of the query into result type "{0}".  See the nested Throwable exception \
-	for details.
-pack-instantiation-err: There was an error creating an instance of type "{0}" \
-	when packing the projection and/or aggregate results of the query. Ensure \
-	that you have defined a public no-args constructor in "{0}".
-bad-inmem-method: Method "{0}(StoreContext, ClassMetaData, boolean, Object, \
-	Map, FetchConfiguration)" is not declared in type "{1}". \
-	Check the method name supplied in your MethodQL query filter.  \
-	OpenJPA is attempting to execute this query in-memory; if you implemented \
-    the datastore method instead (a method with the same signature but without \
-    the Object argument) and want this query to execute in the datastore, \
-    either create the query before modifying objects in the current transaction, \
-	set IgnoreCache to true, or set the openjpa.FlushBeforeQueries property to \
-	true.
-bad-datastore-method: Method "{0}(StoreContext, ClassMetaData, boolean, Map, \
-	FetchConfiguration)" is not declared in type "{1}".  Check \
-	the method name supplied in your MethodQL query filter.  OpenJPA is \
-	attempting to execute this query against the datastore; if you implemented \
-	the in-memory method instead (a method with the same signature but with an \
-	Object argument) and want this query to execute in-memory, supply a \
-	Collection of candidates to filter.
-only-update-constants: Bulk update queries when executed in memory \
-	may only update to constant values.
-only-range-constants: Range values must be numeric constants.  Illegal query: \
-    {0}
-no-savepoint-copy: Unable to copy field "{0}" for savepoint.
-savepoint-exists: A savepoint with the name "{0}" already exists.  \
-	Each savepoint name must be unique.
-no-lastsavepoint: Cannot rollback/release last savepoint as no savepoint \
-	has been set.
-no-savepoint: You have not set a savepoint with the name "{0}"
-savepoint-init: This savepoint has already been initialized.
-savepoint-flush-not-supported: The configured SavepointManager does not \
-	support incremental flushing when a savepoint has been set.  You must \
-	release your savepoints before flushing.
-callback-err: Errors occurred processing listener callbacks.  See the nested \
-    exceptions for details.
-bad-agg-listener-hint: Query hint value "{0}" ({1}) cannot be converted into \
-	an aggregate listener.
-bad-filter-listener-hint: Query hint value "{0}" ({1}) cannot be converted \
-	into a filter listener.
-bad-setter-hint-arg: In query hint "{0}", cannot convert hint value "{1}" to \
-    type "{2}".
-detach-val-mismatch: The instance "{0}" is managed by another context and \
-	cannot be inspected for field values.
-detach-val-badsm: The instance "{0}" has an unknown state manager which \
-	prevents field inspection.
-null-oid: Cannot perform find using null object id.
-illegal-op-in-prestore: This operation is illegal when called during \
-	transaction completion.
-no-expressions: The query cannot be executed because it has no \
-	valid expressions.
-null-fg: Attempt to add null/empty fetch group name to fetch configuration.
-null-field: Attempt to add null/empty field name to fetch configuration.
-container-projection: Query projections cannot include array, collection, or \
-    map fields.  Invalid query: "{0}"
-existing-value-override-excep: The generated value processing detected an \
-existing value assigned to this field: {0}.  This existing value was either \
-provided via an initializer or by calling the setter method.  You either need \
-to remove the @GeneratedValue annotation or modify the code to remove the \
-initializer processing.
-invalid-tran-status: The transaction was not in a valid state ({0}) to \
-accept the "{1}" method invocation.  Processing will continue.
-multi-threaded-access: Multiple concurrent threads attempted to access a \
-    single broker. By default brokers are not thread safe; if you require \
-    and/or intend a broker to be accessed by more than one thread, set the \
-    openjpa.Multithreaded property to true to override the default behavior.
-no-saved-fields: No state snapshot is available for instance of type "{0}", \
-    but this instance uses state-comparison for dirty detection.
-cant-serialize-flushed-broker: Serialization not allowed once a broker has \
-    been flushed.
-cant-serialize-pessimistic-broker: Serialization not allowed for brokers with \
-    an active datastore (pessimistic) transaction.
-cant-serialize-connected-broker: Serialization not allowed for brokers with \
-    an active connection to the database.
-no-interface-metadata: No metadata was found for managed interface {0}.
-fetch-configuration-stack-empty: Fetch configuration stack is empty.
-gap-query-param: Parameter {1} for query "{0}" exceeds the number of {2} \
-	bound parameters with following values "{3}". This can happen if you have \
-	declared but missed to bind values for one or more parameters.
-query-execution-error: Failed to execute query "{0}". Check the query syntax \
-	for correctness. See nested exception for details. 
-invalid-timeout: An invalid timeout of {0} milliseconds was ignored.  \
-        Expected a value that is greater than or equal to -1.
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+cant-convert-result: There is no conversion between query projection type \
+	"{0}" and result type "{1}".
+unloaded-detached: Attempt to access an unloaded field of detached instance \
+	"{0}".
+meta-unknownid: Cannot manipulate identity of type "{0}": it''s identity type \
+	is unknown.
+new-abstract: Cannot create an instance of "{0}": abstract classes are not \
+    yet supported.
+bad-new-query: Attempt to construct a query from an extent or class.  You must \
+	pass a (possibly null) query string or template to the query factory \
+	method when creating the query.
+update-restrict: Detected attempt to modify field "{0}" with value strategy \
+	"restrict".
+reentrant-flush: Detected reentrant flush.  Make sure your flush-time instance \
+	callback methods or event listeners do not invoke any operations that \
+	require the in-progress flush to complete.
+rolled-back: The transaction has been rolled back.  See the nested exceptions \
+	for details on the errors that occurred.
+bad-lock-level: This lock manager does not recognize lock level "{0}".
+pessimistic-mutate: You are attempting to directly mutate a persistent second \
+	class object (such as a collection or map field) that you obtained before \
+	the transaction began.  After beginning a datastore transaction, you \
+	must re-obtain any references to mutable second class objects from the \
+	owning persistent object.  Detected attempt to mutate value of field "{0}" \
+	in instance "{1}".  This instance may not be locked correctly.
+not-derefed: Encountered unknown dependent instance "{0}".  This error is \
+	often caused by either removing a dependent instance from one dependent \
+	field, but not other dependent fields that reference it, or by removing a \
+	dependent instance from its owning field, flushing \
+	(causing the unreferenced dependent instance to be deleted), and then \
+	trying to assign the deleted instance to another field before commit.  \
+	The instance cannot be un-deleted, resulting in an error.  Make sure not \
+	to leave dangling dependent references to objects, and to reassign \
+	dependent objects before flush so that OpenJPA does not think they are \
+	unreferenced and therefore safe to delete.
+init-null-pc: Attempt to initialize a state manager with a null \
+	persistence-capable instance for type "{0}".  This is often caused by \
+	attempting to load an instance of an abstract class, or \
+	neglecting to use a class indicator when the base persistence-capable \
+	class in an inheritance tree is abstract.
+init-sm-pc: Attempt to initialize a state manager with an instance that is \
+	already managed ("{0}").  You might be trying to persist this instance \
+	in two threads at the same time.
+bad-ds-oid: The type "{0}" declares datastore identity but the value \
+	passed to lookup of type "{1}" is not a OpenJPA id instance.
+null-oids: Some of the object ids passed to getObjectsById were null.
+marked-rollback: The transaction cannot be committed, because it was already \
+	marked for rollback only.  The transaction will be rolled back instead. \
+    The cause of the rollback-only status is reported in the embedded stack.
+refresh-flushed: You cannot refresh an instance that has been flushed to the \
+	data store.
+pc-loader-different: Attempt to cast instance "{0}" to PersistenceCapable failed. \
+    The object implemented org.apache.openjpa.enhance.PersistenceCapable, \
+    but the instance of that interface was loaded by two different ClassLoaders: \
+    "{1}" and "{2}".
+pc-cast: Attempt to cast instance "{0}" to PersistenceCapable failed.  Ensure \
+	that it has been enhanced.
+del-instance: The instance of type "{0}" with oid "{1}" no longer exists in \
+	the data store.  This may mean that you deleted the instance in a separate \
+	transaction, but this context still has a cached version.
+no-broker-class: The specified type "{0}" could not be loaded. Please ensure \
+	that the class exists in the project class path.
+bad-embed: Attempt to set an embedded value for unembeddable field "{0}". \
+	Please report this error to OpenJPA support.
+embed-ref: You are attempting to access an embedded object reference \
+	that was obtained before the last transaction status change. After \
+	transactions begin and end, all references to embedded objects become \
+	invalid; you have to re-acquire the reference from the owning persistent \
+	object.
+deleted: Operation attempted on a deleted instance.
+dirty: Illegal operation attempted on a dirty instance: dirty objects cannot \
+	be evicted or made transient or non-transactional.
+nested-exceps: This operation failed for some instances.  See the nested \
+	exceptions array for details.
+new: Illegal operation attempted on a newly persisted instance: new objects \
+	cannot be evicted or made transient or non-transactional.
+transient: Illegal operation attempted on a transient instance.
+not-active: Can only perform operation while a transaction is active.
+trans-active: The "{0}" transaction property cannot be set during an \
+	active transaction.
+active: This operation cannot be performed while a Transaction is active.
+closed: The context has been closed.  The stack trace at which the \
+	context was closed is held in the embedded exception.
+closed-notrace: The context has been closed.  The stack trace at which the \
+	context was closed is available if Runtime=TRACE logging is enabled.
+closed-factory: The factory has been closed.  The stack trace at \
+	which the factory was closed is held in the embedded exception.
+closed-factory-notrace: The factory has been closed.  The stack trace at \
+	which the factory was closed is available if Runtime=TRACE logging is \
+	enabled.
+non-trans-read: To perform reads on persistent data outside of a transaction, \
+	the "NontransactionalRead" property must be set on the Transaction.
+non-trans-write: To perform writes on persistent data outside of a \
+	transaction, the "NontransactionalWrite" property must be set to true.
+write-operation: To perform this operation, it must be written within a transaction, \
+	or your settings must allow nontransactional writes and must not detach \
+	all nontransactional reads.
+not-managed: The given instance "{0}" is not managed by this context.
+trans-not-managed: This broker is not configured to use managed transactions.
+bad-detached-op: You cannot perform operation {0} on detached object "{1}". \
+	This operation only applies to managed objects.
+persist-detached: Attempt to persist detached object "{0}".  If this is a new \
+  instance, make sure any version and/or auto-generated primary key fields are \
+  null/default when persisting.
+null-value: The field "{0}" of instance "{1}" contained a null value; \
+	the metadata for this field specifies that nulls are illegal.
+change-identity: Attempt to change a primary key field of an instance that \
+	already has a final object id.  Only new, unflushed instances whose id you \
+	have not retrieved can have their primary keys changed.
+managed-oid: You cannot assign managed object "{0}" to the primary key field \
+	of "{1}".  Embedded primary key values must be transient objects.
+changed-oid: You have modified the object id field of "{2}".  Its old value \
+	was "{0}", and its new value is "{1}".  Object id fields are immutable \
+	once the object id of the instance has been assigned.
+cache-exists: An object of type "{0}" with oid "{1}" already exists in this \
+	context; another cannot be persisted.
+null-trans: Attempt to commit a null javax.transaction.Transaction.  Some \
+	application servers set the transaction to null if a rollback occurs.
+end-trans-error: An exception occurred while ending the transaction.  This \
+	exception will be re-thrown.
+not-bound: The file named "{0}" could not be found.
+naming-exception: A NamingException was thrown while obtaining the \
+	factory at "{0}" from JNDI.
+attach-deleted: The object "{0}" with id "{1}" has been deleted and \
+	cannot be attached.
+not-detachable: The class "{0}" does not declare the "detachable" metadata \
+	extension, so cannot be detached.
+not-copyable: Attempt to copy field "{0}" failed.  The field is \
+	not copyable.  This can occur with custom SCO types. Only standard or \
+    immutable SCO types can be attached and detached.
+no-detach-object-id: Cannot access the detached object id of class "{0}". \
+	Ensure that the class has the "detachable" metadata extension, and \
+	the the class has been re-enhanced.
+attach-version-del: Attempted to attach deleted instance type "{0}" with oid \
+	"{1}".  If the instance is new, the version field should be left to its \
+	default value.
+attach-wrongclass: Attempted to attach instance "{0}" of type "{1}", but this \
+	instance is already in the datastore as type "{2}".
+sjvm-acquired-lock: Acquired an exclusive lock "{0}" on oid "{1}".
+sjvm-released-lock: Released lock "{0}".
+conn-not-supported: This data store cannot return connections.
+incremental-flush-not-supported: This data store does not support incremental \
+	flushing.  If you got this error while performing a query, make sure that \
+	you have not set the FlushBeforeQueries option to true, either by \
+	setting that option in your configuration file, or by programmatically \
+	setting the current FetchConfiguration''s FlushBeforeQueries property \
+	to QueryFlushModes.FLUSH_TRUE.
+nontrans-read-not-supported: This data store does not support nontransactional \
+	reads.  Set the NontransactionalRead property to false.
+optimistic-not-supported: This datastore does not support optimistic \
+	transactions.  Set the Optimistic property to false.
+restore-unset: The RestoreValues option is off, so initial values are \
+	not available. Turn on RestoreValues to be able to obtain initial \
+	values.
+mutable-restore-unset: The RestoreValues option is not set to "all", \
+	initial values of mutable fields are not available. Set RestoreValues to \
+	"all" in your properties to be able to obtain initial mutable values.
+initial-unloaded: OpenJPA cannot provide the initial value for field "{0}" \
+	because the field was never loaded before being changed.
+dup-oid-assign: Attempt to assign id "{0}" to new instance "{1}" failed; \
+	there is already an object in the L1 cache with this id. \
+	You must delete this object (in a previous transaction or the current one) \
+	before reusing its id.  This error can also occur when a horizontally \
+	or vertically mapped classes uses auto-increment application identity and \
+	does not use a hierarchy of application identity classes.
+dup-load: Cannot load object with id "{0}".  Instance "{1}" with the same id \
+	already exists in the L1 cache.  This can occur when you \
+	assign an existing id to a new instance, and before flushing attempt to \
+	load the existing instance for that id.
+bad-id-value: The given value "{0}" cannot be converted into an identity \
+	for "{2}".  The value is the wrong type ({1}).
+factory-init: Starting OpenJPA {0}
+factory-properties: Properties: {0}
+inverse-consistency: An inverse inconsistency in the object model was \
+	detected while flushing the field "{0}" of the instance with id "{1}" \
+	in context "{2}".
+no-brokerfactory: You did not name the factory class with the required \
+	property openjpa.BrokerFactory.  Normally this property defaults \
+    appropriately; have you forgotten to include all the OpenJPA jars in your \
+    classpath?
+brokerfactory-excep: There was an error when invoking the static \
+	getInstance method on the named factory class "{0}".  See the \
+	nested exception for details.
+new-brokerfactory-excep: There was an error when invoking the static \
+	newInstance method on the named factory class "{0}".  See the \
+	nested exception for details.
+bad-brokerfactory: Could not invoke the static getInstance method on the \
+	named factory class "{0}".
+bad-new-brokerfactory: Could not invoke the static newInstance method on the \
+	named factory class "{0}".
+bad-brokerfactory-class: The named BrokerFactory "{0}" is not valid.
+instantiate-abstract: Cannot instantiate abstract class of type "{0}" with \
+	object id "{1}"; this may indicate that the inheritance discriminator \
+	for the class is not configured correctly.
+nontrans-proxied: You cannot make a property access object created with "new" \
+	nontransactional.
+no-field: Field "{0}" is not declared in "{1}", or is not managed.
+no-field-index: "{0}" is not the index of any managed field in "{1}".
+cant-cascade-persist: Encountered unmanaged object in persistent field \
+	"{0}" during flush.  However, this field does not \
+	allow cascade persist. Set the cascade attribute for this field to \
+    CascadeType.PERSIST or CascadeType.ALL (JPA annotations) or \
+    "persist" or "all" (JPA orm.xml), or enable cascade-persist globally, \
+    or manually persist the related field value prior to flushing. \
+	You cannot flush unmanaged objects or graphs that have persistent \
+    associations to unmanaged objects.
+cant-cascade-attach: Encountered new object in persistent field \
+	"{0}" during attach.  However, this field does not \
+	allow cascade attach. Set the cascade attribute for this field to \
+    CascadeType.MERGE or CascadeType.ALL (JPA annotations) or \
+    "merge" or "all" (JPA orm.xml). \
+    You cannot attach a reference to a new object without cascading.
+ref-to-deleted: Encountered deleted object "{0}" in persistent field \
+	"{1}" of managed object "{2}" during flush.
+no-version-field: Encountered object "{0}" without a version field during \
+	attach.  In order to attach an object, it must either be enhanced or must \
+	have a version field.
+inmem-agg-proj-var: Queries with aggregates or projections using variables \
+	currently cannot be executed in-memory.  Either set IgnoreCache to true, \
+	set the openjpa.FlushBeforeQueries property to true, or execute the query \
+	before changing any instances in the transaction.  The offending query was \
+	on type "{0}" with filter "{1}".
+merged-order-with-result: This query on candidate type "{0}" with filter "{1}" \
+	involves combining the results of multiple queries in memory. \
+	You have chosen to order the results on "{2}", but you have not selected \
+	this data in your setResult() clause.  Please include this ordering data \
+	in setResult() so that OpenJPA can extract it for in-memory ordering.
+bad-grouping: Your query on type "{0}" with filter "{1}" is invalid.  Your \
+    select and having clauses must only include aggregates or values that also \
+    appear in your grouping clause.
+query-nosupport: The "{0}" query type does not support this operation.
+query-unmapped: You cannot query unmapped type "{0}".
+range-too-big: The range of the query is too big. Start index: "{0}", end \
+	index: "{1}". The range must be less than Integer.MAX_VALUE.
+invalid-range: The query range from {0} to {1} is not valid.
+no-impls: Unable to execute a query on type "{0}".  This class or interface \
+	is not mapped, and does not have any mapped implementors.
+bad-param-name: The parameter name or position "{0}" passed to \
+	execute() is not valid.  All map keys must be a declared parameter \
+	name or a number matching the parameter position. Native queries only \
+	allow the use of positional parameters.
+force-in-mem: This query on type "{0}" must load the entire candidate class \
+	extent and evaluate the query in-memory.  This may be very slow.  The \
+	query must be executed in memory because OpenJPA is configured with \
+	IgnoreCache=false and FlushBeforeQueries=false and \
+	there are dirty instances that may affect the query''s outcome in the \
+	cache.
+cant-exec-inmem: Queries of this type ("{0}") cannot be executed in-memory. \
+	Either set IgnoreCache to true, set the openjpa.FlushBeforeQueries \
+	property to true, or execute the query before changing any instances in \
+	the transaction.
+executing-query: Executing query: {0}
+executing-query-with-params: Executing query: [{0}] with parameters: {1}
+not-unique: The query on candidate type "{0}" with filter "{1}" was \
+	configured to have a unique result, but more than one instance matched \
+	the query.
+no-result: The query on candidate type "{0}" with filter "{1}" was \
+	configured to have a unique result, but no instance matched \
+	the query.
+serialized: Queries that have been serialized do not support this operation.
+read-only: Attempt to modify a read-only query object.
+no-class: A candidate Class must be specified before executing a query.
+no-modify-resultclass: A query that declares a result class cannot be used \
+	to perform bulk updates.
+no-modify-unique: A query that declares unique results cannot be used \
+	to perform bulk updates.
+no-modify-range: A query that declares a result range cannot be used \
+	to perform bulk updates.
+unbound-param: Cannot execute query; the declared parameter "{0}" was \
+	not given a value.
+unbound-params: Cannot execute query; declared parameters "{0}" were not given \
+	values.  You must supply a value for each of the following parameters, \
+	in the given order: {1}
+extra-params: More parameters were passed to execute() than were declared: \
+	{1} parameters were specified for query execution, but only {0} \
+	parameters were declared in the query.
+null-primitive-param: Parameter "{0}" expects a value of primitive "{1}" \
+	but was given a null value.
+param-value-mismatch: Parameter "{0}" expects a value of "{1}" but was given \
+	a value of "{2}" of "{3}".
+merged-aggregate: This query on candidate type "{0}" with filter "{1}" \
+	involves combining the results of multiple sub-queries.  However, because \
+	this query is for aggregate data, OpenJPA cannot combine the sub-query \
+	aggregates into correct final values.
+bad-dec: The {1} declaration "{0}" is \
+	not valid. Variables and imports must be delimited with ";".  Parameters \
+	and orderings must be delimited with ",".  Imports require the "import" \
+	keyword, and orderings require the "ascending" or "descending" keyword.
+mod-bigdecimal: You cannot use the modulo operator (%) on numbers of type \
+	BigDecimal.
+cant-convert: Cannot convert object "{0}" of type "{1}" into an instance of \
+	"{2}".
+bad-method-class: You set the method name of this openjpa.MethodQL query to \
+	"{1}", but class "{0}" is not a valid class name.  Make sure to fully \
+	qualify the class name or to import its package into this query if the \
+	class is not in the query candidate class'' package.
+method-not-static: Method "{0}" named in the MethodQL query must be static.
+method-return-type-invalid: Method "{0}" named in the MethodQL query must \
+    have a return type that is assignable from ResultObjectProvider. Return \
+    type is: {1}.
+no-method: You must set the query filter to the name of the method to execute \
+	for this MethodQL query instance.
+method-error: There was an error invoking method "{0}" with arguments "{1}".
+bad-param-type: The type "{0}" as used in the parameter declarations \
+	could not be found in the imports.
+cant-set: Result type "{0}" does not have any public fields or setter methods \
+	for the projection or aggregate result element "{1}", nor does it have a \
+	generic put(Object,Object) method that can be used, nor does it have a \
+	public constructor that takes the types {2}.
+pack-err: There was an error packing the projection and/or aggregate results \
+	of the query into result type "{0}".  See the nested Throwable exception \
+	for details.
+pack-instantiation-err: There was an error creating an instance of type "{0}" \
+	when packing the projection and/or aggregate results of the query. Ensure \
+	that you have defined a public no-args constructor in "{0}".
+bad-inmem-method: Method "{0}(StoreContext, ClassMetaData, boolean, Object, \
+	Map, FetchConfiguration)" is not declared in type "{1}". \
+	Check the method name supplied in your MethodQL query filter.  \
+	OpenJPA is attempting to execute this query in-memory; if you implemented \
+    the datastore method instead (a method with the same signature but without \
+    the Object argument) and want this query to execute in the datastore, \
+    either create the query before modifying objects in the current transaction, \
+	set IgnoreCache to true, or set the openjpa.FlushBeforeQueries property to \
+	true.
+bad-datastore-method: Method "{0}(StoreContext, ClassMetaData, boolean, Map, \
+	FetchConfiguration)" is not declared in type "{1}".  Check \
+	the method name supplied in your MethodQL query filter.  OpenJPA is \
+	attempting to execute this query against the datastore; if you implemented \
+	the in-memory method instead (a method with the same signature but with an \
+	Object argument) and want this query to execute in-memory, supply a \
+	Collection of candidates to filter.
+only-update-constants: Bulk update queries when executed in memory \
+	may only update to constant values.
+only-range-constants: Range values must be numeric constants.  Illegal query: \
+    {0}
+no-savepoint-copy: Unable to copy field "{0}" for savepoint.
+savepoint-exists: A savepoint with the name "{0}" already exists.  \
+	Each savepoint name must be unique.
+no-lastsavepoint: Cannot rollback/release last savepoint as no savepoint \
+	has been set.
+no-savepoint: You have not set a savepoint with the name "{0}"
+savepoint-init: This savepoint has already been initialized.
+savepoint-flush-not-supported: The configured SavepointManager does not \
+	support incremental flushing when a savepoint has been set.  You must \
+	release your savepoints before flushing.
+callback-err: Errors occurred processing listener callbacks.  See the nested \
+    exceptions for details.
+bad-agg-listener-hint: Query hint value "{0}" ({1}) cannot be converted into \
+	an aggregate listener.
+bad-filter-listener-hint: Query hint value "{0}" ({1}) cannot be converted \
+	into a filter listener.
+bad-setter-hint-arg: In query hint "{0}", cannot convert hint value "{1}" to \
+    type "{2}".
+detach-val-mismatch: The instance "{0}" is managed by another context and \
+	cannot be inspected for field values.
+detach-val-badsm: The instance "{0}" has an unknown state manager which \
+	prevents field inspection.
+null-oid: Cannot perform find using null object id.
+illegal-op-in-prestore: This operation is illegal when called during \
+	transaction completion.
+no-expressions: The query cannot be executed because it has no \
+	valid expressions.
+null-fg: Attempt to add null/empty fetch group name to fetch configuration.
+null-field: Attempt to add null/empty field name to fetch configuration.
+container-projection: Query projections cannot include array, collection, or \
+    map fields.  Invalid query: "{0}"
+existing-value-override-excep: The generated value processing detected an \
+existing value assigned to this field: {0}.  This existing value was either \
+provided via an initializer or by calling the setter method.  You either need \
+to remove the @GeneratedValue annotation or modify the code to remove the \
+initializer processing.
+invalid-tran-status: The transaction was not in a valid state ({0}) to \
+accept the "{1}" method invocation.  Processing will continue.
+multi-threaded-access: Multiple concurrent threads attempted to access a \
+    single broker. By default brokers are not thread safe; if you require \
+    and/or intend a broker to be accessed by more than one thread, set the \
+    openjpa.Multithreaded property to true to override the default behavior.
+no-saved-fields: No state snapshot is available for instance of type "{0}", \
+    but this instance uses state-comparison for dirty detection.
+cant-serialize-flushed-broker: Serialization not allowed once a broker has \
+    been flushed.
+cant-serialize-pessimistic-broker: Serialization not allowed for brokers with \
+    an active datastore (pessimistic) transaction.
+cant-serialize-connected-broker: Serialization not allowed for brokers with \
+    an active connection to the database.
+no-interface-metadata: No metadata was found for managed interface {0}.
+fetch-configuration-stack-empty: Fetch configuration stack is empty.
+gap-query-param: Parameter {1} for query "{0}" exceeds the number of {2} \
+	bound parameters with following values "{3}". This can happen if you have \
+	declared but missed to bind values for one or more parameters.
+query-execution-error: Failed to execute query "{0}". Check the query syntax \
+	for correctness. See nested exception for details.
+invalid-timeout: An invalid timeout of {0} milliseconds was ignored.  \
+    Expected a value that is greater than or equal to -1.
+bad-hint-value: "{1}" is not a valid value for hint "{0}" caused by: {2}.
+bad-flush-before-queries: Invalid flush before queries type. Valid values are \
+	"true"(0), "false"(1) or "with-connection"(2). Specified value: {0}.
+bad-lock-level: Invalid lock mode/level. Valid values are \
+    "none"(0), "read"(10), "write"(20), "optimistic"(10), \
+    "optimistic-force-increment"(20), "pessimistic-read"(30), \
+    "pessimistic-write"(40) or "pessimistic-force-increment"(50). \
+    Specified value: {0}.
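
The bad-lock-level message appended above enumerates the lock levels that correspond to the JPA 2.0 LockModeType values exercised by the tests in this commit. For context, a minimal sketch of how those modes are requested through the standard javax.persistence API; LockEmployee is the test entity used in the tests below, while the class name, method name, and id parameter here are assumptions for illustration only:

    import javax.persistence.EntityManager;
    import javax.persistence.LockModeType;

    public class LockModeUsageSketch {
        /**
         * Requests the JPA 2.0 lock modes named by the new bad-lock-level
         * message; the EntityManager and id value are supplied by the caller.
         */
        static LockModeType lockExamples(EntityManager em, Object id) {
            em.getTransaction().begin();

            // find() with an explicit lock mode, as TestEmLockMode exercises.
            LockEmployee e =
                em.find(LockEmployee.class, id, LockModeType.PESSIMISTIC_WRITE);

            // Upgrade the lock held on an already-managed instance.
            em.lock(e, LockModeType.PESSIMISTIC_FORCE_INCREMENT);

            // Report the mode currently held, as SequencedActionsTest does.
            LockModeType held = em.getLockMode(e);

            em.getTransaction().rollback();
            return held;
        }
    }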

Modified: openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/SequencedActionsTest.java
URL: http://svn.apache.org/viewvc/openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/SequencedActionsTest.java?rev=762161&r1=762160&r2=762161&view=diff
==============================================================================
--- openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/SequencedActionsTest.java (original)
+++ openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/SequencedActionsTest.java Sun Apr  5 21:29:42 2009
@@ -104,8 +104,6 @@
             }
         }
         assertAllSQLInOrder(
-            "INSERT INTO " + empTableName + " .*",
-            "INSERT INTO " + empTableName + " .*",
             "INSERT INTO " + empTableName + " .*");
 
         // dynamic runtime test to determine wait time.
@@ -150,6 +148,8 @@
             em.createQuery("delete from " + empTableName).executeUpdate();
 
             em.getTransaction().commit();
+        } catch(Exception e) {
+            e.printStackTrace();
         } finally {
             if (em != null && em.isOpen()) {
                 em.close();
@@ -607,9 +607,10 @@
                         LockModeType expectedlockMode = (LockModeType)args[2];
                         LockModeType testinglockMode = em.getLockMode(employee);
                         log.trace("test version: expected=" + expectedlockMode
-                            + ", testing=" + em.getLockMode(employee));
+                            + ", testing=" + testinglockMode);
 
-                        assertEquals("", expectedlockMode, testinglockMode);
+                        assertEquals("", getCanonical(expectedlockMode),
+                            getCanonical(testinglockMode));
                         break;
                     case ResetException:
                         thisThread.throwable = null;
@@ -825,6 +826,14 @@
         }
     }
 
+    private LockModeType getCanonical(LockModeType lockMode) {
+        if( lockMode == LockModeType.READ )
+            return LockModeType.OPTIMISTIC;
+        if( lockMode == LockModeType.WRITE )
+            return LockModeType.OPTIMISTIC_FORCE_INCREMENT;
+        return lockMode;
+    }
+
     private String processException(Act curAction, Throwable t) {
         String failStr = "Caught exception: none";
         if (t != null) {
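
The getCanonical helper added above reflects that JPA 2.0 treats LockModeType.READ as a synonym of OPTIMISTIC and LockModeType.WRITE as a synonym of OPTIMISTIC_FORCE_INCREMENT, so the updated assertEquals accepts either spelling of the same mode. A standalone sketch of that normalization and the comparison it enables; the class and method names here are illustrative:

    import javax.persistence.EntityManager;
    import javax.persistence.LockModeType;

    final class LockModeCompare {
        // READ and WRITE are JPA 2.0 synonyms of the OPTIMISTIC modes;
        // normalize both sides before comparing.
        static LockModeType canonical(LockModeType mode) {
            if (mode == LockModeType.READ)
                return LockModeType.OPTIMISTIC;
            if (mode == LockModeType.WRITE)
                return LockModeType.OPTIMISTIC_FORCE_INCREMENT;
            return mode;
        }

        // Example: a lock requested as READ may be reported back by
        // em.getLockMode() as OPTIMISTIC; this comparison tolerates that.
        static boolean holdsExpectedMode(EntityManager em, Object entity,
            LockModeType expected) {
            return canonical(expected) == canonical(em.getLockMode(entity));
        }
    }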

Added: openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockMode.java
URL: http://svn.apache.org/viewvc/openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockMode.java?rev=762161&view=auto
==============================================================================
--- openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockMode.java (added)
+++ openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockMode.java Sun Apr  5 21:29:42 2009
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.openjpa.persistence.lockmgr;
+
+import javax.persistence.EntityManager;
+import javax.persistence.LockModeType;
+
+import org.apache.openjpa.jdbc.conf.JDBCConfiguration;
+import org.apache.openjpa.jdbc.kernel.JDBCFetchConfigurationImpl;
+import org.apache.openjpa.jdbc.sql.DB2Dictionary;
+import org.apache.openjpa.jdbc.sql.DBDictionary;
+import org.apache.openjpa.persistence.EntityManagerImpl;
+import org.apache.openjpa.persistence.OpenJPAEntityManager;
+import org.apache.openjpa.persistence.OpenJPAEntityManagerSPI;
+
+/**
+ * Test lock modes requested through the EntityManager interface
+ * (find, refresh and lock).
+ */
+public class TestEmLockMode extends SequencedActionsTest {
+    private static String NON_SUPPORTED_OPTIMISTIC_SQL = 
+        "SELECT .* FROM LockEmployee .*";
+    private static String NON_SUPPORTED_FOR_UPDATE_SQL = 
+        "SELECT .* FROM LockEmployee .* FOR UPDATE.*";
+    private static String VERSION_UPDATE_SQL = 
+        "UPDATE LockEmployee SET version .* WHERE .*";
+    private static String DB2_OPTIMISTIC_SQL = 
+        "SELECT .* FROM LockEmployee .* WHERE .*";
+    private static String DB2_PESSIMISTIC_RS_SQL = 
+        "SELECT .* FROM LockEmployee .* WITH RS USE .*";
+    private static String DB2_PESSIMISTIC_RR_SQL = 
+        "SELECT .* FROM LockEmployee .* WITH RR USE .*";
+
+    public void setUp() {
+        setUp(LockEmployee.class, "openjpa.LockManager", "mixed");
+        commonSetUp();
+    }
+
+    /*
+     * Test em.find(lockmode);
+     */
+    public void testFindLockModeIsolations() {
+        EntityManager em = emf.createEntityManager();
+
+        em.getTransaction().begin();
+
+        commonTestFindLockModeIsolations(em, LockModeType.NONE, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 0, null);
+        commonTestFindLockModeIsolations(em, LockModeType.READ, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestFindLockModeIsolations(em, LockModeType.WRITE, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            VERSION_UPDATE_SQL);
+        commonTestFindLockModeIsolations(em, LockModeType.OPTIMISTIC, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestFindLockModeIsolations(em,
+            LockModeType.OPTIMISTIC_FORCE_INCREMENT, 1, DB2_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL, 1, VERSION_UPDATE_SQL);
+        commonTestFindLockModeIsolations(em, LockModeType.PESSIMISTIC_READ, 2,
+            DB2_PESSIMISTIC_RS_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestFindLockModeIsolations(em, LockModeType.PESSIMISTIC_WRITE, 2,
+            DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestFindLockModeIsolations(em,
+            LockModeType.PESSIMISTIC_FORCE_INCREMENT, 2,
+            DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            VERSION_UPDATE_SQL);
+
+        em.getTransaction().rollback();
+        em.close();
+    }
+
+    private void commonTestFindLockModeIsolations(EntityManager em,
+        LockModeType lockMode, int expectedSupportSQLCount,
+        String expectedSupportSQL, int expectedNonSupportSQLCount,
+        String expectedNonSupportSQL, int expectedVersionUpdateCount,
+        String expectedVersionUpdateSQL) {
+        OpenJPAEntityManager oem = (OpenJPAEntityManager) em.getDelegate();
+        JDBCFetchConfigurationImpl fConfig = (JDBCFetchConfigurationImpl)
+            ((EntityManagerImpl) oem).getBroker().getFetchConfiguration();
+        DBDictionary dict = ((JDBCConfiguration) ((OpenJPAEntityManagerSPI) oem)
+            .getConfiguration()).getDBDictionaryInstance();
+
+        em.clear();
+        resetSQL();
+        int beforeIsolation = fConfig.getIsolation();
+        em.find(LockEmployee.class, 1, lockMode);
+        if (dict.supportsIsolationForUpdate() && 
+            dict instanceof DB2Dictionary) {
+            assertEquals(expectedSupportSQLCount, getSQLCount());
+            assertAllSQLInOrder(expectedSupportSQL);
+        } else {
+            assertEquals(expectedNonSupportSQLCount, getSQLCount());
+            assertAllSQLInOrder(expectedNonSupportSQL);
+        }
+
+        resetSQL();
+        em.flush();
+        assertEquals(expectedVersionUpdateCount, getSQLCount());
+        if (expectedVersionUpdateSQL != null)
+            assertAllSQLInOrder(expectedVersionUpdateSQL);
+
+        assertEquals(beforeIsolation, fConfig.getIsolation());
+    }
+
+    /*
+     * Test em.refresh(lockmode);
+     */
+    public void testRefreshLockModeIsolations() {
+        EntityManager em = emf.createEntityManager();
+
+        em.getTransaction().begin();
+
+        commonTestRefreshLockModeIsolations(em, LockModeType.NONE, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 0, null);
+        commonTestRefreshLockModeIsolations(em, LockModeType.READ, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestRefreshLockModeIsolations(em, LockModeType.WRITE, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            VERSION_UPDATE_SQL);
+        commonTestRefreshLockModeIsolations(em, LockModeType.OPTIMISTIC, 1,
+            DB2_OPTIMISTIC_SQL, 1, NON_SUPPORTED_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestRefreshLockModeIsolations(em,
+            LockModeType.OPTIMISTIC_FORCE_INCREMENT, 1, DB2_OPTIMISTIC_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL, 1, VERSION_UPDATE_SQL);
+        commonTestRefreshLockModeIsolations(em, LockModeType.PESSIMISTIC_READ,
+            2, DB2_PESSIMISTIC_RS_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestRefreshLockModeIsolations(em, LockModeType.PESSIMISTIC_WRITE,
+            2, DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestRefreshLockModeIsolations(em,
+            LockModeType.PESSIMISTIC_FORCE_INCREMENT, 2,
+            DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            VERSION_UPDATE_SQL);
+
+        em.getTransaction().rollback();
+        em.close();
+    }
+
+    private void commonTestRefreshLockModeIsolations(EntityManager em,
+        LockModeType lockMode, int expectedSupportSQLCount,
+        String expectedSupportSQL, int expectedNonSupportSQLCount,
+        String expectedNonSupportSQL, int expectedVersionUpdateCount,
+        String expectedVersionUpdateSQL) {
+        OpenJPAEntityManager oem = (OpenJPAEntityManager) em.getDelegate();
+        JDBCFetchConfigurationImpl fConfig = (JDBCFetchConfigurationImpl) 
+            ((EntityManagerImpl) oem).getBroker().getFetchConfiguration();
+        DBDictionary dict = ((JDBCConfiguration) ((OpenJPAEntityManagerSPI) oem)
+            .getConfiguration()).getDBDictionaryInstance();
+
+        em.clear();
+        LockEmployee employee = em.find(LockEmployee.class, 1);
+        resetSQL();
+        int beforeIsolation = fConfig.getIsolation();
+        em.refresh(employee, lockMode);
+        if (dict.supportsIsolationForUpdate() && 
+            dict instanceof DB2Dictionary) {
+            assertEquals(expectedSupportSQLCount, getSQLCount());
+            assertAllSQLInOrder(expectedSupportSQL);
+        } else {
+            assertEquals(expectedNonSupportSQLCount, getSQLCount());
+            assertAllSQLInOrder(expectedNonSupportSQL);
+        }
+
+        resetSQL();
+        em.flush();
+        assertEquals(expectedVersionUpdateCount, getSQLCount());
+        if (expectedVersionUpdateSQL != null)
+            assertAllSQLInOrder(expectedVersionUpdateSQL);
+
+        assertEquals(beforeIsolation, fConfig.getIsolation());
+    }
+
+    /*
+     * Test em.lock(lockmode);
+     */
+    public void testLockLockModeIsolations() {
+        EntityManager em = emf.createEntityManager();
+
+        em.getTransaction().begin();
+
+        commonTestLockLockModeIsolations(em, LockModeType.NONE, 0, null, 0,
+            null, 0, null);
+        commonTestLockLockModeIsolations(em, LockModeType.READ, 0, null, 0,
+            null, 1, NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestLockLockModeIsolations(em, LockModeType.WRITE, 0, null, 0,
+            null, 1, VERSION_UPDATE_SQL);
+        commonTestLockLockModeIsolations(em, LockModeType.OPTIMISTIC, 0, null,
+            0, null, 1, NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestLockLockModeIsolations(em,
+            LockModeType.OPTIMISTIC_FORCE_INCREMENT, 0, null, 0, null, 1,
+            VERSION_UPDATE_SQL);
+        commonTestLockLockModeIsolations(em, LockModeType.PESSIMISTIC_READ, 2,
+            DB2_PESSIMISTIC_RS_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestLockLockModeIsolations(em, LockModeType.PESSIMISTIC_WRITE, 2,
+            DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            NON_SUPPORTED_OPTIMISTIC_SQL);
+        commonTestLockLockModeIsolations(em,
+            LockModeType.PESSIMISTIC_FORCE_INCREMENT, 2,
+            DB2_PESSIMISTIC_RR_SQL, 2, NON_SUPPORTED_FOR_UPDATE_SQL, 1,
+            VERSION_UPDATE_SQL);
+
+        em.getTransaction().rollback();
+        em.close();
+    }
+
+    private void commonTestLockLockModeIsolations(EntityManager em,
+        LockModeType lockMode, int expectedSupportSQLCount,
+        String expectedSupportSQL, int expectedNonSupportSQLCount,
+        String expectedNonSupportSQL, int expectedVersionUpdateCount,
+        String expectedVersionUpdateSQL) {
+        OpenJPAEntityManager oem = (OpenJPAEntityManager) em.getDelegate();
+        JDBCFetchConfigurationImpl fConfig = (JDBCFetchConfigurationImpl)
+            ((EntityManagerImpl) oem).getBroker().getFetchConfiguration();
+        DBDictionary dict = ((JDBCConfiguration) ((OpenJPAEntityManagerSPI) oem)
+            .getConfiguration()).getDBDictionaryInstance();
+
+        em.clear();
+        LockEmployee employee = em.find(LockEmployee.class, 1);
+        resetSQL();
+        int beforeIsolation = fConfig.getIsolation();
+        em.lock(employee, lockMode);
+        if (dict.supportsIsolationForUpdate() && 
+            dict instanceof DB2Dictionary) {
+            assertEquals(expectedSupportSQLCount, getSQLCount());
+            if (expectedSupportSQL != null)
+                assertAllSQLInOrder(expectedSupportSQL);
+        } else {
+            assertEquals(expectedNonSupportSQLCount, getSQLCount());
+            if (expectedNonSupportSQL != null)
+                assertAllSQLInOrder(expectedNonSupportSQL);
+        }
+        
+        resetSQL();
+        em.flush();
+        assertEquals(expectedVersionUpdateCount, getSQLCount());
+        if (expectedVersionUpdateSQL != null)
+            assertAllSQLInOrder(expectedVersionUpdateSQL);
+
+        assertEquals(beforeIsolation, fConfig.getIsolation());
+    }
+}

Propchange: openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockMode.java
------------------------------------------------------------------------------
    svn:eol-style = native
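
TestEmLockMode above verifies, per lock mode, which SQL OpenJPA issues through em.find(), em.refresh() and em.lock(), distinguishing a DB2 dictionary that supports isolation-for-update ("WITH RS"/"WITH RR" clauses) from dictionaries that fall back to FOR UPDATE. A short application-level sketch of the same EntityManager calls follows; the persistence-unit name is an assumption and the class is illustrative, not part of the commit:

    package org.apache.openjpa.persistence.lockmgr; // only so LockEmployee resolves; illustrative

    import javax.persistence.EntityManager;
    import javax.persistence.EntityManagerFactory;
    import javax.persistence.LockModeType;
    import javax.persistence.Persistence;

    // Illustrative use of the lock modes exercised by TestEmLockMode; the
    // persistence-unit name "lockmgr-test" is assumed for the example.
    public class EmLockModeUsageSketch {
        public static void main(String[] args) {
            EntityManagerFactory emf =
                Persistence.createEntityManagerFactory("lockmgr-test");
            EntityManager em = emf.createEntityManager();
            em.getTransaction().begin();
            try {
                // Pessimistic read at find time; on DB2 the test expects the
                // isolation to surface as a WITH RS clause, elsewhere as FOR UPDATE.
                LockEmployee e = em.find(LockEmployee.class, 1,
                    LockModeType.PESSIMISTIC_READ);

                // Refresh under a stronger lock, then force a version increment
                // at the next flush/commit.
                em.refresh(e, LockModeType.PESSIMISTIC_WRITE);
                em.lock(e, LockModeType.PESSIMISTIC_FORCE_INCREMENT);
            } finally {
                em.getTransaction().rollback();
                em.close();
                emf.close();
            }
        }
    }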

Added: openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockTimeout.java
URL: http://svn.apache.org/viewvc/openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockTimeout.java?rev=762161&view=auto
==============================================================================
--- openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockTimeout.java (added)
+++ openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockTimeout.java Sun Apr  5 21:29:42 2009
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.openjpa.persistence.lockmgr;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+
+import org.apache.openjpa.persistence.OpenJPAEntityManager;
+import org.apache.openjpa.persistence.jdbc.JDBCFetchPlan;
+import org.apache.openjpa.persistence.test.AllowFailure;
+
+/**
+ * Test the javax.persistence.lock.timeout hint using the EntityManager
+ * interface.
+ */
+public class TestEmLockTimeout extends SequencedActionsTest {
+    
+    public void setUp() {
+        setUp(LockEmployee.class
+            , "openjpa.LockManager", "mixed"
+            );
+        commonSetUp();
+        emf.close();
+    }
+
+    /*
+     * Test setting lock.timeout when the EntityManagerFactory is created.
+     */
+    public void testSetJavaxLockTimeoutAtProviderCreateEmf() {
+        setUp(LockEmployee.class
+            , "openjpa.LockManager", "mixed"
+            , "javax.persistence.lock.timeout", "13"
+            );
+        
+        EntityManager em = emf.createEntityManager();
+        OpenJPAEntityManager oem = (OpenJPAEntityManager)em.getDelegate();
+        JDBCFetchPlan fPlan = (JDBCFetchPlan) oem.getFetchPlan();
+
+        int lockTmo1 = fPlan.getLockTimeout();
+        assertEquals(13, lockTmo1);
+
+        em.close();
+        emf.close();
+    }
+
+    /*
+     * Test setting lock.timeout when the EntityManagerFactory is created,
+     * with multiple equivalent properties.
+     */
+    @AllowFailure(message="OPENJPA-??? - Provider.createEntityManagerFactory" +
+        " does not suppport multiple equivalent properties.")
+    public void testSetLockTimeoutsAtProviderCreateEmf() {
+        setUp(LockEmployee.class
+            , "openjpa.LockManager", "mixed"
+            , "openjpa.LockTimeout", 122
+            , "javax.persistence.lock.timeout", "133"
+            );
+        
+        EntityManager em = emf.createEntityManager();
+        OpenJPAEntityManager oem = (OpenJPAEntityManager)em.getDelegate();
+        JDBCFetchPlan fPlan = (JDBCFetchPlan) oem.getFetchPlan();
+
+        int lockTmo1 = fPlan.getLockTimeout();
+        assertEquals(133, lockTmo1);
+
+        em.close();
+        emf.close();
+    }
+
+    /*
+     * Test setting lock.timeout on em.find(), overriding the
+     * value set at createEntityManagerFactory and createEntityManager.
+     */
+    public void testSetJavaxLockTimeoutAtFind() {
+        setUp(LockEmployee.class
+            , "openjpa.LockManager", "mixed"
+            , "javax.persistence.lock.timeout", "13"
+            );
+        
+        EntityManager em = emf.createEntityManager();
+        
+        Map<String,Object> props2 = new HashMap<String,Object>();
+        props2.put("javax.persistence.lock.timeout", 3333);
+        em.find(LockEmployee.class, 1, props2);
+        
+        OpenJPAEntityManager oem = (OpenJPAEntityManager)em.getDelegate();
+        JDBCFetchPlan fPlan = (JDBCFetchPlan) oem.getFetchPlan();
+        int lockTmo3 = fPlan.getLockTimeout();
+        assertEquals(13, lockTmo3);
+        
+        em.close();
+        emf.close();
+    }
+}

Propchange: openjpa/trunk/openjpa-persistence-jdbc/src/test/java/org/apache/openjpa/persistence/lockmgr/TestEmLockTimeout.java
------------------------------------------------------------------------------
    svn:eol-style = native
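
TestEmLockTimeout above checks that a javax.persistence.lock.timeout hint supplied when the factory is created is reflected in the JDBCFetchPlan, and that a per-find hint does not replace that default. A minimal usage sketch under the same assumptions follows; the persistence-unit name is invented for the example and the class is not part of the commit:

    package org.apache.openjpa.persistence.lockmgr; // only so LockEmployee resolves; illustrative

    import java.util.HashMap;
    import java.util.Map;

    import javax.persistence.EntityManager;
    import javax.persistence.EntityManagerFactory;
    import javax.persistence.Persistence;

    // Illustrative use of the javax.persistence.lock.timeout hint covered by
    // TestEmLockTimeout; "lockmgr-test" is an assumed persistence-unit name.
    public class EmLockTimeoutUsageSketch {
        public static void main(String[] args) {
            Map<String, Object> emfProps = new HashMap<String, Object>();
            emfProps.put("javax.persistence.lock.timeout", "13"); // factory-wide default, millis
            EntityManagerFactory emf = Persistence
                .createEntityManagerFactory("lockmgr-test", emfProps);
            EntityManager em = emf.createEntityManager();
            try {
                // Per-call hint passed to find(); as the test observes, this does
                // not change the fetch plan's configured default of 13.
                Map<String, Object> findProps = new HashMap<String, Object>();
                findProps.put("javax.persistence.lock.timeout", 3333);
                em.find(LockEmployee.class, 1, findProps);
            } finally {
                em.close();
                emf.close();
            }
        }
    }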