Posted to commits@openjpa.apache.org by pc...@apache.org on 2006/07/19 23:35:07 UTC

svn commit: r423615 [5/44] - in /incubator/openjpa/trunk: ./ openjpa-jdbc-5/ openjpa-jdbc-5/src/ openjpa-jdbc-5/src/main/ openjpa-jdbc-5/src/main/java/ openjpa-jdbc-5/src/main/java/org/ openjpa-jdbc-5/src/main/java/org/apache/ openjpa-jdbc-5/src/main/j...

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/MappedQueryResultObjectProvider.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/MappedQueryResultObjectProvider.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/MappedQueryResultObjectProvider.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/MappedQueryResultObjectProvider.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,457 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Ref;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Stack;
+
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.meta.FieldMapping;
+import org.apache.openjpa.jdbc.meta.JavaSQLTypes;
+import org.apache.openjpa.jdbc.meta.QueryResultMapping;
+import org.apache.openjpa.jdbc.schema.Column;
+import org.apache.openjpa.jdbc.sql.AbstractResult;
+import org.apache.openjpa.jdbc.sql.Joins;
+import org.apache.openjpa.jdbc.sql.Result;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.lib.rop.ResultObjectProvider;
+import org.apache.openjpa.util.StoreException;
+import org.apache.openjpa.util.UnsupportedException;
+
+/**
+ * Provides data from a query result mapped by a {@link QueryResultMapping}.
+ *
+ * @author Pinaki Poddar
+ * @author Abe White
+ */
+class MappedQueryResultObjectProvider
+    implements ResultObjectProvider {
+
+    private final QueryResultMapping _map;
+    private final JDBCStore _store;
+    private final JDBCFetchConfiguration _fetch;
+    private final MappingResult _mres;
+
+    /**
+     * Constructor.
+     *
+     * @param res the result data
+     */
+    public MappedQueryResultObjectProvider(QueryResultMapping map,
+        JDBCStore store, JDBCFetchConfiguration fetch, Result res) {
+        _map = map;
+        _store = store;
+        _fetch = (fetch == null) ? store.getFetchConfiguration() : fetch;
+        _mres = new MappingResult(res);
+    }
+
+    public boolean supportsRandomAccess() {
+        try {
+            return _mres.supportsRandomAccess();
+        } catch (Throwable t) {
+            return false;
+        }
+    }
+
+    public void open() {
+    }
+
+    public Object getResultObject()
+        throws SQLException {
+        QueryResultMapping.PCResult[] pcs = _map.getPCResults();
+        Object[] cols = _map.getColumnResults();
+
+        // single object cases
+        if (pcs.length == 0 && cols.length == 1)
+            return _mres.getObject(cols[0], JavaSQLTypes.JDBC_DEFAULT, null);
+        if (pcs.length == 1 && cols.length == 0)
+            return _mres.load(pcs[0], _store,
+                (JDBCFetchState) _fetch.newFetchState());
+
+        // multiple objects
+        Object[] ret = new Object[pcs.length + cols.length];
+        for (int i = 0; i < pcs.length; i++)
+            ret[i] = _mres.load(pcs[i], _store,
+                (JDBCFetchState) _fetch.newFetchState());
+        for (int i = 0; i < cols.length; i++)
+            ret[pcs.length + i] = _mres.getObject(cols[i],
+                JavaSQLTypes.JDBC_DEFAULT, null);
+        return ret;
+    }
+
+    public boolean next()
+        throws SQLException {
+        return _mres.next();
+    }
+
+    public boolean absolute(int pos)
+        throws SQLException {
+        return _mres.absolute(pos);
+    }
+
+    public int size()
+        throws SQLException {
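+        // when the fetch configuration doesn't compute large result set
+        // sizes, or the delegate can't scroll, report an unknown (max) size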
+        if (_fetch.getLRSSize() == LRSSizes.SIZE_UNKNOWN
+            || !supportsRandomAccess())
+            return Integer.MAX_VALUE;
+        return _mres.size();
+    }
+
+    public void reset() {
+        throw new UnsupportedException();
+    }
+
+    public void close() {
+        _mres.close();
+    }
+
+    public void handleCheckedException(Exception e) {
+        if (e instanceof SQLException)
+            throw SQLExceptions.getStore((SQLException) e,
+                _store.getDBDictionary());
+        throw new StoreException(e);
+    }
+
+    /**
+     * Result type that maps requests using a given
+     * {@link QueryResultMapping.PCResult}.
+     */
+    private static class MappingResult
+        extends AbstractResult {
+
+        private final Result _res;
+        private final Stack _requests = new Stack();
+        private QueryResultMapping.PCResult _pc = null;
+
+        /**
+         * Supply delegate on construction.
+         */
+        public MappingResult(Result res) {
+            _res = res;
+        }
+
+        /**
+         * Load an instance of the given type. Should be used in place of
+         * {@link Result#load}.
+         */
+        public Object load(QueryResultMapping.PCResult pc, JDBCStore store,
+            JDBCFetchState fetchState)
+            throws SQLException {
+            _pc = pc;
+            try {
+                return load(pc.getCandidateTypeMapping(), store, fetchState);
+            } finally {
+                _pc = null;
+            }
+        }
+
+        public Object load(ClassMapping mapping, JDBCStore store,
+            JDBCFetchState fetchState)
+            throws SQLException {
+            return load(mapping, store, fetchState, null);
+        }
+
+        public Object load(ClassMapping mapping, JDBCStore store,
+            JDBCFetchState fetchState, Joins joins)
+            throws SQLException {
+            if (_pc == null)
+                return super.load(mapping, store, fetchState, joins);
+
+            // we go direct to the store manager so we can tell it not to load
+            // anything additional
+            return ((JDBCStoreManager) store).load(mapping, fetchState,
+                _pc.getExcludes(_requests), this);
+        }
+
+        public Object getEager(FieldMapping key) {
+            Object ret = _res.getEager(key);
+            if (_pc == null || ret != null)
+                return ret;
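+            // when mapping a PC result, the mapping itself may declare
+            // eager data for this field within this same result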
+            return (_pc.hasEager(_requests, key)) ? this : null;
+        }
+
+        public void putEager(FieldMapping key, Object res) {
+            _res.putEager(key, res);
+        }
+
+        public void close() {
+            _res.close();
+        }
+
+        public Joins newJoins() {
+            return _res.newJoins();
+        }
+
+        public boolean supportsRandomAccess()
+            throws SQLException {
+            return _res.supportsRandomAccess();
+        }
+
+        public ClassMapping getBaseMapping() {
+            return _res.getBaseMapping();
+        }
+
+        public int size()
+            throws SQLException {
+            return _res.size();
+        }
+
+        public void startDataRequest(Object mapping) {
+            _requests.push(mapping);
+        }
+
+        public void endDataRequest() {
+            _requests.pop();
+        }
+
+        public boolean wasNull()
+            throws SQLException {
+            return _res.wasNull();
+        }
+
+        protected Object translate(Object obj, Joins joins) {
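+            // while loading a mapped PC result, route the requested column
+            // through the user-defined result mapping; else pass it through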
+            return (_pc == null) ? obj : _pc.map(_requests, obj, joins);
+        }
+
+        protected boolean absoluteInternal(int row)
+            throws SQLException {
+            return _res.absolute(row);
+        }
+
+        protected boolean nextInternal()
+            throws SQLException {
+            return _res.next();
+        }
+
+        protected boolean containsInternal(Object obj, Joins joins)
+            throws SQLException {
+            return _res.contains(translate(obj, joins));
+        }
+
+        protected Array getArrayInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getArray((Column) obj, joins);
+            return _res.getArray(obj);
+        }
+
+        protected InputStream getAsciiStreamInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getAsciiStream((Column) obj, joins);
+            return _res.getAsciiStream(obj);
+        }
+
+        protected BigDecimal getBigDecimalInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBigDecimal((Column) obj, joins);
+            return _res.getBigDecimal(obj);
+        }
+
+        protected Number getNumberInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getNumber((Column) obj, joins);
+            return _res.getNumber(obj);
+        }
+
+        protected BigInteger getBigIntegerInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBigInteger((Column) obj, joins);
+            return _res.getBigInteger(obj);
+        }
+
+        protected InputStream getBinaryStreamInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBinaryStream((Column) obj, joins);
+            return _res.getBinaryStream(obj);
+        }
+
+        protected Blob getBlobInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBlob((Column) obj, joins);
+            return _res.getBlob(obj);
+        }
+
+        protected boolean getBooleanInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBoolean((Column) obj, joins);
+            return _res.getBoolean(obj);
+        }
+
+        protected byte getByteInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getByte((Column) obj, joins);
+            return _res.getByte(obj);
+        }
+
+        protected byte[] getBytesInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getBytes((Column) obj, joins);
+            return _res.getBytes(obj);
+        }
+
+        protected Calendar getCalendarInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getCalendar((Column) obj, joins);
+            return _res.getCalendar(obj);
+        }
+
+        protected char getCharInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getChar((Column) obj, joins);
+            return _res.getChar(obj);
+        }
+
+        protected Reader getCharacterStreamInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getCharacterStream((Column) obj, joins);
+            return _res.getCharacterStream(obj);
+        }
+
+        protected Clob getClobInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getClob((Column) obj, joins);
+            return _res.getClob(obj);
+        }
+
+        protected Date getDateInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getDate((Column) obj, joins);
+            return _res.getDate(obj);
+        }
+
+        protected java.sql.Date getDateInternal(Object obj, Calendar cal,
+            Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getDate((Column) obj, cal, joins);
+            return _res.getDate(obj, cal);
+        }
+
+        protected double getDoubleInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getDouble((Column) obj, joins);
+            return _res.getDouble(obj);
+        }
+
+        protected float getFloatInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getFloat((Column) obj, joins);
+            return _res.getFloat(obj);
+        }
+
+        protected int getIntInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getInt((Column) obj, joins);
+            return _res.getInt(obj);
+        }
+
+        protected Locale getLocaleInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getLocale((Column) obj, joins);
+            return _res.getLocale(obj);
+        }
+
+        protected long getLongInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getLong((Column) obj, joins);
+            return _res.getLong(obj);
+        }
+
+        protected Object getObjectInternal(Object obj, int metaTypeCode,
+            Object arg, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getObject((Column) obj, arg, joins);
+            return _res.getObject(obj, metaTypeCode, arg);
+        }
+
+        protected Object getSQLObjectInternal(Object obj, Map map, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getSQLObject((Column) obj, map, joins);
+            return _res.getSQLObject(obj, map);
+        }
+
+        protected Ref getRefInternal(Object obj, Map map, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getRef((Column) obj, map, joins);
+            return _res.getRef(obj, map);
+        }
+
+        protected short getShortInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getShort((Column) obj, joins);
+            return _res.getShort(obj);
+        }
+
+        protected String getStringInternal(Object obj, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getString((Column) obj, joins);
+            return _res.getString(obj);
+        }
+
+        protected Time getTimeInternal(Object obj, Calendar cal, Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getTime((Column) obj, cal, joins);
+            return _res.getTime(obj, cal);
+        }
+
+        protected Timestamp getTimestampInternal(Object obj, Calendar cal,
+            Joins joins)
+            throws SQLException {
+            if (obj instanceof Column)
+                return _res.getTimestamp((Column) obj, cal, joins);
+            return _res.getTimestamp(obj, cal);
+        }
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/MappedQueryResultObjectProvider.java
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/NativeJDBCSeq.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/NativeJDBCSeq.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/NativeJDBCSeq.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/NativeJDBCSeq.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.MessageFormat;
+
+import org.apache.openjpa.conf.OpenJPAConfiguration;
+import org.apache.openjpa.jdbc.conf.JDBCConfiguration;
+import org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl;
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.schema.Schema;
+import org.apache.openjpa.jdbc.schema.SchemaGroup;
+import org.apache.openjpa.jdbc.schema.SchemaTool;
+import org.apache.openjpa.jdbc.schema.Schemas;
+import org.apache.openjpa.jdbc.schema.Sequence;
+import org.apache.openjpa.jdbc.sql.DBDictionary;
+import org.apache.openjpa.lib.conf.Configurable;
+import org.apache.openjpa.lib.conf.Configuration;
+import org.apache.openjpa.lib.conf.Configurations;
+import org.apache.openjpa.lib.log.Log;
+import org.apache.openjpa.lib.util.Localizer;
+import org.apache.openjpa.lib.util.Options;
+import org.apache.openjpa.util.MetaDataException;
+import org.apache.openjpa.util.UserException;
+import serp.util.Numbers;
+import serp.util.Strings;
+
+////////////////////////////////////////////////////////////
+// NOTE: Do not change property names; see SequenceMetaData
+// and SequenceMapping for standard property names.
+////////////////////////////////////////////////////////////
+
+/**
+ * {@link JDBCSeq} implementation that uses a database sequence
+ * to generate numbers.
+ *
+ * @see JDBCSeq
+ * @see AbstractJDBCSeq
+ */
+public class NativeJDBCSeq
+    extends AbstractJDBCSeq
+    implements Configurable {
+
+    public static final String ACTION_DROP = "drop";
+    public static final String ACTION_ADD = "add";
+    public static final String ACTION_GET = "get";
+
+    private static Localizer _loc = Localizer.forPackage(NativeJDBCSeq.class);
+
+    private JDBCConfiguration _conf = null;
+    private String _seqName = "OPENJPA_SEQUENCE";
+    private int _increment = 1;
+    private int _initial = 1;
+    private int _allocate = 0;
+    private Sequence _seq = null;
+    private String _select = null;
+
+    // for deprecated auto-configuration support
+    private String _format = null;
+    private String _tableName = "DUAL";
+    private boolean _subTable = false;
+
+    /**
+     * The sequence name. Defaults to <code>OPENJPA_SEQUENCE</code>.
+     */
+    public String getSequence() {
+        return _seqName;
+    }
+
+    /**
+     * The sequence name. Defaults to <code>OPENJPA_SEQUENCE</code>.
+     */
+    public void setSequence(String seqName) {
+        _seqName = seqName;
+    }
+
+    /**
+     * @deprecated Use {@link #setSequence}. Retained for
+     * backwards-compatibility for auto-configuration.
+     */
+    public void setSequenceName(String seqName) {
+        setSequence(seqName);
+    }
+
+    /**
+     * @see Sequence#getInitialValue
+     */
+    public int getInitialValue() {
+        return _initial;
+    }
+
+    /**
+     * @see Sequence#setInitialValue
+     */
+    public void setInitialValue(int initial) {
+        _initial = initial;
+    }
+
+    /**
+     * @see Sequence#getAllocate
+     */
+    public int getAllocate() {
+        return _allocate;
+    }
+
+    /**
+     * @see Sequence#setAllocate
+     */
+    public void setAllocate(int allocate) {
+        _allocate = allocate;
+    }
+
+    /**
+     * @see Sequence#getIncrement
+     */
+    public int getIncrement() {
+        return _increment;
+    }
+
+    /**
+     * @see Sequence#setIncrement
+     */
+    public void setIncrement(int increment) {
+        _increment = increment;
+    }
+
+    /**
+     * @deprecated Retained for backwards-compatibility for auto-configuration.
+     */
+    public void setTableName(String table) {
+        _tableName = table;
+    }
+
+    /**
+     * @deprecated Retained for backwards-compatibility for auto-configuration.
+     */
+    public void setFormat(String format) {
+        _format = format;
+        _subTable = true;
+    }
+
+    public void addSchema(ClassMapping mapping, SchemaGroup group) {
+        // sequence already exists?
+        if (group.isKnownSequence(_seqName))
+            return;
+
+        String schemaName = Strings.getPackageName(_seqName);
+        if (schemaName.length() == 0)
+            schemaName = Schemas.getNewTableSchema(_conf);
+
+        // create table in this group
+        Schema schema = group.getSchema(schemaName);
+        if (schema == null)
+            schema = group.addSchema(schemaName);
+        schema.importSequence(_seq);
+    }
+
+    public void setConfiguration(Configuration conf) {
+        _conf = (JDBCConfiguration) conf;
+    }
+
+    public void startConfiguration() {
+    }
+
+    public void endConfiguration() {
+        buildSequence();
+
+        DBDictionary dict = _conf.getDBDictionaryInstance();
+        if (_format == null) {
+            _format = dict.nextSequenceQuery;
+            if (_format == null)
+                throw new MetaDataException(_loc.get("no-seq-sql", _seqName));
+        }
+        if (_tableName == null)
+            _tableName = "DUAL";
+
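+        // the format takes {0} as the full sequence name and, for
+        // subtable-style formats, {1} as the table to select against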
+        String name = dict.getFullName(_seq);
+        Object[] subs = (_subTable) ? new Object[]{ name, _tableName }
+            : new Object[]{ name };
+        _select = MessageFormat.format(_format, subs);
+    }
+
+    protected Object nextInternal(JDBCStore store, ClassMapping mapping)
+        throws SQLException {
+        long next = getSequence(getConnection(store));
+        return Numbers.valueOf(next);
+    }
+
+    /**
+     * Creates the sequence object.
+     */
+    private void buildSequence() {
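+        // the configured name may be qualified as <schema>.<sequence>;
+        // split it into its schema and sequence components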
+        String seqName = Strings.getClassName(_seqName);
+        String schemaName = Strings.getPackageName(_seqName);
+        if (schemaName.length() == 0)
+            schemaName = Schemas.getNewTableSchema(_conf);
+
+        // build the sequence in one of the designated schemas
+        SchemaGroup group = new SchemaGroup();
+        Schema schema = group.addSchema(schemaName);
+
+        _seq = schema.addSequence(seqName);
+        _seq.setInitialValue(_initial);
+        _seq.setIncrement(_increment);
+        _seq.setAllocate(_allocate);
+    }
+
+    /**
+     * Creates the sequence in the DB.
+     */
+    public void refreshSequence()
+        throws SQLException {
+        Log log = _conf.getLog(OpenJPAConfiguration.LOG_RUNTIME);
+        if (log.isInfoEnabled())
+            log.info(_loc.get("make-native-seq"));
+
+        // create the sequence
+        SchemaTool tool = new SchemaTool(_conf);
+        tool.setIgnoreErrors(true);
+        tool.createSequence(_seq);
+    }
+
+    /**
+     * Drops the sequence in the DB.
+     */
+    public void dropSequence()
+        throws SQLException {
+        Log log = _conf.getLog(OpenJPAConfiguration.LOG_RUNTIME);
+        if (log.isInfoEnabled())
+            log.info(_loc.get("drop-native-seq"));
+
+        // drop the sequence
+        SchemaTool tool = new SchemaTool(_conf);
+        tool.setIgnoreErrors(true);
+        tool.dropSequence(_seq);
+    }
+
+    /**
+     * Return the next sequence value.
+     */
+    private long getSequence(Connection conn)
+        throws SQLException {
+        PreparedStatement stmnt = null;
+        ResultSet rs = null;
+        try {
+            stmnt = conn.prepareStatement(_select);
+            rs = stmnt.executeQuery();
+            if (rs.next())
+                return rs.getLong(1);
+
+            // no row !?
+            throw new UserException(_loc.get("invalid-seq-sql", _select));
+        } finally {
+            // clean up our resources
+            if (rs != null)
+                try {
+                    rs.close();
+                } catch (SQLException se) {
+                }
+            if (stmnt != null)
+                try {
+                    stmnt.close();
+                } catch (SQLException se) {
+                }
+        }
+    }
+
+    /////////
+    // Main
+    /////////
+
+    /**
+     * Usage: java org.apache.openjpa.jdbc.kernel.NativeJDBCSeq [option]*
+     * -action/-a &lt;add | drop | get&gt;
+     *  Where the following options are recognized.
+     * <ul>
+     * <li><i>-properties/-p &lt;properties file or resource&gt;</i>: The
+     * path or resource name of an OpenJPA properties file containing
+     * information such as the license key and connection data as
+     * outlined in {@link JDBCConfiguration}. Optional.</li>
+     * <li><i>-&lt;property name&gt; &lt;property value&gt;</i>: All bean
+     * properties of the OpenJPA {@link JDBCConfiguration} can be set by
+     * using their names and supplying a value. For example:
+     * <code>-licenseKey adslfja83r3lkadf</code></li>
+     * </ul>
+     *  The various actions are as follows.
+     * <ul>
+     * <li><i>add</i>: Create the sequence.</li>
+     * <li><i>drop</i>: Drop the sequence.</li>
+     * <li><i>get</i>: Print the next sequence value.</li>
+     * </ul>
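+     *  For example (with a hypothetical properties file):
+     * <code>java org.apache.openjpa.jdbc.kernel.NativeJDBCSeq
+     * -p conf.properties -a get</code>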
+     */
+    public static void main(String[] args)
+        throws Exception {
+        Options opts = new Options();
+        args = opts.setFromCmdLine(args);
+        JDBCConfiguration conf = new JDBCConfigurationImpl();
+        try {
+            if (!run(conf, args, opts))
+                System.out.println(_loc.get("native-seq-usage"));
+        } finally {
+            conf.close();
+        }
+    }
+
+    /**
+     * Run the tool. Returns false if invalid options were given.
+     */
+    public static boolean run(JDBCConfiguration conf, String[] args,
+        Options opts)
+        throws Exception {
+        if (opts.containsKey("help") || opts.containsKey("-help"))
+            return false;
+
+        String action = opts.removeProperty("action", "a", null);
+        Configurations.populateConfiguration(conf, opts);
+        return run(conf, args, action);
+    }
+
+    /**
+     * Run the tool. Return false if an invalid option was given.
+     */
+    public static boolean run(JDBCConfiguration conf, String[] args,
+        String action)
+        throws Exception {
+        if (args.length != 0)
+            return false;
+
+        NativeJDBCSeq seq = new NativeJDBCSeq();
+        String props = Configurations.getProperties(conf.getSequence());
+        Configurations.configureInstance(seq, conf, props);
+
+        if (ACTION_DROP.equals(action))
+            seq.dropSequence();
+        else if (ACTION_ADD.equals(action))
+            seq.refreshSequence();
+        else if (ACTION_GET.equals(action)) {
+            Connection conn = conf.getDataSource2(null).getConnection();
+            try {
+                long cur = seq.getSequence(conn);
+                System.out.println(cur);
+            } finally {
+                try {
+                    conn.close();
+                } catch (SQLException se) {
+                }
+            }
+        } else
+            return false;
+        return true;
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/NativeJDBCSeq.java
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/OperationOrderUpdateManager.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/OperationOrderUpdateManager.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/OperationOrderUpdateManager.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/OperationOrderUpdateManager.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+
+import org.apache.openjpa.jdbc.schema.ForeignKey;
+import org.apache.openjpa.jdbc.sql.PrimaryRow;
+import org.apache.openjpa.jdbc.sql.Row;
+import org.apache.openjpa.jdbc.sql.RowImpl;
+import org.apache.openjpa.jdbc.sql.RowManager;
+import org.apache.openjpa.jdbc.sql.RowManagerImpl;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.kernel.OpenJPAStateManager;
+
+/**
+ * Update manager that writes SQL in object-level operation order.
+ *
+ * @author Abe White
+ */
+public class OperationOrderUpdateManager
+    extends AbstractUpdateManager {
+
+    public boolean orderDirty() {
+        return true;
+    }
+
+    protected RowManager newRowManager() {
+        return new RowManagerImpl(true);
+    }
+
+    protected PreparedStatementManager newPreparedStatementManager
+        (JDBCStore store, Connection conn) {
+        return new PreparedStatementManagerImpl(store, conn);
+    }
+
+    protected Collection flush(RowManager rowMgr,
+        PreparedStatementManager psMgr, Collection exceps) {
+        RowManagerImpl rmimpl = (RowManagerImpl) rowMgr;
+
+        // first take care of all secondary table deletes and 'all row' deletes
+        // (which are probably secondary table deletes), since no foreign
+        // keys ever rely on secondary table pks
+        flush(rmimpl.getAllRowDeletes(), psMgr);
+        flush(rmimpl.getSecondaryDeletes(), psMgr);
+
+        // now do any 'all row' updates, which typically null keys
+        flush(rmimpl.getAllRowUpdates(), psMgr);
+
+        // gather any updates we need to avoid fk constraints on deletes
+        Collection constraintUpdates = null;
+        for (Iterator itr = rmimpl.getDeletes().iterator(); itr.hasNext();) {
+            try {
+                constraintUpdates = analyzeDeleteConstraints(rmimpl,
+                    (PrimaryRow) itr.next(), constraintUpdates);
+            } catch (SQLException se) {
+                exceps = addException(exceps, SQLExceptions.getStore
+                    (se, dict));
+            }
+        }
+        if (constraintUpdates != null) {
+            flush(constraintUpdates, psMgr);
+            constraintUpdates.clear();
+        }
+
+        // flush primary rows in order
+        for (Iterator itr = rmimpl.getOrdered().iterator(); itr.hasNext();) {
+            try {
+                constraintUpdates = flushPrimaryRow(rmimpl, (PrimaryRow)
+                    itr.next(), psMgr, constraintUpdates);
+            } catch (SQLException se) {
+                exceps = addException(exceps, SQLExceptions.getStore
+                    (se, dict));
+            }
+        }
+        if (constraintUpdates != null)
+            flush(constraintUpdates, psMgr);
+
+        // take care of all secondary table inserts and updates last, since
+        // they may rely on previous inserts or updates, but nothing relies
+        // on them
+        flush(rmimpl.getSecondaryUpdates(), psMgr);
+
+        // flush any left over prepared statements
+        psMgr.flush();
+        return exceps;
+    }
+
+    /**
+     * Analyze the delete constraints on the given row, gathering necessary
+     * updates to null fks before deleting.
+     */
+    private Collection analyzeDeleteConstraints(RowManagerImpl rowMgr,
+        PrimaryRow row, Collection updates)
+        throws SQLException {
+        if (!row.isValid())
+            return updates;
+
+        ForeignKey[] fks = row.getTable().getForeignKeys();
+        OpenJPAStateManager sm;
+        PrimaryRow rel;
+        RowImpl update;
+        for (int i = 0; i < fks.length; i++) {
+            // when deleting ref fks we set the where value instead
+            sm = row.getForeignKeySet(fks[i]);
+            if (sm == null)
+                sm = row.getForeignKeyWhere(fks[i]);
+            if (sm == null)
+                continue;
+
+            // only need an update if we have an fk to a row that's being
+            // deleted before we are
+            rel = (PrimaryRow) rowMgr.getRow(fks[i].getPrimaryKeyTable(),
+                Row.ACTION_DELETE, sm, false);
+            if (rel == null || !rel.isValid()
+                || rel.getIndex() >= row.getIndex())
+                continue;
+
+            // create an update to null the offending fk before deleting
+            update = new RowImpl(row.getTable(), Row.ACTION_UPDATE);
+            row.copyInto(update, true);
+            update.setForeignKey(fks[i], row.getForeignKeyIO(fks[i]), null);
+            if (updates == null)
+                updates = new ArrayList();
+            updates.add(update);
+        }
+        return updates;
+    }
+
+    /**
+     * Flush the given row, creating deferred updates for dependencies.
+     */
+    private Collection flushPrimaryRow(RowManagerImpl rowMgr, PrimaryRow row,
+        PreparedStatementManager psMgr, Collection updates)
+        throws SQLException {
+        if (!row.isValid())
+            return updates;
+
+        // already analyzed deletes
+        if (row.getAction() == Row.ACTION_DELETE) {
+            psMgr.flush(row);
+            return updates;
+        }
+
+        ForeignKey[] fks = row.getTable().getForeignKeys();
+        OpenJPAStateManager sm;
+        PrimaryRow rel;
+        PrimaryRow update;
+        for (int i = 0; i < fks.length; i++) {
+            sm = row.getForeignKeySet(fks[i]);
+            if (sm == null)
+                continue;
+
+            // only need an update if we have an fk to a row that's being
+            // inserted after we are; if row is dependent on itself and no
+            // fk, must be an auto-inc because otherwise we wouldn't have
+            // recorded it
+            rel = (PrimaryRow) rowMgr.getRow(fks[i].getPrimaryKeyTable(),
+                Row.ACTION_INSERT, sm, false);
+            if (rel == null || !rel.isValid()
+                || rel.getIndex() < row.getIndex()
+                || (rel == row && !fks[i].isDeferred() && !fks[i].isLogical()))
+                continue;
+
+            // don't insert or update with the given fk; create a deferred
+            // update for after the rel row has been inserted; use a primary row
+            // to prevent setting values until after flush to get auto-inc
+            update = new PrimaryRow(row.getTable(), Row.ACTION_UPDATE, null);
+            if (row.getAction() == Row.ACTION_INSERT)
+                update.wherePrimaryKey(row.getPrimaryKey());
+            else
+                row.copyInto(update, true);
+            update.setForeignKey(fks[i], row.getForeignKeyIO(fks[i]), sm);
+            row.clearForeignKey(fks[i]);
+
+            if (updates == null)
+                updates = new ArrayList();
+            updates.add(update);
+        }
+
+        if (row.isValid()) // if update, maybe no longer needed
+            psMgr.flush(row);
+        return updates;
+    }
+
+    /**
+     * Flush the given collection of secondary rows.
+     */
+    protected void flush(Collection rows, PreparedStatementManager psMgr) {
+        if (rows.isEmpty())
+            return;
+
+        RowImpl row;
+        for (Iterator itr = rows.iterator(); itr.hasNext();) {
+            row = (RowImpl) itr.next();
+            if (row.isValid())
+                psMgr.flush(row);
+        }
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/OperationOrderUpdateManager.java
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PagingResultObjectProvider.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PagingResultObjectProvider.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PagingResultObjectProvider.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PagingResultObjectProvider.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.BitSet;
+
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.meta.FieldMapping;
+import org.apache.openjpa.jdbc.schema.Column;
+import org.apache.openjpa.jdbc.sql.Result;
+import org.apache.openjpa.jdbc.sql.SQLBuffer;
+import org.apache.openjpa.jdbc.sql.Select;
+import org.apache.openjpa.jdbc.sql.SelectExecutor;
+import org.apache.openjpa.kernel.OpenJPAStateManager;
+import org.apache.openjpa.kernel.StoreContext;
+import org.apache.openjpa.lib.util.Closeable;
+import org.apache.openjpa.util.InternalException;
+
+/**
+ * Object provider implementation that fetches one page of results at a
+ * time as it scrolls. If the {@link #getPagedFields} method returns a
+ * non-null bit set, this provider is a good fit for your configuration.
+ * The method tests the following conditions:
+ * <ul>
+ * <li>The eager fetch mode is <code>parallel</code>.</li>
+ * <li>The select's result should be treated as a large result set.</li>
+ * <li>The mapping being selected has fields that use parallel selects
+ * under the current fetch configuration.</li>
+ * </ul>
+ *  To use this provider, select the candidate mapping with eager fetch
+ * mode set to <code>join</code>. This provider will take care of performing
+ * <code>parallel</code> mode batch selects for each page it reads.
+ *
+ * @author Abe White
+ * @nojavadoc
+ */
+public class PagingResultObjectProvider
+    extends SelectResultObjectProvider {
+
+    private final ClassMapping[] _mappings;
+    private final Object[] _page;
+    private final int[] _idxs;
+    private final BitSet[] _paged;
+    private int _pos = -1; // logical pos
+    private int _pagePos = -1; // pos of page start
+
+    /**
+     * Return a bit set representing batch select fields that will be paged,
+     * or null if no fields need paging, which indicates that this provider
+     * should not be used.
+     *
+     * @see #PagingResultObjectProvider
+     */
+    public static BitSet getPagedFields(Select sel, ClassMapping mapping,
+        JDBCStore store, JDBCFetchState fetchState, int eagerMode,
+        long size) {
+        JDBCFetchConfiguration fetch = fetchState.getJDBCFetchConfiguration();
+        // if we have a range then we always use paging if there are any
+        // eager select fields; otherwise it depends on lrs and fetch settings
+        if (size == Long.MAX_VALUE || !sel.getAutoDistinct()) {
+            // not lrs?
+            if (!sel.isLRS())
+                return null;
+
+            // not configured for lazy loading?
+            if (fetch.getFetchBatchSize() < 0)
+                return null;
+        }
+
+        // not configured for eager selects?
+        eagerMode = Math.min(eagerMode, fetch.getEagerFetchMode());
+        if (eagerMode != fetch.EAGER_PARALLEL)
+            return null;
+
+        // are there any mappings that require batched selects?
+        FieldMapping[] fms = mapping.getDefinedFieldMappings();
+        BitSet paged = null;
+        for (int i = 0; i < fms.length; i++) {
+            if (fetchState != null
+                && !fetchState.requiresSelect(fms[i], false))
+                continue;
+
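+            // page fields that require a parallel select and are either
+            // to-many eager selects or can't use an eager outer join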
+            if (fms[i].supportsSelect(sel, sel.EAGER_PARALLEL, null, store,
+                fetch) > 0 && (fms[i].isEagerSelectToMany() || fms[i].
+                supportsSelect(sel, sel.EAGER_OUTER, null, store, fetch) == 0))
+            {
+                if (paged == null)
+                    paged = new BitSet();
+                paged.set(fms[i].getIndex());
+            }
+        }
+        return paged;
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param sel the select to execute
+     * @param mapping the mapping of the result objects
+     * @param store the store manager to delegate loading to
+     * @param fetchState the fetch state, or null for default
+     * @param paged the bit set returned from {@link #getPagedFields}
+     * @param size the known maximum size of the result, or
+     * {@link Long#MAX_VALUE} for no known limit
+     */
+    public PagingResultObjectProvider(SelectExecutor sel,
+        ClassMapping mapping, JDBCStore store, JDBCFetchState fetchState,
+        BitSet paged, long size) {
+        this(sel, new ClassMapping[]{ mapping }, store, fetchState,
+            new BitSet[]{ paged }, size);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param sel the select to execute
+     * @param mappings the mappings for the independent classes of the
+     * result objects
+     * @param store the store manager to delegate loading to
+     * @param fetchState the fetch state, or null for default
+     * @param paged the bit sets returned from {@link #getPagedFields}
+     * for each select in the possible union
+     * @param size the known maximum size of the result, or
+     * {@link Long#MAX_VALUE} for no known limit
+     */
+    public PagingResultObjectProvider(SelectExecutor sel,
+        ClassMapping[] mappings, JDBCStore store, JDBCFetchState fetchState,
+        BitSet[] paged, long size) {
+        super(sel, store, fetchState);
+        _mappings = mappings;
+        _paged = paged;
+
+        // don't let system construct this type of rop for stupid sizes
+        if (size <= 1)
+            throw new InternalException("size=" + size);
+
+        // try to find a good page size.  if the known size < batch size, use
+        // it.  if the batch size is set, then use that; if it's sorta close
+        // to the size, then use the size / 2 to get two full pages rather
+        // than a possible big one and small one.  cap everything at 50.
+        int batch = getFetchConfiguration().getFetchBatchSize();
+        int pageSize;
+        if (size <= batch && size <= 50)
+            pageSize = (int) size;
+        else if (batch > 0 && batch <= 50) {
+            if (size <= batch * 2) {
+                if (size % 2 == 0)
+                    pageSize = (int) (size / 2);
+                else
+                    pageSize = (int) (size / 2 + 1);
+            } else
+                pageSize = batch;
+        } else if (size <= 50)
+            pageSize = (int) size;
+        else if (size <= 100) {
+            if (size % 2 == 0)
+                pageSize = (int) (size / 2);
+            else
+                pageSize = (int) (size / 2 + 1);
+        } else
+            pageSize = 50;
+
+        _page = new Object[pageSize];
+        if (_paged.length > 1)
+            _idxs = new int[pageSize];
+        else
+            _idxs = null;
+    }
+
+    /**
+     * Return the page size in use.
+     */
+    public int getPageSize() {
+        return _page.length;
+    }
+
+    public void open()
+        throws SQLException {
+        super.open();
+        _pos = -1;
+    }
+
+    public boolean next()
+        throws SQLException {
+        _pos++;
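+        // within the cached page, a null slot marks the end of the result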
+        if (inPage())
+            return _page[_pos - _pagePos] != null;
+        if (!super.next()) {
+            setSize(_pos);
+            return false;
+        }
+        return true;
+    }
+
+    public boolean absolute(int pos)
+        throws SQLException {
+        _pos = pos;
+        if (inPage())
+            return _page[_pos - _pagePos] != null;
+        return super.absolute(pos);
+    }
+
+    public Object getResultObject()
+        throws SQLException {
+        if (!inPage())
+            fillPage();
+        return _page[_pos - _pagePos];
+    }
+
+    /**
+     * Test whether our current position is within the cached page of results.
+     */
+    private boolean inPage() {
+        return _pagePos != -1 && _pos >= _pagePos
+            && _pos < _pagePos + _page.length;
+    }
+
+    /**
+     * Start a new page at the present position.
+     */
+    private void fillPage()
+        throws SQLException {
+        // clear page
+        Arrays.fill(_page, null);
+
+        // cache result objects
+        JDBCStoreManager storeMgr = (JDBCStoreManager) getStore();
+        ClassMapping mapping;
+        Result res;
+        int idx;
+        for (int i = 0; i < _page.length; i++) {
+            res = getResult();
+            idx = res.indexOf();
+            if (_idxs != null)
+                _idxs[i] = idx;
+            mapping = res.getBaseMapping();
+            if (mapping == null)
+                mapping = _mappings[idx];
+
+            // rather than use the standard result.load(), we go direct to
+            // the store manager so we can pass in our eager-fetched fields as
+            // fields to exclude from the initial load of the objects
+            _page[i] = storeMgr.load(mapping, getFetchState(),
+                _paged[idx], res);
+            if (i != _page.length - 1 && !getResult().next()) {
+                setSize(_pos + i + 1);
+                break;
+            }
+        }
+
+        // load data for eager fields
+        _pagePos = _pos;
+        if (_page[0] != null) {
+            if (_page.length > 1 && _page[1] == null)
+                loadEagerFields();
+            else
+                executeEagerSelects();
+        }
+    }
+
+    /**
+     * When there is only one instance in a page, load fields as normal.
+     */
+    private void loadEagerFields()
+        throws SQLException {
+        int idx = (_idxs == null) ? 0 : _idxs[0];
+        if (_paged[idx] == null)
+            return;
+
+        JDBCStore store = getStore();
+        OpenJPAStateManager sm = store.getContext().getStateManager(_page[0]);
+        for (int i = 0, len = _paged[idx].length(); i < len; i++) {
+            if (_paged[idx].get(i)) {
+                _mappings[idx].getFieldMapping(i).load(sm, store,
+                    getFetchState());
+            }
+        }
+    }
+
+    /**
+     * Load eager batch selects for current page of results.
+     */
+    private void executeEagerSelects()
+        throws SQLException {
+        if (_idxs == null) {
+            executeEagerSelects(_mappings[0], _paged[0], 0, _page.length);
+            return;
+        }
+
+        int start = 0;
+        int idx = _idxs[0];
+        int pos = 0;
+        for (; pos < _page.length && _page[pos] != null; pos++) {
+            if (idx != _idxs[pos]) {
+                if (_paged[idx] != null)
+                    executeEagerSelects(_mappings[idx], _paged[idx], start,
+                        pos);
+                start = pos;
+                idx = _idxs[pos];
+            }
+        }
+        if (start < pos && _paged[idx] != null) // cleanup remainder
+            executeEagerSelects(_mappings[idx], _paged[idx], start, pos);
+    }
+
+    /**
+     * Load eager batch selects for the given mapping and its superclasses.
+     */
+    private void executeEagerSelects(ClassMapping mapping, BitSet paged,
+        int start, int end)
+        throws SQLException {
+        // base case
+        if (mapping == null)
+            return;
+
+        // recurse on superclass
+        executeEagerSelects(mapping.getJoinablePCSuperclassMapping(), paged,
+            start, end);
+
+        // figure out how many batch selects to do on this mapping
+        FieldMapping[] fms = mapping.getDefinedFieldMappings();
+        int sels = 0;
+        for (int i = 0; i < fms.length; i++)
+            if (paged.get(fms[i].getIndex()))
+                sels++;
+        if (sels == 0)
+            return;
+
+        // create where condition limiting instances to this page
+        JDBCStore store = getStore();
+        Select sel = store.getSQLFactory().newSelect();
+        SQLBuffer buf = new SQLBuffer(store.getDBDictionary());
+        Column[] pks = mapping.getPrimaryKeyColumns();
+        if (pks.length == 1)
+            inContains(sel, buf, mapping, pks, start, end);
+        else
+            orContains(sel, buf, mapping, pks, start, end);
+        sel.where(buf);
+
+        StoreContext ctx = store.getContext();
+        JDBCFetchConfiguration fetch = getFetchConfiguration();
+        JDBCFetchState fetchState = (JDBCFetchState) fetch.newFetchState();
+        // do each batch select
+        Object res;
+        int esels = 0;
+        SelectExecutor esel;
+        int unions;
+        for (int i = 0; i < fms.length; i++) {
+            if (!paged.get(fms[i].getIndex()))
+                continue;
+
+            unions = fms[i].supportsSelect(sel, Select.EAGER_PARALLEL, null,
+                store, fetch);
+            if (unions == 0)
+                continue;
+
+            // we don't need to clone if this is the last select
+            esels++;
+            if (esels < sels || unions > 1)
+                esel = sel.whereClone(unions);
+            else
+                esel = sel;
+
+            // get result
+            fms[i].selectEagerParallel(esel, null, store, fetchState,
+                JDBCFetchConfiguration.EAGER_PARALLEL);
+            res = esel.execute(store, fetch);
+            try {
+                // and load result into paged instances
+                for (int j = start; j < end && _page[j] != null; j++)
+                    res = fms[i].loadEagerParallel(ctx.getStateManager
+                        (_page[j]), store, fetchState, res);
+            } finally {
+                if (res instanceof Closeable)
+                    try {
+                        ((Closeable) res).close();
+                    } catch (Exception e) {
+                    }
+            }
+        }
+    }
+
+    /**
+     * Create an IN clause limiting the results to the current page.
+     */
+    private void inContains(Select sel, SQLBuffer buf, ClassMapping mapping,
+        Column[] pks, int start, int end) {
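+        // e.g. "T0.ID IN (1, 2, 3)" limited to the page's key values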
+        buf.append(sel.getColumnAlias(pks[0])).append(" IN (");
+        for (int i = start; i < end && _page[i] != null; i++) {
+            if (i > start)
+                buf.append(", ");
+            buf.appendValue(mapping.toDataStoreValue(_page[i], pks,
+                getStore()), pks[0]);
+        }
+        buf.append(")");
+    }
+
+    /**
+     * Create OR conditions limiting the results to the current page.
+     */
+    private void orContains(Select sel, SQLBuffer buf, ClassMapping mapping,
+        Column[] pks, int start, int end) {
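+        // e.g. "((T0.K1 = 1 AND T0.K2 = 2) OR (T0.K1 = 3 AND T0.K2 = 4))"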
+        String[] aliases = new String[pks.length];
+        for (int i = 0; i < pks.length; i++)
+            aliases[i] = sel.getColumnAlias(pks[i]);
+
+        Object[] vals;
+        buf.append("(");
+        for (int i = start; i < end && _page[i] != null; i++) {
+            if (i > start)
+                buf.append(" OR ");
+
+            vals = (Object[]) mapping.toDataStoreValue(_page[i], pks,
+                getStore());
+            buf.append("(");
+            for (int j = 0; j < vals.length; j++) {
+                if (j > 0)
+                    buf.append(" AND ");
+                buf.append(aliases[j]);
+                if (vals[j] == null)
+                    buf.append(" IS ");
+                else
+                    buf.append(" = ");
+                buf.appendValue(vals[j], pks[j]);
+            }
+            buf.append(")");
+        }
+        buf.append(")");
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PagingResultObjectProvider.java
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PessimisticLockManager.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PessimisticLockManager.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PessimisticLockManager.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PessimisticLockManager.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.sql.DBDictionary;
+import org.apache.openjpa.jdbc.sql.SQLBuffer;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.jdbc.sql.Select;
+import org.apache.openjpa.kernel.OpenJPAStateManager;
+import org.apache.openjpa.kernel.StoreContext;
+import org.apache.openjpa.kernel.VersionLockManager;
+import org.apache.openjpa.lib.util.Localizer;
+import org.apache.openjpa.util.LockException;
+
+/**
+ * Lock manager that uses exclusive database locks.
+ *
+ * @author Marc Prud'hommeaux
+ */
+public class PessimisticLockManager
+    extends VersionLockManager
+    implements JDBCLockManager {
+
+    public static final int LOCK_DATASTORE_ONLY = 1;
+
+    private static final Localizer _loc = Localizer.forPackage
+        (PessimisticLockManager.class);
+
+    private JDBCStore _store;
+
+    public PessimisticLockManager() {
+        setVersionCheckOnReadLock(false);
+        setVersionUpdateOnWriteLock(false);
+    }
+
+    public void setContext(StoreContext ctx) {
+        super.setContext(ctx);
+        _store = (JDBCStore) ctx.getStoreManager().getInnermostDelegate();
+    }
+
+    public boolean selectForUpdate(Select sel, int lockLevel) {
+        if (lockLevel == LOCK_NONE)
+            return false;
+
+        DBDictionary dict = _store.getDBDictionary();
+        if (dict.simulateLocking)
+            return false;
+        dict.assertSupport(dict.supportsSelectForUpdate,
+            "SupportsSelectForUpdate");
+
+        if (!sel.supportsLocking()) {
+            if (log.isInfoEnabled())
+                log.info(_loc.get("cant-lock-on-load",
+                    sel.toSelect(false, null).getSQL()));
+            return false;
+        }
+
+        ensureStoreManagerTransaction();
+        return true;
+    }
+
+    public void loadedForUpdate(OpenJPAStateManager sm) {
+        // we set a low lock level to indicate that we don't need datastore
+        // locking, but we don't necessarily have a read or write lock
+        // according to our superclass
+        if (getLockLevel(sm) == LOCK_NONE)
+            setLockLevel(sm, LOCK_DATASTORE_ONLY);
+    }
+
+    protected void lockInternal(OpenJPAStateManager sm, int level, int timeout,
+        Object sdata) {
+        // we can skip any already-locked instance regardless of level because
+        // we treat all locks the same (though super doesn't)
+        if (getLockLevel(sm) == LOCK_NONE) {
+            // only need to lock if not loaded from locking result
+            ConnectionInfo info = (ConnectionInfo) sdata;
+            if (info == null || info.result == null || !info.result.isLocking())
+                lockRow(sm, timeout);
+        }
+        super.lockInternal(sm, level, timeout, sdata);
+    }
+
+    /**
+     * Lock the specified instance row by issuing a "SELECT ... FOR UPDATE"
+     * statement.
+     */
+    private void lockRow(OpenJPAStateManager sm, int timeout) {
+        // assert that the dictionary supports the "SELECT ... FOR UPDATE"
+        // construct; if it does not and the assertion does not throw an
+        // exception, just return without locking
+        DBDictionary dict = _store.getDBDictionary();
+        if (dict.simulateLocking)
+            return;
+        dict.assertSupport(dict.supportsSelectForUpdate,
+            "SupportsSelectForUpdate");
+
+        Object id = sm.getObjectId();
+        ClassMapping mapping = (ClassMapping) sm.getMetaData();
+        while (mapping.getJoinablePCSuperclassMapping() != null)
+            mapping = mapping.getJoinablePCSuperclassMapping();
+
+        // select only the PK columns, since we just want to lock
+        Select select = _store.getSQLFactory().newSelect();
+        select.select(mapping.getPrimaryKeyColumns());
+        select.wherePrimaryKey(id, mapping, _store);
+        SQLBuffer sql = select.toSelect(true, _store.getFetchConfiguration());
+
+        ensureStoreManagerTransaction();
+        Connection conn = _store.getConnection();
+        PreparedStatement stmnt = null;
+        ResultSet rs = null;
+        try {
+            stmnt = sql.prepareStatement(conn);
+            if (timeout >= 0 && dict.supportsQueryTimeout) {
+                if (timeout < 1000) {
+                    timeout = 1000;
+                    if (log.isWarnEnabled())
+                        log.warn(_loc.get("millis-query-timeout"));
+                }
+                stmnt.setQueryTimeout(timeout / 1000);
+            }
+            rs = stmnt.executeQuery();
+            if (!rs.next())
+                throw new LockException(sm.getManagedInstance());
+        } catch (SQLException se) {
+            throw SQLExceptions.getStore(se, dict);
+        } finally {
+            if (stmnt != null)
+                try {
+                    stmnt.close();
+                } catch (SQLException se) {
+                }
+            if (rs != null)
+                try {
+                    rs.close();
+                } catch (SQLException se) {
+                }
+            try {
+                conn.close();
+            } catch (SQLException se) {
+            }
+        }
+    }
+
+    /**
+     * Ensure that an actual store transaction is in progress so that we can
+     * start locking. The transaction will already have been started when
+     * using a datastore transaction; this is only needed when locking in
+     * optimistic mode.
+     */
+    private void ensureStoreManagerTransaction() {
+        if (!_store.getContext().isStoreActive()) {
+            _store.getContext().beginStore();
+            if (log.isInfoEnabled())
+                log.info(_loc.get("start-trans-for-lock"));
+        }
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PessimisticLockManager.java
------------------------------------------------------------------------------
    svn:executable = *

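The lockRow method above amounts to taking a pessimistic row lock over plain JDBC: select only the primary key columns of the target row with a FOR UPDATE suffix and, where the driver allows it, bound the wait with a statement query timeout. A minimal sketch of that interaction under invented table and column names (ORDERS, ID):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    // Minimal sketch of the "SELECT ... FOR UPDATE" row-lock idea; the
    // ORDERS table and its ID column are hypothetical.
    public class RowLockSketch {

        static void lockRow(Connection conn, long id, int timeoutMillis)
            throws SQLException {
            String sql = "SELECT ID FROM ORDERS WHERE ID = ? FOR UPDATE";
            try (PreparedStatement stmnt = conn.prepareStatement(sql)) {
                if (timeoutMillis >= 0)
                    // JDBC timeouts are whole seconds; round sub-second values up
                    stmnt.setQueryTimeout(Math.max(1, timeoutMillis / 1000));
                stmnt.setLong(1, id);
                try (ResultSet rs = stmnt.executeQuery()) {
                    if (!rs.next())
                        throw new SQLException("no row to lock for id " + id);
                }
            }
        }
    }
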
Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManager.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManager.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManager.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManager.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.util.Collection;
+
+import org.apache.openjpa.jdbc.sql.RowImpl;
+
+/**
+ * Manages prepared statement execution.
+ *
+ * @author Abe White
+ */
+public interface PreparedStatementManager {
+
+    /**
+     * Return the exceptions encountered during all flushes.
+     */
+    public Collection getExceptions();
+
+    /**
+     * Flush the given row.
+     */
+    public void flush(RowImpl row);
+
+    /**
+     * This method must be called after the last row has been
+     * flushed, to flush any remaining statements.
+     */
+    public void flush();
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManager.java
------------------------------------------------------------------------------
    svn:executable = *

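The interface above implies a simple calling protocol: flush each row as it is produced, call the no-argument flush() once after the last row, and only then inspect getExceptions() for accumulated failures. A hypothetical caller, with the row collection assumed to have been assembled elsewhere, might look like:

    import java.util.Collection;
    import java.util.Iterator;

    import org.apache.openjpa.jdbc.kernel.PreparedStatementManager;
    import org.apache.openjpa.jdbc.sql.RowImpl;

    // Hypothetical caller illustrating the intended flush protocol; the rows
    // collection is assumed to hold RowImpl instances built elsewhere.
    public class FlushSketch {

        static Collection flushAll(PreparedStatementManager psMgr, Collection rows) {
            for (Iterator itr = rows.iterator(); itr.hasNext();)
                psMgr.flush((RowImpl) itr.next());   // one row at a time
            psMgr.flush();                           // flush anything still pending
            return psMgr.getExceptions();            // empty if everything succeeded
        }
    }
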
Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManagerImpl.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManagerImpl.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManagerImpl.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManagerImpl.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Collection;
+import java.util.LinkedList;
+
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.schema.Column;
+import org.apache.openjpa.jdbc.sql.DBDictionary;
+import org.apache.openjpa.jdbc.sql.Row;
+import org.apache.openjpa.jdbc.sql.RowImpl;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.kernel.OpenJPAStateManager;
+import org.apache.openjpa.lib.util.Localizer;
+import org.apache.openjpa.util.OpenJPAException;
+import org.apache.openjpa.util.OptimisticException;
+
+/**
+ * Basic prepared statement manager implementation.
+ *
+ * @author Abe White
+ */
+class PreparedStatementManagerImpl
+    implements PreparedStatementManager {
+
+    private final static Localizer _loc = Localizer.forPackage
+        (PreparedStatementManagerImpl.class);
+
+    private final JDBCStore _store;
+    private final Connection _conn;
+    private final DBDictionary _dict;
+
+    // track exceptions
+    private final Collection _exceptions = new LinkedList();
+
+    /**
+     * Constructor. Supply connection.
+     */
+    public PreparedStatementManagerImpl(JDBCStore store, Connection conn) {
+        _store = store;
+        _dict = store.getDBDictionary();
+        _conn = conn;
+    }
+
+    public Collection getExceptions() {
+        return _exceptions;
+    }
+
+    public void flush(RowImpl row) {
+        try {
+            flushInternal(row);
+        } catch (SQLException se) {
+            _exceptions.add(SQLExceptions.getStore(se, _dict));
+        } catch (OpenJPAException ke) {
+            _exceptions.add(ke);
+        }
+    }
+
+    /**
+     * Flush the given row.
+     */
+    private void flushInternal(RowImpl row)
+        throws SQLException {
+        // note auto-assigned columns on inserts so that generated key
+        // values can be read back after the statement executes
+        Column[] autoAssign = null;
+        if (row.getAction() == Row.ACTION_INSERT)
+            autoAssign = row.getTable().getAutoAssignedColumns();
+
+        // prepare statement
+        String sql = row.getSQL(_dict);
+        PreparedStatement stmnt = _conn.prepareStatement(sql);
+
+        // setup parameters and execute statement
+        row.flush(stmnt, _dict, _store);
+        try {
+            int count = stmnt.executeUpdate();
+            if (count != 1) {
+                Object failed = row.getFailedObject();
+                if (failed != null)
+                    _exceptions.add(new OptimisticException(failed));
+                else if (row.getAction() == Row.ACTION_INSERT)
+                    throw new SQLException(_loc.get
+                        ("update-failed-no-failed-obj", String.valueOf(count),
+                            sql));
+            }
+        }
+        catch (SQLException se) {
+            throw SQLExceptions.getStore(se, row.getFailedObject(), _dict);
+        } finally {
+            try {
+                stmnt.close();
+            } catch (SQLException se) {
+            }
+        }
+
+        // set auto assign values
+        if (autoAssign != null && autoAssign.length > 0
+            && row.getPrimaryKey() != null) {
+            OpenJPAStateManager sm = row.getPrimaryKey();
+            ClassMapping mapping = (ClassMapping) sm.getMetaData();
+            Object val;
+            for (int i = 0; i < autoAssign.length; i++) {
+                val = _dict.getGeneratedKey(autoAssign[i], _conn);
+                mapping.assertJoinable(autoAssign[i]).setAutoAssignedValue
+                    (sm, _store, autoAssign[i], val);
+            }
+        }
+    }
+
+    public void flush() {
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/PreparedStatementManagerImpl.java
------------------------------------------------------------------------------
    svn:executable = *

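After an insert into a table with auto-assigned columns, the implementation above asks the DBDictionary for each generated key and writes it back into the new instance through the column's Joinable. Outside of OpenJPA, the same round trip can be sketched with standard JDBC generated-key support (the PERSON table and its columns are invented):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    // Stand-alone sketch of reading back an auto-assigned key after an insert;
    // the PERSON table and its NAME/identity columns are hypothetical.
    public class GeneratedKeySketch {

        static long insertAndReadKey(Connection conn, String name)
            throws SQLException {
            String sql = "INSERT INTO PERSON (NAME) VALUES (?)";
            try (PreparedStatement stmnt =
                     conn.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS)) {
                stmnt.setString(1, name);
                if (stmnt.executeUpdate() != 1)
                    throw new SQLException("insert affected an unexpected row count");
                try (ResultSet keys = stmnt.getGeneratedKeys()) {
                    keys.next();
                    return keys.getLong(1);   // the database-assigned identity value
                }
            }
        }
    }
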
Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ProjectionResultObjectProvider.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ProjectionResultObjectProvider.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ProjectionResultObjectProvider.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ProjectionResultObjectProvider.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import org.apache.openjpa.jdbc.kernel.exps.Val;
+import org.apache.openjpa.jdbc.sql.Result;
+import org.apache.openjpa.jdbc.sql.SelectExecutor;
+import org.apache.openjpa.kernel.exps.QueryExpressions;
+
+/**
+ * Object provider implementation wrapped around a projection select.
+ *
+ * @author Abe White
+ */
+class ProjectionResultObjectProvider
+    extends SelectResultObjectProvider {
+
+    private final QueryExpressions[] _exps;
+
+    /**
+     * Constructor.
+     *
+     * @param sel the select to execute
+     * @param store the store manager to delegate loading to
+     * @param fetchState the fetch state
+     * @param exps the query expressions
+     */
+    public ProjectionResultObjectProvider(SelectExecutor sel, JDBCStore store,
+        JDBCFetchState fetchState, QueryExpressions exps) {
+        this(sel, store, fetchState, new QueryExpressions[]{ exps });
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param sel the select to execute
+     * @param store the store manager to delegate loading to
+     * @param fetchState the fetch state
+     * @param exps the query expressions
+     */
+    public ProjectionResultObjectProvider(SelectExecutor sel, JDBCStore store,
+        JDBCFetchState fetchState, QueryExpressions[] exps) {
+        super(sel, store, fetchState);
+        _exps = exps;
+    }
+
+    public Object getResultObject()
+        throws Exception {
+        Result res = getResult();
+        int idx = res.indexOf();
+        Object[] arr = new Object[_exps[idx].projections.length];
+        for (int i = 0; i < _exps[idx].projections.length; i++)
+            arr[i] = ((Val) _exps[idx].projections[i]).load(res, getStore(),
+                getFetchState());
+        return arr;
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/ProjectionResultObjectProvider.java
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLProjectionResultObjectProvider.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLProjectionResultObjectProvider.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLProjectionResultObjectProvider.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLProjectionResultObjectProvider.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+
+import org.apache.openjpa.jdbc.meta.JavaSQLTypes;
+import org.apache.openjpa.jdbc.sql.ResultSetResult;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.kernel.ResultPacker;
+import org.apache.openjpa.lib.rop.ResultObjectProvider;
+import org.apache.openjpa.util.StoreException;
+import org.apache.openjpa.util.UnsupportedException;
+import serp.util.Numbers;
+
+/**
+ * Provides all column data in a {@link ResultSet}.
+ *
+ * @author Abe White
+ */
+class SQLProjectionResultObjectProvider
+    implements ResultObjectProvider {
+
+    private final JDBCStore _store;
+    private final JDBCFetchConfiguration _fetch;
+    private final ResultSetResult _res;
+    private final ResultPacker _packer;
+    private final int _cols;
+
+    /**
+     * Constructor.
+     *
+     * @param res the result data
+     * @param cls the result class; may be null for the default
+     */
+    public SQLProjectionResultObjectProvider(JDBCStore store,
+        JDBCFetchConfiguration fetch, ResultSetResult res, Class cls)
+        throws SQLException {
+        _store = store;
+        _fetch = fetch;
+
+        ResultSetMetaData meta = res.getResultSet().getMetaData();
+        _res = res;
+        _cols = meta.getColumnCount();
+
+        if (cls != null) {
+            String[] aliases = new String[_cols];
+            for (int i = 0; i < _cols; i++)
+                aliases[i] = meta.getColumnLabel(i + 1);
+            _packer = new ResultPacker(null, aliases, cls);
+        } else
+            _packer = null;
+    }
+
+    public boolean supportsRandomAccess() {
+        try {
+            return _res.supportsRandomAccess();
+        } catch (Throwable t) {
+            return false;
+        }
+    }
+
+    public void open() {
+    }
+
+    public Object getResultObject()
+        throws SQLException {
+        if (_cols == 1) {
+            Object val = _res.getObject(Numbers.valueOf(1),
+                JavaSQLTypes.JDBC_DEFAULT, null);
+            return (_packer == null) ? val : _packer.pack(val);
+        }
+
+        Object[] vals = new Object[_cols];
+        for (int i = 0; i < vals.length; i++)
+            vals[i] = _res.getObject(Numbers.valueOf(i + 1),
+                JavaSQLTypes.JDBC_DEFAULT, null);
+        return (_packer == null) ? vals : _packer.pack(vals);
+    }
+
+    public boolean next()
+        throws SQLException {
+        return _res.next();
+    }
+
+    public boolean absolute(int pos)
+        throws SQLException {
+        return _res.absolute(pos);
+    }
+
+    public int size()
+        throws SQLException {
+        if (_fetch.getLRSSize() == LRSSizes.SIZE_UNKNOWN
+            || !supportsRandomAccess())
+            return Integer.MAX_VALUE;
+        return _res.size();
+    }
+
+    public void reset() {
+        throw new UnsupportedException();
+    }
+
+    public void close() {
+        _res.close();
+    }
+
+    public void handleCheckedException(Exception e) {
+        if (e instanceof SQLException)
+            throw SQLExceptions.getStore((SQLException) e,
+                _store.getDBDictionary());
+        throw new StoreException(e);
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLProjectionResultObjectProvider.java
------------------------------------------------------------------------------
    svn:executable = *

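SQLProjectionResultObjectProvider walks every column of the current row by position and, when there is more than one column, returns the row as an Object[] (optionally packed into a result class). Stripped of the Result and ResultPacker plumbing, the per-row step is roughly:

    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;

    // Bare-JDBC sketch of turning the current row into a value or an Object[].
    public class RowProjectionSketch {

        static Object readRow(ResultSet rs) throws SQLException {
            ResultSetMetaData meta = rs.getMetaData();
            int cols = meta.getColumnCount();
            if (cols == 1)
                return rs.getObject(1);          // single-column results stay unwrapped
            Object[] vals = new Object[cols];
            for (int i = 0; i < cols; i++)
                vals[i] = rs.getObject(i + 1);   // JDBC columns are 1-based
            return vals;
        }
    }
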
Added: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLStoreQuery.java
URL: http://svn.apache.org/viewvc/incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLStoreQuery.java?rev=423615&view=auto
==============================================================================
--- incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLStoreQuery.java (added)
+++ incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLStoreQuery.java Wed Jul 19 14:34:44 2006
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2006 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.openjpa.jdbc.kernel;
+
+import java.io.IOException;
+import java.io.StreamTokenizer;
+import java.io.StringReader;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.openjpa.jdbc.meta.ClassMapping;
+import org.apache.openjpa.jdbc.meta.MappingRepository;
+import org.apache.openjpa.jdbc.meta.QueryResultMapping;
+import org.apache.openjpa.jdbc.sql.DBDictionary;
+import org.apache.openjpa.jdbc.sql.ResultSetResult;
+import org.apache.openjpa.jdbc.sql.SQLBuffer;
+import org.apache.openjpa.jdbc.sql.SQLExceptions;
+import org.apache.openjpa.kernel.AbstractStoreQuery;
+import org.apache.openjpa.kernel.QueryContext;
+import org.apache.openjpa.kernel.StoreQuery;
+import org.apache.openjpa.lib.rop.RangeResultObjectProvider;
+import org.apache.openjpa.lib.rop.ResultObjectProvider;
+import org.apache.openjpa.lib.util.Localizer;
+import org.apache.openjpa.meta.ClassMetaData;
+import org.apache.openjpa.util.UserException;
+
+/**
+ * A SQL query.
+ *
+ * @author Abe White
+ * @nojavadoc
+ */
+public class SQLStoreQuery
+    extends AbstractStoreQuery {
+
+    private static final Localizer _loc = Localizer.forPackage
+        (SQLStoreQuery.class);
+
+    private transient final JDBCStore _store;
+
+    /**
+     * Construct a query managed by the given context.
+     */
+    public SQLStoreQuery(JDBCStore store) {
+        _store = store;
+    }
+
+    public JDBCStore getStore() {
+        return _store;
+    }
+
+    /**
+     * Utility method to substitute a plain '?' for each '?num' positional
+     * parameter in the given SQL statement, and re-order the
+     * <code>params</code> list to match the order in which the parameters
+     * appear in the statement.
+     */
+    private static String substituteParams(String sql, List params)
+        throws IOException {
+        // if there's no "?1" positional parameter, then we don't need to
+        // perform the parsing process
+        if (sql.indexOf("?1") == -1)
+            return sql;
+
+        List paramOrder = new ArrayList();
+        StreamTokenizer tok = new StreamTokenizer(new StringReader(sql));
+        tok.resetSyntax();
+        tok.quoteChar('\'');
+        tok.wordChars('0', '9');
+        tok.wordChars('?', '?');
+
+        StringBuffer buf = new StringBuffer(sql.length());
+        for (int ttype; (ttype = tok.nextToken()) != StreamTokenizer.TT_EOF;) {
+            switch (ttype) {
+                case StreamTokenizer.TT_WORD:
+                    // a token is a positional parameter if it starts with
+                    // a "?" and the rest of the token is all digits
+                    if (tok.sval.startsWith("?") && tok.sval.length() > 1 &&
+                        tok.sval.substring(1).indexOf("?") == -1) {
+                        buf.append("?");
+                        paramOrder.add(Integer.valueOf(tok.sval.substring(1)));
+                    } else
+                        buf.append(tok.sval);
+                    break;
+                case '\'':
+                    buf.append('\'');
+                    if (tok.sval != null) {
+                        buf.append(tok.sval);
+                        buf.append('\'');
+                    }
+                    break;
+                default:
+                    buf.append((char) ttype);
+            }
+        }
+
+        // now go through the paramOrder list and re-order the params array
+        List translated = new ArrayList();
+        for (Iterator i = paramOrder.iterator(); i.hasNext();) {
+            int index = ((Number) i.next()).intValue() - 1;
+            if (index >= params.size())
+                throw new UserException(_loc.get("sqlquery-missing-params",
+                    sql, String.valueOf(index), params));
+            translated.add(params.get(index));
+        }
+
+        // transfer the translated list into the original params list
+        params.clear();
+        params.addAll(translated);
+        return buf.toString();
+    }
+
+    public boolean supportsParameterDeclarations() {
+        return false;
+    }
+
+    public boolean supportsDataStoreExecution() {
+        return true;
+    }
+
+    public Executor newDataStoreExecutor(ClassMetaData meta,
+        boolean subclasses) {
+        return new SQLExecutor(this, meta);
+    }
+
+    public boolean requiresCandidateType() {
+        return false;
+    }
+
+    public boolean requiresParameterDeclarations() {
+        return false;
+    }
+
+    /**
+     * Executes the user-supplied query string directly as SQL.
+     */
+    private static class SQLExecutor
+        extends AbstractExecutor {
+
+        private final ClassMetaData _meta;
+        private final boolean _select;
+        private final QueryResultMapping _resultMapping;
+
+        public SQLExecutor(SQLStoreQuery q, ClassMetaData candidate) {
+            QueryContext ctx = q.getContext();
+            String resultMapping = ctx.getResultMappingName();
+            if (resultMapping == null)
+                _resultMapping = null;
+            else {
+                ClassLoader envLoader = ctx.getStoreContext().getClassLoader();
+                MappingRepository repos = q.getStore().getConfiguration().
+                    getMappingRepository();
+                _resultMapping = repos.getQueryResultMapping
+                    (ctx.getResultMappingScope(), resultMapping, envLoader,
+                        true);
+            }
+            _meta = candidate;
+
+            String sql = ctx.getQueryString();
+            if (sql != null)
+                sql = sql.trim();
+            if (sql == null || sql.length() == 0)
+                throw new UserException(_loc.get("no-sql"));
+            _select = sql.length() > 6
+                && sql.substring(0, 6).equalsIgnoreCase("select");
+        }
+
+        public ResultObjectProvider executeQuery(StoreQuery q,
+            Object[] params, boolean lrs, long startIdx, long endIdx) {
+            JDBCStore store = ((SQLStoreQuery) q).getStore();
+            DBDictionary dict = store.getDBDictionary();
+            String sql = q.getContext().getQueryString();
+
+            List paramList;
+            if (params.length > 0) {
+                paramList = new ArrayList(Arrays.asList(params));
+                try {
+                    sql = substituteParams(sql, paramList);
+                } catch (IOException ioe) {
+                    throw new UserException(ioe);
+                }
+            } else
+                paramList = Collections.EMPTY_LIST;
+
+            SQLBuffer buf = new SQLBuffer(dict).append(sql);
+            Connection conn = store.getConnection();
+            JDBCFetchConfiguration fetch = (JDBCFetchConfiguration)
+                q.getContext().getFetchConfiguration();
+
+            ResultObjectProvider rop;
+            PreparedStatement stmnt = null;
+            try {
+                // use the right prepare call depending on whether this is a
+                // select or a stored procedure call, and on the LRS setting
+                if (_select && !lrs)
+                    stmnt = buf.prepareStatement(conn);
+                else if (_select)
+                    stmnt = buf.prepareStatement(conn, fetch, -1, -1);
+                else if (!lrs)
+                    stmnt = buf.prepareCall(conn);
+                else
+                    stmnt = buf.prepareCall(conn, fetch, -1, -1);
+
+                int index = 0;
+                for (Iterator i = paramList.iterator(); i.hasNext();)
+                    dict.setUnknown(stmnt, ++index, i.next(), null);
+
+                ResultSetResult res = new ResultSetResult(conn, stmnt,
+                    stmnt.executeQuery(), store);
+                if (_resultMapping != null)
+                    rop = new MappedQueryResultObjectProvider(_resultMapping,
+                        store, fetch, res);
+                else if (q.getContext().getCandidateType() != null)
+                    rop = new GenericResultObjectProvider((ClassMapping) _meta,
+                        store, fetch, res);
+                else
+                    rop = new SQLProjectionResultObjectProvider(store, fetch,
+                        res, q.getContext().getResultType());
+            } catch (SQLException se) {
+                if (stmnt != null)
+                    try {
+                        stmnt.close();
+                    } catch (SQLException se2) {
+                    }
+                try {
+                    conn.close();
+                } catch (SQLException se2) {
+                }
+                throw SQLExceptions.getStore(se, dict);
+            }
+
+            if (startIdx != 0 || endIdx != Long.MAX_VALUE)
+                rop = new RangeResultObjectProvider(rop, startIdx, endIdx);
+            return rop;
+        }
+
+        public String[] getDataStoreActions(StoreQuery q, Object[] params,
+            long startIdx, long endIdx) {
+            return new String[]{ q.getContext().getQueryString() };
+        }
+
+        public boolean isPacking(StoreQuery q) {
+            return q.getContext().getCandidateType() == null;
+        }
+    }
+}

Propchange: incubator/openjpa/trunk/openjpa-jdbc/src/main/java/org/apache/openjpa/jdbc/kernel/SQLStoreQuery.java
------------------------------------------------------------------------------
    svn:executable = *
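
The substituteParams utility above rewrites JPA-style '?1', '?2' placeholders into plain JDBC '?' markers while reordering the parameter list to match their textual order, using a tokenizer so that quoted literals are left alone. A compact regex-based sketch of the same idea follows; unlike the tokenizer it does not skip quoted strings and does no bounds checking, so it is only an approximation:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Approximate sketch of the '?num' -> '?' rewrite; unlike the tokenizer
    // version above it does not protect quoted literals.
    public class ParamRewriteSketch {

        static String rewrite(String sql, List params) {
            Matcher m = Pattern.compile("\\?([0-9]+)").matcher(sql);
            List ordered = new ArrayList();
            StringBuffer buf = new StringBuffer();
            while (m.find()) {
                int idx = Integer.parseInt(m.group(1)) - 1;   // '?1' is params.get(0)
                ordered.add(params.get(idx));
                m.appendReplacement(buf, "?");
            }
            m.appendTail(buf);
            params.clear();
            params.addAll(ordered);
            return buf.toString();
        }

        public static void main(String[] args) {
            List params = new ArrayList();
            params.add("a");
            params.add("b");
            System.out.println(rewrite("SELECT * FROM T WHERE X = ?2 AND Y = ?1",
                params));   // SELECT * FROM T WHERE X = ? AND Y = ?
            System.out.println(params);   // [b, a]
        }
    }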