You are viewing a plain text version of this content. The canonical link for it is here.
Posted to derby-commits@db.apache.org by jb...@apache.org on 2005/05/02 08:26:03 UTC
svn commit: r165585 [34/42] - in
/incubator/derby/code/trunk/java/client/org/apache/derby: client/
client/am/ client/net/ client/resources/ jdbc/
Modified: incubator/derby/code/trunk/java/client/org/apache/derby/client/net/NetStatementRequest.java
URL: http://svn.apache.org/viewcvs/incubator/derby/code/trunk/java/client/org/apache/derby/client/net/NetStatementRequest.java?rev=165585&r1=165584&r2=165585&view=diff
==============================================================================
--- incubator/derby/code/trunk/java/client/org/apache/derby/client/net/NetStatementRequest.java (original)
+++ incubator/derby/code/trunk/java/client/org/apache/derby/client/net/NetStatementRequest.java Sun May 1 23:25:59 2005
@@ -19,286 +19,283 @@
*/
package org.apache.derby.client.net;
-import org.apache.derby.client.am.SqlException;
-import org.apache.derby.client.am.ColumnMetaData;
-import org.apache.derby.client.am.Types;
import org.apache.derby.client.am.Blob;
import org.apache.derby.client.am.Clob;
+import org.apache.derby.client.am.ColumnMetaData;
import org.apache.derby.client.am.ResultSet;
import org.apache.derby.client.am.Section;
+import org.apache.derby.client.am.SqlException;
+import org.apache.derby.client.am.Types;
// For performance, should we worry about the ordering of our DDM command parameters
-public class NetStatementRequest extends NetPackageRequest implements StatementRequestInterface
-{
- java.util.ArrayList extdtaPositions_ = null; // Integers: build EXTDTA for column i
- int overrideLid_ = FdocaConstants.FIRST_OVERRIDE_LID;
-
- // promototed parameters hold parameters that are promotoed to a different
- // data type because they are too large to represent in PROTOCOL otherwise.
- // This currently only applies for promotion of (VAR)CHAR -> CLOB and (VAR)BINARY -> BLOB
- // The key for this structure is the parameter index. Note that having this
- // collection does not eliminate the need for extdtaPositions_ because that
- // is still needed for non-promototed LOBs
- java.util.HashMap promototedParameters_ = new java.util.HashMap ();
- NetStatementRequest (NetAgent netAgent, CcsidManager ccsidManager, int bufferSize)
- {
- super (netAgent, ccsidManager, bufferSize);
- }
- //----------------------------- entry points ---------------------------------
+public class NetStatementRequest extends NetPackageRequest implements StatementRequestInterface {
+ java.util.ArrayList extdtaPositions_ = null; // Integers: build EXTDTA for column i
+ int overrideLid_ = FdocaConstants.FIRST_OVERRIDE_LID;
+
+ // promototed parameters hold parameters that are promotoed to a different
+ // data type because they are too large to represent in PROTOCOL otherwise.
+ // This currently only applies for promotion of (VAR)CHAR -> CLOB and (VAR)BINARY -> BLOB
+ // The key for this structure is the parameter index. Note that having this
+ // collection does not eliminate the need for extdtaPositions_ because that
+ // is still needed for non-promototed LOBs
+ java.util.HashMap promototedParameters_ = new java.util.HashMap();
- // Write the message to perform an execute immediate.
- // The SQL statement sent as command data cannot contain references
- // to either input variables or output variables.
- //
- // preconditions:
- public void writeExecuteImmediate (NetStatement materialStatement,
- String sql,
- Section section) throws SqlException
- {
- buildEXCSQLIMM (section,
- false, //sendQryinsid
- 0); //qryinsid
- buildSQLSTTcommandData (sql); // statement follows in sqlstt command data object
- }
+ NetStatementRequest(NetAgent netAgent, CcsidManager ccsidManager, int bufferSize) {
+ super(netAgent, ccsidManager, bufferSize);
+ }
- // Write the message to preform a prepare into.
- // Once the SQL statement has been prepared, it is executed until the unit of work, in
- // which the PRPSQLSTT command was issued, ends. An exception to this is if
- // Keep Dynamic is being used.
- //
- // preconditions:
- public void writePrepareDescribeOutput (NetStatement materialStatement,
- String sql,
- Section section) throws SqlException
- {
- buildPRPSQLSTT (section,
- sql,
- true, //sendRtnsqlda
- true, //sendTypsqlda
- CodePoint.TYPSQLDA_X_OUTPUT); //typsqlda
+ //----------------------------- entry points ---------------------------------
- if (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_ != null)
- buildSQLATTRcommandData (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_);
+ // Write the message to perform an execute immediate.
+ // The SQL statement sent as command data cannot contain references
+ // to either input variables or output variables.
+ //
+ // preconditions:
+ public void writeExecuteImmediate(NetStatement materialStatement,
+ String sql,
+ Section section) throws SqlException {
+ buildEXCSQLIMM(section,
+ false, //sendQryinsid
+ 0); //qryinsid
+ buildSQLSTTcommandData(sql); // statement follows in sqlstt command data object
+ }
- buildSQLSTTcommandData (sql); // statement follows in sqlstt command data object
- }
+ // Write the message to preform a prepare into.
+ // Once the SQL statement has been prepared, it is executed until the unit of work, in
+ // which the PRPSQLSTT command was issued, ends. An exception to this is if
+ // Keep Dynamic is being used.
+ //
+ // preconditions:
+ public void writePrepareDescribeOutput(NetStatement materialStatement,
+ String sql,
+ Section section) throws SqlException {
+ buildPRPSQLSTT(section,
+ sql,
+ true, //sendRtnsqlda
+ true, //sendTypsqlda
+ CodePoint.TYPSQLDA_X_OUTPUT); //typsqlda
- // Write the message to perform a reprepare.
- //
- // preconditions:
- public void writePrepare (NetStatement materialStatement,
- String sql,
- Section section) throws SqlException
- {
- buildPRPSQLSTT (section,
- sql,
- false, //sendRtnsqlda
- false, //sendTypsqlda
- 0); //typsqlda
+ if (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_ != null) {
+ buildSQLATTRcommandData(((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_);
+ }
- if (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_ != null)
- buildSQLATTRcommandData (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_);
+ buildSQLSTTcommandData(sql); // statement follows in sqlstt command data object
+ }
- buildSQLSTTcommandData (sql); // statement follows in sqlstt command data object
- }
+ // Write the message to perform a reprepare.
+ //
+ // preconditions:
+ public void writePrepare(NetStatement materialStatement,
+ String sql,
+ Section section) throws SqlException {
+ buildPRPSQLSTT(section,
+ sql,
+ false, //sendRtnsqlda
+ false, //sendTypsqlda
+ 0); //typsqlda
- // Write the message to execute prepared sql statement.
- //
- // preconditions:
- public void writeExecute (NetPreparedStatement materialPreparedStatement,
- Section section,
- ColumnMetaData parameterMetaData,
- Object[] inputs,
- int numInputColumns,
- boolean outputExpected,
- boolean chained
- ) throws SqlException // chained flag for blobs only //dupqry
- {
- buildEXCSQLSTT (section,
- true, // sendOutexp
- outputExpected, // outexp
- false, // sendPrcnam
- null, // prcnam
- false, // sendQryblksz
- false, // sendMaxrslcnt,
- 0, // maxrslcnt,
- false, // sendMaxblkext
- 0, // maxblkext
- false, // sendRslsetflg
- 0, // resultSetFlag
- false, // sendQryrowset
- 0); // qryrowset
-
- if (numInputColumns > 0) {
- if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty()))
- extdtaPositions_.clear(); // reset extdta column position markers
-
- boolean overrideExists = buildSQLDTAcommandData (numInputColumns,
- parameterMetaData,
- inputs);
+ if (((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_ != null) {
+ buildSQLATTRcommandData(((NetStatement) materialStatement).statement_.cursorAttributesToSendOnPrepare_);
+ }
- // can we eleminate the chain argument needed for lobs
- buildEXTDTA (parameterMetaData, inputs, chained);
+ buildSQLSTTcommandData(sql); // statement follows in sqlstt command data object
}
- }
+ // Write the message to execute prepared sql statement.
+ //
+ // preconditions:
+ public void writeExecute(NetPreparedStatement materialPreparedStatement,
+ Section section,
+ ColumnMetaData parameterMetaData,
+ Object[] inputs,
+ int numInputColumns,
+ boolean outputExpected,
+ boolean chained) throws SqlException // chained flag for blobs only //dupqry
+ {
+ buildEXCSQLSTT(section,
+ true, // sendOutexp
+ outputExpected, // outexp
+ false, // sendPrcnam
+ null, // prcnam
+ false, // sendQryblksz
+ false, // sendMaxrslcnt,
+ 0, // maxrslcnt,
+ false, // sendMaxblkext
+ 0, // maxblkext
+ false, // sendRslsetflg
+ 0, // resultSetFlag
+ false, // sendQryrowset
+ 0); // qryrowset
+
+ if (numInputColumns > 0) {
+ if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty())) {
+ extdtaPositions_.clear(); // reset extdta column position markers
+ }
+
+ boolean overrideExists = buildSQLDTAcommandData(numInputColumns,
+ parameterMetaData,
+ inputs);
- // Write the message to open a bound or prepared query with input parameters.
- // Check this -> For open query with input parameters
- //
- // preconditions:
- public void writeOpenQuery (NetPreparedStatement materialPreparedStatement,
- Section section,
- int fetchSize,
- int resultSetType,
- int numInputColumns,
- org.apache.derby.client.am.ColumnMetaData parameterMetaData,
- Object[] inputs) throws SqlException
- {
- boolean sendQryrowset = checkSendQryrowset (fetchSize, resultSetType);
- fetchSize = checkFetchsize (fetchSize, resultSetType);
- // think about if there is a way we can call build ddm just passing ddm parameters and not passing the material ps object
- // maybe not, if sometimes we need to set the caches hanging off the ps object during the ddm build
- // maybe we can extricate conditionals in the build ddm logic outside
-
- buildOPNQRY (section,
- sendQryrowset,
- fetchSize);
-
-
- // may be able to merge this with firstContinueQuery_ and push above conditional to common
- ((NetStatement) materialPreparedStatement).qryrowsetSentOnOpnqry_ = sendQryrowset;
-
- if (numInputColumns > 0) {
- if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty()))
- extdtaPositions_.clear(); // reset extdta column position markers
- // is this the best place for this
- // EXCSQSTT needs this too
-
- // think about having this method return a boolean to
- // indicate the extdta should be built
- boolean overrideExists = buildSQLDTAcommandData (numInputColumns,
- parameterMetaData,
- inputs);
-
- // can we eleminate the chain argument needed for lobs
- // do we chain after Extdta's on open, verify this
- buildEXTDTA (parameterMetaData,
- inputs,
- false); //chained, do we chain after Extdta's on open
+ // can we eleminate the chain argument needed for lobs
+ buildEXTDTA(parameterMetaData, inputs, chained);
+ }
}
- }
- // Write the message to open a bound or prepared query without input parameters.
- // Check this-> For open query without input parameters
- public void writeOpenQuery (NetStatement materialStatement,
- Section section,
- int fetchSize,
- int resultSetType) throws SqlException
- {
- boolean sendQryrowset = checkSendQryrowset (fetchSize, resultSetType);
- fetchSize = checkFetchsize (fetchSize, resultSetType);
- // think about if there is a way we can call build ddm just passing ddm parameters and not passing the material ps object
- // maybe not, if sometimes we need to set the caches hanging off the ps object during the ddm build
- // maybe we can extricate conditionals in the build ddm logic outside
- buildOPNQRY (section,
- sendQryrowset,
- fetchSize);
+ // Write the message to open a bound or prepared query with input parameters.
+ // Check this -> For open query with input parameters
+ //
+ // preconditions:
+ public void writeOpenQuery(NetPreparedStatement materialPreparedStatement,
+ Section section,
+ int fetchSize,
+ int resultSetType,
+ int numInputColumns,
+ org.apache.derby.client.am.ColumnMetaData parameterMetaData,
+ Object[] inputs) throws SqlException {
+ boolean sendQryrowset = checkSendQryrowset(fetchSize, resultSetType);
+ fetchSize = checkFetchsize(fetchSize, resultSetType);
+ // think about if there is a way we can call build ddm just passing ddm parameters and not passing the material ps object
+ // maybe not, if sometimes we need to set the caches hanging off the ps object during the ddm build
+ // maybe we can extricate conditionals in the build ddm logic outside
+
+ buildOPNQRY(section,
+ sendQryrowset,
+ fetchSize);
+
+
+ // may be able to merge this with firstContinueQuery_ and push above conditional to common
+ ((NetStatement) materialPreparedStatement).qryrowsetSentOnOpnqry_ = sendQryrowset;
+
+ if (numInputColumns > 0) {
+ if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty())) {
+ extdtaPositions_.clear(); // reset extdta column position markers
+ }
+ // is this the best place for this
+ // EXCSQSTT needs this too
+
+ // think about having this method return a boolean to
+ // indicate the extdta should be built
+ boolean overrideExists = buildSQLDTAcommandData(numInputColumns,
+ parameterMetaData,
+ inputs);
+
+ // can we eleminate the chain argument needed for lobs
+ // do we chain after Extdta's on open, verify this
+ buildEXTDTA(parameterMetaData,
+ inputs,
+ false); //chained, do we chain after Extdta's on open
+ }
+ }
+ // Write the message to open a bound or prepared query without input parameters.
+ // Check this-> For open query without input parameters
+ public void writeOpenQuery(NetStatement materialStatement,
+ Section section,
+ int fetchSize,
+ int resultSetType) throws SqlException {
+ boolean sendQryrowset = checkSendQryrowset(fetchSize, resultSetType);
+ fetchSize = checkFetchsize(fetchSize, resultSetType);
+
+ // think about if there is a way we can call build ddm just passing ddm parameters and not passing the material ps object
+ // maybe not, if sometimes we need to set the caches hanging off the ps object during the ddm build
+ // maybe we can extricate conditionals in the build ddm logic outside
+ buildOPNQRY(section,
+ sendQryrowset,
+ fetchSize);
- // may be able to merge this with firstContinueQuery_ and push above conditional to common
- ((NetStatement) materialStatement).qryrowsetSentOnOpnqry_ = sendQryrowset; // net-specific event
+ // may be able to merge this with firstContinueQuery_ and push above conditional to common
+ ((NetStatement) materialStatement).qryrowsetSentOnOpnqry_ = sendQryrowset; // net-specific event
- }
- // Write the message to peform a describe input.
- //
+ }
- public void writeDescribeInput (NetPreparedStatement materialPreparedStatement,
- Section section) throws SqlException
- {
- int typsqlda = CodePoint.TYPSQLDA_X_INPUT;
+ // Write the message to peform a describe input.
+ //
- buildDSCSQLSTT (section,
- true, //sendTypsqlda
- typsqlda);
- }
+ public void writeDescribeInput(NetPreparedStatement materialPreparedStatement,
+ Section section) throws SqlException {
+ int typsqlda = CodePoint.TYPSQLDA_X_INPUT;
+
+ buildDSCSQLSTT(section,
+ true, //sendTypsqlda
+ typsqlda);
+ }
- // Write the message to peform a describe output.
- //
- // preconditions:
- public void writeDescribeOutput (NetPreparedStatement materialPreparedStatement,
- Section section) throws SqlException
- {
- // pick standard, light, extended sqlda. possibly push this up even more
- // right now use SQLAM level as determining factor and go for the most data.
- // if standard is the only suported option, don't send the typsqlda
- // and let server default to standard. This prevents accidentally sending
- // a typsqlda to a downlevel server. typsqlda is only supported at sqlam 6.
+ // Write the message to peform a describe output.
+ //
+ // preconditions:
+ public void writeDescribeOutput(NetPreparedStatement materialPreparedStatement,
+ Section section) throws SqlException {
+ // pick standard, light, extended sqlda. possibly push this up even more
+ // right now use SQLAM level as determining factor and go for the most data.
+ // if standard is the only suported option, don't send the typsqlda
+ // and let server default to standard. This prevents accidentally sending
+ // a typsqlda to a downlevel server. typsqlda is only supported at sqlam 6.
//KATHEY CHECK
- buildDSCSQLSTT (section,
- true, //sendTypsqlda
- CodePoint.TYPSQLDA_X_OUTPUT); //typsqlda
- }
+ buildDSCSQLSTT(section,
+ true, //sendTypsqlda
+ CodePoint.TYPSQLDA_X_OUTPUT); //typsqlda
+ }
- // Write the message to execute a stored procedure.
- //
- // preconditions:
- public void writeExecuteCall (NetStatement materialStatement,
- boolean outputExpected,
- String procedureName,
- Section section,
- int fetchSize,
- boolean suppressResultSets, // for batch updates == true
- int resultSetType,
- ColumnMetaData parameterMetaData,
- Object[] inputs) throws SqlException // chain is for blobs
- {
- // always send QRYROWSET on EXCSQLSTT
- boolean sendQryrowset = true;
- fetchSize = (fetchSize == 0) ? org.apache.derby.client.am.Configuration.defaultFetchSize : fetchSize;
-
- boolean sendPrcnam = (procedureName != null) ? true : false;
- int numParameters = (parameterMetaData != null) ? parameterMetaData.columns_ : 0;
- outputExpected = numParameters > 0;
-
- // is it right here to send maxblkext (-1)
- buildEXCSQLSTT (section,
- true, // sendOutexp
- outputExpected, // outexp
- sendPrcnam, // sendPrcnam
- procedureName, // prcnam
- true, // sendQryblksz
- !suppressResultSets, // sendMaxrslcnt,
- CodePoint.MAXRSLCNT_NOLIMIT, // maxrslcnt,
- true, // sendMaxblkext
- -1, // maxblkext (-1 for AR capable of receiving entire result set)
- true, // sendRslsetflg
- calculateResultSetFlags(), // resultSetFlag
- sendQryrowset, // sendQryrowset
- fetchSize); // qryrowset
-
- if (numParameters > 0) {
- if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty()))
- extdtaPositions_.clear(); // reset extdta column position markers
- // is this the best place for this (OPNQRY needs this too)
-
- // think about having this method return a boolean to
- // indicate the extdta should be built
- boolean overrideExists = buildSQLDTAcommandData (numParameters,
- parameterMetaData,
- inputs);
+ // Write the message to execute a stored procedure.
+ //
+ // preconditions:
+ public void writeExecuteCall(NetStatement materialStatement,
+ boolean outputExpected,
+ String procedureName,
+ Section section,
+ int fetchSize,
+ boolean suppressResultSets, // for batch updates == true
+ int resultSetType,
+ ColumnMetaData parameterMetaData,
+ Object[] inputs) throws SqlException // chain is for blobs
+ {
+ // always send QRYROWSET on EXCSQLSTT
+ boolean sendQryrowset = true;
+ fetchSize = (fetchSize == 0) ? org.apache.derby.client.am.Configuration.defaultFetchSize : fetchSize;
+
+ boolean sendPrcnam = (procedureName != null) ? true : false;
+ int numParameters = (parameterMetaData != null) ? parameterMetaData.columns_ : 0;
+ outputExpected = numParameters > 0;
+
+ // is it right here to send maxblkext (-1)
+ buildEXCSQLSTT(section,
+ true, // sendOutexp
+ outputExpected, // outexp
+ sendPrcnam, // sendPrcnam
+ procedureName, // prcnam
+ true, // sendQryblksz
+ !suppressResultSets, // sendMaxrslcnt,
+ CodePoint.MAXRSLCNT_NOLIMIT, // maxrslcnt,
+ true, // sendMaxblkext
+ -1, // maxblkext (-1 for AR capable of receiving entire result set)
+ true, // sendRslsetflg
+ calculateResultSetFlags(), // resultSetFlag
+ sendQryrowset, // sendQryrowset
+ fetchSize); // qryrowset
+
+ if (numParameters > 0) {
+ if ((extdtaPositions_ != null) && (!extdtaPositions_.isEmpty())) {
+ extdtaPositions_.clear(); // reset extdta column position markers
+ }
+ // is this the best place for this (OPNQRY needs this too)
+
+ // think about having this method return a boolean to
+ // indicate the extdta should be built
+ boolean overrideExists = buildSQLDTAcommandData(numParameters,
+ parameterMetaData,
+ inputs);
- buildEXTDTA (parameterMetaData, inputs, false); // no chained autocommit for CALLs
- }
+ buildEXTDTA(parameterMetaData, inputs, false); // no chained autocommit for CALLs
+ }
- ((NetStatement) materialStatement).qryrowsetSentOnOpnqry_ = sendQryrowset;
- }
+ ((NetStatement) materialStatement).qryrowsetSentOnOpnqry_ = sendQryrowset;
+ }
- // Write the message to execute an SQL Set Statement.
+ // Write the message to execute an SQL Set Statement.
/*
public void writeSetGenericSQLSetInfo (org.apache.derby.client.am.SetGenericSQLSetPiggybackCommand setGenericSQLSetPiggybackCommand,
org.apache.derby.client.am.JDBCSection section) throws SqlException
@@ -314,1267 +311,1240 @@
}
*/
- //----------------------helper methods----------------------------------------
- // These methods are "private protected", which is not a recognized java privilege,
- // but means that these methods are private to this class and to subclasses,
- // and should not be used as package-wide friendly methods.
-
- // Build the Open Query Command to open a query to a relational database.
- // At SQLAM >= 7 we can request the return of a DA, are there
- // scenarios where this should currently be done (it is not supported now)
- //
- // preconditions:
- // the sqlam and/or prdid must support command and parameters passed to this method,
- // method will not validate against the connection's level of support
- //
- void buildOPNQRY (Section section,
- boolean sendQueryRowSet,
- int fetchSize
- ) throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.OPNQRY);
-
- buildPKGNAMCSN (section);
- buildQRYBLKSZ(); // specify a hard coded query block size
+ //----------------------helper methods----------------------------------------
+ // These methods are "private protected", which is not a recognized java privilege,
+ // but means that these methods are private to this class and to subclasses,
+ // and should not be used as package-wide friendly methods.
+
+ // Build the Open Query Command to open a query to a relational database.
+ // At SQLAM >= 7 we can request the return of a DA, are there
+ // scenarios where this should currently be done (it is not supported now)
+ //
+ // preconditions:
+ // the sqlam and/or prdid must support command and parameters passed to this method,
+ // method will not validate against the connection's level of support
+ //
+ void buildOPNQRY(Section section,
+ boolean sendQueryRowSet,
+ int fetchSize) throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.OPNQRY);
+
+ buildPKGNAMCSN(section);
+ buildQRYBLKSZ(); // specify a hard coded query block size
+
+ if (sendQueryRowSet) {
+ buildMAXBLKEXT(-1);
+ buildQRYROWSET(fetchSize);
+ }
- if (sendQueryRowSet) {
- buildMAXBLKEXT (-1);
- buildQRYROWSET (fetchSize);
+ updateLengthBytes(); // opnqry is complete
}
- updateLengthBytes(); // opnqry is complete
- }
-
- // Build the Execute Immediate SQL Statement Command to
- // execute a non-cursor SQL statement sent as command data.
- //
- // precondtions:
- void buildEXCSQLIMM (Section section,
- boolean sendQryinsid,
- long qryinsid) throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.EXCSQLIMM);
-
- buildPKGNAMCSN (section);
- buildRDBCMTOK();
- if (sendQryinsid) buildQRYINSID (qryinsid);
-
- updateLengthBytes();
- }
-
- // Build the Prepare SQL Statement Command to dynamically binds an
- // SQL statement to a section in an existing relational database (RDB) package.
- //
- // preconditions:
- // the sqlam and/or prdid must support command and parameters passed to this method,
- // method will not validate against the connection's level of support
- void buildPRPSQLSTT (Section section,
- String sql,
- boolean sendRtnsqlda,
- boolean sendTypsqlda,
- int typsqlda
- ) throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.PRPSQLSTT);
-
- buildPKGNAMCSN (section);
- if (sendRtnsqlda) buildRTNSQLDA();
- if (sendTypsqlda) buildTYPSQLDA (typsqlda);
-
- updateLengthBytes();
- }
-
- // Build the command to execute an SQL SET Statement.
- // Called by NETSetClientPiggybackCommand.write()
- //
- // preconditions:
- // the sqlam and/or prdid must support command and parameters passed to this method,
- // method will not validate against the connection's level of support
- void buildEXCSQLSET (Section section)
- throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.EXCSQLSET);
- buildPKGNAMCSN (section); // is this PKGNAMCSN or PKGNAMCT
- updateLengthBytes();
- }
-
- // Build the Execute SQL Statement (EXCSQLSTT) Command
- // to execute a non-cursor SQL statement previously bound into a named package
- // of a relational database (RDB). The SQL statement can optionally include
- // references to input variables, output variables, or both.
- //
- // At SQLAM >= 7 we can get a DA back on this, are there times that we want to request it
- // If so, we probably want to pass a parameter indicating the sqldaLevel requested.
- //
- // preconditions:
- // the sqlam and/or prdid must support command and parameters passed to this method,
- // method will not validate against the connection's level of support
- // Here is the preferred codepoint ordering:
- // PKGNAMCSN
- // RDBCMTOK
- // OUTEXP
- // QRYBLKSZ
- // MAXBLKEXT
- // MAXRSLCNT
- // RSLSETFLG
- // QRYROWSET
- // RTNSQLDA
- // TYPSQLDA
- // NBRROW
- // ATMIND
- // PRCNAM
- // OUTOVROPT
- // RDBNAM
- void buildEXCSQLSTT (Section section,
- boolean sendOutexp,
- boolean outexp,
- boolean sendPrcnam,
- String prcnam,
- boolean sendQryblksz,
- boolean sendMaxrslcnt,
- int maxrslcnt,
- boolean sendMaxblkext,
- int maxblkext,
- boolean sendRslsetflg,
- int resultSetFlag,
- boolean sendQryrowset,
- int qryrowset
- ) throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.EXCSQLSTT);
-
- buildPKGNAMCSN (section);
- buildRDBCMTOK();
- if (sendOutexp) buildOUTEXP (outexp);
- if (sendQryblksz) buildQRYBLKSZ();
- if (sendQryrowset && sendMaxblkext) buildMAXBLKEXT (maxblkext);
- if (sendMaxrslcnt) buildMAXRSLCNT (maxrslcnt);
- if (sendRslsetflg) buildRSLSETFLG (resultSetFlag);
- if (sendQryrowset) buildQRYROWSET (qryrowset);
- if (sendPrcnam) buildPRCNAM (prcnam);
-
- updateLengthBytes(); // command is complete, update the length bytes
- }
-
- // Build the Describe SQL Statement command.
- //
- // preconditions:
- // the sqlam and/or prdid must support command and parameters passed to this method,
- // method will not validate against the connection's level of support
- void buildDSCSQLSTT (Section section,
- boolean sendTypsqlda,
- int typsqlda
- ) throws SqlException
- {
- createCommand ();
- markLengthBytes (CodePoint.DSCSQLSTT);
-
- buildPKGNAMCSN (section);
- if (sendTypsqlda) buildTYPSQLDA (typsqlda);
-
- updateLengthBytes();
- }
-
- // Build the SQL Program Variable Data Command Data Object.
- // This object contains the input data to an SQL statement
- // that an RDB is executing.
- //
- // preconditions:
- boolean buildSQLDTAcommandData (int numInputColumns,
- ColumnMetaData parameterMetaData,
- Object[] inputRow) throws SqlException
- {
- createEncryptedCommandData();
+ // Build the Execute Immediate SQL Statement Command to
+ // execute a non-cursor SQL statement sent as command data.
+ //
+ // precondtions:
+ void buildEXCSQLIMM(Section section,
+ boolean sendQryinsid,
+ long qryinsid) throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.EXCSQLIMM);
+
+ buildPKGNAMCSN(section);
+ buildRDBCMTOK();
+ if (sendQryinsid) {
+ buildQRYINSID(qryinsid);
+ }
- int loc = offset_;
+ updateLengthBytes();
+ }
- markLengthBytes (CodePoint.SQLDTA);
+ // Build the Prepare SQL Statement Command to dynamically binds an
+ // SQL statement to a section in an existing relational database (RDB) package.
+ //
+ // preconditions:
+ // the sqlam and/or prdid must support command and parameters passed to this method,
+ // method will not validate against the connection's level of support
+ void buildPRPSQLSTT(Section section,
+ String sql,
+ boolean sendRtnsqlda,
+ boolean sendTypsqlda,
+ int typsqlda) throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.PRPSQLSTT);
+
+ buildPKGNAMCSN(section);
+ if (sendRtnsqlda) {
+ buildRTNSQLDA();
+ }
+ if (sendTypsqlda) {
+ buildTYPSQLDA(typsqlda);
+ }
- int[][] protocolTypesAndLengths = allocateLidAndLengthsArray (parameterMetaData);
+ updateLengthBytes();
+ }
- java.util.Hashtable protocolTypeToOverrideLidMapping = null;
- java.util.ArrayList mddOverrideArray = null;
- protocolTypeToOverrideLidMapping =
- computeProtocolTypesAndLengths (inputRow, parameterMetaData, protocolTypesAndLengths,
- protocolTypeToOverrideLidMapping) ;
+ // Build the command to execute an SQL SET Statement.
+ // Called by NETSetClientPiggybackCommand.write()
+ //
+ // preconditions:
+ // the sqlam and/or prdid must support command and parameters passed to this method,
+ // method will not validate against the connection's level of support
+ void buildEXCSQLSET(Section section)
+ throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.EXCSQLSET);
+ buildPKGNAMCSN(section); // is this PKGNAMCSN or PKGNAMCT
+ updateLengthBytes();
+ }
- boolean overrideExists = false;
+ // Build the Execute SQL Statement (EXCSQLSTT) Command
+ // to execute a non-cursor SQL statement previously bound into a named package
+ // of a relational database (RDB). The SQL statement can optionally include
+ // references to input variables, output variables, or both.
+ //
+ // At SQLAM >= 7 we can get a DA back on this, are there times that we want to request it
+ // If so, we probably want to pass a parameter indicating the sqldaLevel requested.
+ //
+ // preconditions:
+ // the sqlam and/or prdid must support command and parameters passed to this method,
+ // method will not validate against the connection's level of support
+ // Here is the preferred codepoint ordering:
+ // PKGNAMCSN
+ // RDBCMTOK
+ // OUTEXP
+ // QRYBLKSZ
+ // MAXBLKEXT
+ // MAXRSLCNT
+ // RSLSETFLG
+ // QRYROWSET
+ // RTNSQLDA
+ // TYPSQLDA
+ // NBRROW
+ // ATMIND
+ // PRCNAM
+ // OUTOVROPT
+ // RDBNAM
+ void buildEXCSQLSTT(Section section,
+ boolean sendOutexp,
+ boolean outexp,
+ boolean sendPrcnam,
+ String prcnam,
+ boolean sendQryblksz,
+ boolean sendMaxrslcnt,
+ int maxrslcnt,
+ boolean sendMaxblkext,
+ int maxblkext,
+ boolean sendRslsetflg,
+ int resultSetFlag,
+ boolean sendQryrowset,
+ int qryrowset) throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.EXCSQLSTT);
+
+ buildPKGNAMCSN(section);
+ buildRDBCMTOK();
+ if (sendOutexp) {
+ buildOUTEXP(outexp);
+ }
+ if (sendQryblksz) {
+ buildQRYBLKSZ();
+ }
+ if (sendQryrowset && sendMaxblkext) {
+ buildMAXBLKEXT(maxblkext);
+ }
+ if (sendMaxrslcnt) {
+ buildMAXRSLCNT(maxrslcnt);
+ }
+ if (sendRslsetflg) {
+ buildRSLSETFLG(resultSetFlag);
+ }
+ if (sendQryrowset) {
+ buildQRYROWSET(qryrowset);
+ }
+ if (sendPrcnam) {
+ buildPRCNAM(prcnam);
+ }
- buildFDODSC (numInputColumns,
- protocolTypesAndLengths,
- overrideExists,
- protocolTypeToOverrideLidMapping,
- mddOverrideArray);
+ updateLengthBytes(); // command is complete, update the length bytes
+ }
- buildFDODTA (numInputColumns,
- protocolTypesAndLengths,
- inputRow);
+ // Build the Describe SQL Statement command.
+ //
+ // preconditions:
+ // the sqlam and/or prdid must support command and parameters passed to this method,
+ // method will not validate against the connection's level of support
+ void buildDSCSQLSTT(Section section,
+ boolean sendTypsqlda,
+ int typsqlda) throws SqlException {
+ createCommand();
+ markLengthBytes(CodePoint.DSCSQLSTT);
+
+ buildPKGNAMCSN(section);
+ if (sendTypsqlda) {
+ buildTYPSQLDA(typsqlda);
+ }
- updateLengthBytes(); // for sqldta
- if (netAgent_.netConnection_.getSecurityMechanism() ==
- NetConfiguration.SECMEC_EUSRIDDTA ||
- netAgent_.netConnection_.getSecurityMechanism() ==
- NetConfiguration.SECMEC_EUSRPWDDTA)
- encryptDataStream(loc);
+ updateLengthBytes();
+ }
- return overrideExists;
- }
+ // Build the SQL Program Variable Data Command Data Object.
+ // This object contains the input data to an SQL statement
+ // that an RDB is executing.
+ //
+ // preconditions:
+ boolean buildSQLDTAcommandData(int numInputColumns,
+ ColumnMetaData parameterMetaData,
+ Object[] inputRow) throws SqlException {
+ createEncryptedCommandData();
+
+ int loc = offset_;
+
+ markLengthBytes(CodePoint.SQLDTA);
+
+ int[][] protocolTypesAndLengths = allocateLidAndLengthsArray(parameterMetaData);
+
+ java.util.Hashtable protocolTypeToOverrideLidMapping = null;
+ java.util.ArrayList mddOverrideArray = null;
+ protocolTypeToOverrideLidMapping =
+ computeProtocolTypesAndLengths(inputRow, parameterMetaData, protocolTypesAndLengths,
+ protocolTypeToOverrideLidMapping);
+
+ boolean overrideExists = false;
+
+ buildFDODSC(numInputColumns,
+ protocolTypesAndLengths,
+ overrideExists,
+ protocolTypeToOverrideLidMapping,
+ mddOverrideArray);
+
+ buildFDODTA(numInputColumns,
+ protocolTypesAndLengths,
+ inputRow);
+
+ updateLengthBytes(); // for sqldta
+ if (netAgent_.netConnection_.getSecurityMechanism() ==
+ NetConfiguration.SECMEC_EUSRIDDTA ||
+ netAgent_.netConnection_.getSecurityMechanism() ==
+ NetConfiguration.SECMEC_EUSRPWDDTA) {
+ encryptDataStream(loc);
+ }
- // Build the FDOCA Data Descriptor Scalar whose value is a FDOCA
- // Descriptor or a segment of an FDOCA Descriptor.
- //
- // preconditions:
- private void buildFDODSC (int numColumns,
- int[][] protocolTypesAndLengths,
- boolean overrideExists,
- java.util.Hashtable overrideMap,
- java.util.ArrayList overrideArray) throws SqlException
- {
- markLengthBytes (CodePoint.FDODSC);
- buildSQLDTA (numColumns, protocolTypesAndLengths, overrideExists, overrideMap, overrideArray);
- updateLengthBytes();
- }
+ return overrideExists;
+ }
- // Build the FDOCA SQLDTA Late Row Descriptor.
- //
- // preconditions:
- protected void buildSQLDTA (int numColumns,
- int[][] lidAndLengthOverrides,
- boolean overrideExists,
- java.util.Hashtable overrideMap,
- java.util.ArrayList overrideArray) throws SqlException
- {
- // mdd overrides need to be built first if any before the descriptors are built.
- if (overrideExists) {
- buildMddOverrides (overrideArray);
- writeBytes (FdocaConstants.MDD_SQLDTAGRP_TOSEND);
+ // Build the FDOCA Data Descriptor Scalar whose value is a FDOCA
+ // Descriptor or a segment of an FDOCA Descriptor.
+ //
+ // preconditions:
+ private void buildFDODSC(int numColumns,
+ int[][] protocolTypesAndLengths,
+ boolean overrideExists,
+ java.util.Hashtable overrideMap,
+ java.util.ArrayList overrideArray) throws SqlException {
+ markLengthBytes(CodePoint.FDODSC);
+ buildSQLDTA(numColumns, protocolTypesAndLengths, overrideExists, overrideMap, overrideArray);
+ updateLengthBytes();
}
- buildSQLDTAGRP (numColumns, lidAndLengthOverrides, overrideExists, overrideMap);
+ // Build the FDOCA SQLDTA Late Row Descriptor.
+ //
+ // preconditions:
+ protected void buildSQLDTA(int numColumns,
+ int[][] lidAndLengthOverrides,
+ boolean overrideExists,
+ java.util.Hashtable overrideMap,
+ java.util.ArrayList overrideArray) throws SqlException {
+ // mdd overrides need to be built first if any before the descriptors are built.
+ if (overrideExists) {
+ buildMddOverrides(overrideArray);
+ writeBytes(FdocaConstants.MDD_SQLDTAGRP_TOSEND);
+ }
- if (overrideExists) writeBytes (FdocaConstants.MDD_SQLDTA_TOSEND);
- writeBytes (FdocaConstants.SQLDTA_RLO_TOSEND);
- }
+ buildSQLDTAGRP(numColumns, lidAndLengthOverrides, overrideExists, overrideMap);
- // Build the FDOCA SQLDTAGRP Late Group Descriptor.
- // preconditions:
- protected void buildSQLDTAGRP (int numVars,
- int[][] lidAndLengthOverrides,
- boolean mddRequired,
- java.util.Hashtable overrideMap) throws SqlException
- {
- int n = 0;
- int offset = 0;
+ if (overrideExists) {
+ writeBytes(FdocaConstants.MDD_SQLDTA_TOSEND);
+ }
+ writeBytes(FdocaConstants.SQLDTA_RLO_TOSEND);
+ }
- n = calculateColumnsInSQLDTAGRPtriplet (numVars);
- buildTripletHeader (((3 * n) + 3),
- FdocaConstants.NGDA_TRIPLET_TYPE,
- FdocaConstants.SQLDTAGRP_LID);
-
- do {
- writeLidAndLengths (lidAndLengthOverrides, n, offset, mddRequired, overrideMap);
- numVars -= n;
- if (numVars == 0) break;
-
- offset += n;
- n = calculateColumnsInSQLDTAGRPtriplet (numVars);
- buildTripletHeader (((3 * n) + 3),
- FdocaConstants.CPT_TRIPLET_TYPE,
- 0x00);
- } while (true);
- }
+ // Build the FDOCA SQLDTAGRP Late Group Descriptor.
+ // preconditions:
+ protected void buildSQLDTAGRP(int numVars,
+ int[][] lidAndLengthOverrides,
+ boolean mddRequired,
+ java.util.Hashtable overrideMap) throws SqlException {
+ int n = 0;
+ int offset = 0;
+
+ n = calculateColumnsInSQLDTAGRPtriplet(numVars);
+ buildTripletHeader(((3 * n) + 3),
+ FdocaConstants.NGDA_TRIPLET_TYPE,
+ FdocaConstants.SQLDTAGRP_LID);
+
+ do {
+ writeLidAndLengths(lidAndLengthOverrides, n, offset, mddRequired, overrideMap);
+ numVars -= n;
+ if (numVars == 0) {
+ break;
+ }
+
+ offset += n;
+ n = calculateColumnsInSQLDTAGRPtriplet(numVars);
+ buildTripletHeader(((3 * n) + 3),
+ FdocaConstants.CPT_TRIPLET_TYPE,
+ 0x00);
+ } while (true);
+ }
/////////// perf end
- protected void buildOUTOVR (ResultSet resultSet,
- ColumnMetaData resultSetMetaData) throws SqlException
- {
- createCommandData();
- markLengthBytes (CodePoint.OUTOVR);
- int[][] outputOverrides =
- calculateOUTOVRLidAndLengthOverrides (resultSet, resultSetMetaData);
- buildSQLDTARD (resultSetMetaData.columns_, outputOverrides);
- updateLengthBytes();
- }
-
- private int[][] calculateOUTOVRLidAndLengthOverrides (ResultSet resultSet,
- ColumnMetaData resultSetMetaData)
- {
- int numVars = resultSetMetaData.columns_;
- int[][] lidAndLengths = new int[numVars][2]; //everything initialized to "default triplet"
+ protected void buildOUTOVR(ResultSet resultSet,
+ ColumnMetaData resultSetMetaData) throws SqlException {
+ createCommandData();
+ markLengthBytes(CodePoint.OUTOVR);
+ int[][] outputOverrides =
+ calculateOUTOVRLidAndLengthOverrides(resultSet, resultSetMetaData);
+ buildSQLDTARD(resultSetMetaData.columns_, outputOverrides);
+ updateLengthBytes();
+ }
- return lidAndLengths;
- }
+ private int[][] calculateOUTOVRLidAndLengthOverrides(ResultSet resultSet,
+ ColumnMetaData resultSetMetaData) {
+ int numVars = resultSetMetaData.columns_;
+ int[][] lidAndLengths = new int[numVars][2]; //everything initialized to "default triplet"
- protected void buildSQLDTARD (int numColumns, int[][] lidAndLengthOverrides) throws SqlException
- {
- buildSQLCADTA (numColumns, lidAndLengthOverrides);
- writeBytes (FdocaConstants.SQLDTARD_RLO_TOSEND);
- }
+ return lidAndLengths;
+ }
- protected void buildSQLCADTA (int numColumns, int[][] lidAndLengthOverrides) throws SqlException
- {
- buildSQLDTAGRP (numColumns, lidAndLengthOverrides, false, null); // false means no mdd override
- writeBytes (FdocaConstants.SQLCADTA_RLO_TOSEND);
- }
+ protected void buildSQLDTARD(int numColumns, int[][] lidAndLengthOverrides) throws SqlException {
+ buildSQLCADTA(numColumns, lidAndLengthOverrides);
+ writeBytes(FdocaConstants.SQLDTARD_RLO_TOSEND);
+ }
- private void buildFDODTA (int numVars,
- int[][] protocolTypesAndLengths,
- Object[] inputs
- ) throws SqlException
- {
- long dataLength = 0;
- Object o = null;
+ protected void buildSQLCADTA(int numColumns, int[][] lidAndLengthOverrides) throws SqlException {
+ buildSQLDTAGRP(numColumns, lidAndLengthOverrides, false, null); // false means no mdd override
+ writeBytes(FdocaConstants.SQLCADTA_RLO_TOSEND);
+ }
- markLengthBytes (CodePoint.FDODTA);
- write1Byte (FdocaConstants.NULL_LID); // write the 1-byte row indicator
+ private void buildFDODTA(int numVars,
+ int[][] protocolTypesAndLengths,
+ Object[] inputs) throws SqlException {
+ long dataLength = 0;
+ Object o = null;
+
+ markLengthBytes(CodePoint.FDODTA);
+ write1Byte(FdocaConstants.NULL_LID); // write the 1-byte row indicator
+
+ // write data for each input column
+ for (int i = 0; i < numVars; i++) {
+ if (inputs[i] == null) {
+ if ((protocolTypesAndLengths[i][0] % 2) == 1) {
+ write1Byte(FdocaConstants.NULL_DATA);
+ } else {
+ //bug check
+ }
+ } else {
+ if ((protocolTypesAndLengths[i][0] % 2) == 1) {
+ write1Byte(FdocaConstants.INDICATOR_NULLABLE);
+ }
- // write data for each input column
- for (int i = 0; i < numVars; i++) {
- if (inputs[i] == null) {
- if ((protocolTypesAndLengths[i][0] % 2) == 1)
- write1Byte (FdocaConstants.NULL_DATA);
- else {
- //bug check
- }
- }
- else {
- if ((protocolTypesAndLengths[i][0] % 2) == 1)
- write1Byte (FdocaConstants.INDICATOR_NULLABLE);
-
- switch (protocolTypesAndLengths[i][0] | 0x01) { // mask out null indicator
- case FdocaConstants.PROTOCOL_TYPE_NVARMIX:
- case FdocaConstants.PROTOCOL_TYPE_NLONGMIX:
- // What to do for server that don't understand 1208 (UTF-8)
- // check for a promototed type, and use that instead if it exists
- o = retrievePromotedParameterIfExists (i);
- if ( o == null ) {
- writeSingleorMixedCcsidLDString ((String) inputs[i], netAgent_.typdef_.getCcsidMbcEncoding());
- }
- else { // use the promototed object instead
- Clob c = (Clob)o;
- dataLength = c.length();
- setFDODTALobLength (protocolTypesAndLengths, i, dataLength);
- }
- break;
-
- case FdocaConstants.PROTOCOL_TYPE_NVARCHAR:
- case FdocaConstants.PROTOCOL_TYPE_NLONG:
- o = retrievePromotedParameterIfExists (i);
- if ( o == null ) {
-
- }
- else { // use the promototed object instead
- dataLength = ((Clob)o).length();
- setFDODTALobLength (protocolTypesAndLengths, i, dataLength);
- }
- break;
-
- case FdocaConstants.PROTOCOL_TYPE_NINTEGER:
- writeIntFdocaData (((Integer) inputs[i]).intValue());
- break;
- case FdocaConstants.PROTOCOL_TYPE_NSMALL:
- writeShortFdocaData (((Short) inputs[i]).shortValue());
- break;
- case FdocaConstants.PROTOCOL_TYPE_NFLOAT4:
- writeFloat (((Float) inputs[i]).floatValue());
- break;
- case FdocaConstants.PROTOCOL_TYPE_NFLOAT8:
- writeDouble (((Double) inputs[i]).doubleValue());
- break;
- case FdocaConstants.PROTOCOL_TYPE_NDECIMAL:
- writeBigDecimal ((java.math.BigDecimal) inputs[i],
- (protocolTypesAndLengths[i][1] >> 8) & 0xff, // described precision not actual
- protocolTypesAndLengths[i][1] & 0xff); // described scale, not actual
- break;
- case FdocaConstants.PROTOCOL_TYPE_NDATE:
- writeDate ((java.sql.Date) inputs[i]);
- break;
- case FdocaConstants.PROTOCOL_TYPE_NTIME:
- writeTime ((java.sql.Time) inputs[i]);
- break;
- case FdocaConstants.PROTOCOL_TYPE_NTIMESTAMP:
- writeTimestamp ((java.sql.Timestamp) inputs[i]);
- break;
- case FdocaConstants.PROTOCOL_TYPE_NINTEGER8:
- writeLongFdocaData (((Long) inputs[i]).longValue());
- break;
- case FdocaConstants.PROTOCOL_TYPE_NVARBYTE:
- case FdocaConstants.PROTOCOL_TYPE_NLONGVARBYTE:
- o = retrievePromotedParameterIfExists (i);
- if (o == null) {
- writeLDBytes ((byte[]) inputs[i]);
- }
- else { // use the promototed object instead
- Blob b = (Blob) o;
- dataLength = b.length();
- setFDODTALobLength (protocolTypesAndLengths, i, dataLength);
- }
- break;
- case FdocaConstants.PROTOCOL_TYPE_NLOBCSBCS:
- case FdocaConstants.PROTOCOL_TYPE_NLOBCDBCS:
- // check for a promoted Clob
- o = retrievePromotedParameterIfExists(i);
- if (o == null) {
- try {
- dataLength = ((java.sql.Clob) inputs[i]).length();
- }
- catch (java.sql.SQLException e) {
- if ( ! (e instanceof org.apache.derby.client.am.SqlException)) {
- SqlException toThrow = new SqlException (
- netAgent_.logWriter_,
- "Error obtaining length of external clob object, exception follows. ");
- toThrow.setNextException (e);
- throw toThrow;
- }
- else
- throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
- }
- }
- else {
- dataLength = ((Clob) o).length();
- }
- setFDODTALobLength (protocolTypesAndLengths, i, dataLength);
- break;
- case FdocaConstants.PROTOCOL_TYPE_NLOBBYTES:
- // check for a promoted Clob
- o = retrievePromotedParameterIfExists(i);
- if (o == null) {
- try {
- dataLength = ((java.sql.Blob) inputs[i]).length();
- }
- catch (java.sql.SQLException e) {
- if ( ! (e instanceof org.apache.derby.client.am.SqlException)) {
- SqlException toThrow = new SqlException (
- netAgent_.logWriter_,
- "Error obtaining length of external blob object, exception follows. ");
- toThrow.setNextException (e);
- throw toThrow;
- }
- else
- throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
+ switch (protocolTypesAndLengths[i][0] | 0x01) { // mask out null indicator
+ case FdocaConstants.PROTOCOL_TYPE_NVARMIX:
+ case FdocaConstants.PROTOCOL_TYPE_NLONGMIX:
+ // What to do for servers that don't understand 1208 (UTF-8)
+ // check for a promoted type, and use that instead if it exists
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+ writeSingleorMixedCcsidLDString((String) inputs[i], netAgent_.typdef_.getCcsidMbcEncoding());
+ } else { // use the promoted object instead
+ Clob c = (Clob) o;
+ dataLength = c.length();
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ }
+ break;
+
+ case FdocaConstants.PROTOCOL_TYPE_NVARCHAR:
+ case FdocaConstants.PROTOCOL_TYPE_NLONG:
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+
+ } else { // use the promoted object instead
+ dataLength = ((Clob) o).length();
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ }
+ break;
+
+ case FdocaConstants.PROTOCOL_TYPE_NINTEGER:
+ writeIntFdocaData(((Integer) inputs[i]).intValue());
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NSMALL:
+ writeShortFdocaData(((Short) inputs[i]).shortValue());
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NFLOAT4:
+ writeFloat(((Float) inputs[i]).floatValue());
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NFLOAT8:
+ writeDouble(((Double) inputs[i]).doubleValue());
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NDECIMAL:
+ writeBigDecimal((java.math.BigDecimal) inputs[i],
+ (protocolTypesAndLengths[i][1] >> 8) & 0xff, // described precision not actual
+ protocolTypesAndLengths[i][1] & 0xff); // described scale, not actual
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NDATE:
+ writeDate((java.sql.Date) inputs[i]);
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NTIME:
+ writeTime((java.sql.Time) inputs[i]);
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NTIMESTAMP:
+ writeTimestamp((java.sql.Timestamp) inputs[i]);
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NINTEGER8:
+ writeLongFdocaData(((Long) inputs[i]).longValue());
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NVARBYTE:
+ case FdocaConstants.PROTOCOL_TYPE_NLONGVARBYTE:
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+ writeLDBytes((byte[]) inputs[i]);
+ } else { // use the promoted object instead
+ Blob b = (Blob) o;
+ dataLength = b.length();
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ }
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NLOBCSBCS:
+ case FdocaConstants.PROTOCOL_TYPE_NLOBCDBCS:
+ // check for a promoted Clob
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+ try {
+ dataLength = ((java.sql.Clob) inputs[i]).length();
+ } catch (java.sql.SQLException e) {
+ if (!(e instanceof org.apache.derby.client.am.SqlException)) {
+ SqlException toThrow = new SqlException(netAgent_.logWriter_,
+ "Error obtaining length of external clob object, exception follows. ");
+ toThrow.setNextException(e);
+ throw toThrow;
+ } else {
+ throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
+ }
+ }
+ } else {
+ dataLength = ((Clob) o).length();
+ }
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NLOBBYTES:
+ // check for a promoted Blob
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+ try {
+ dataLength = ((java.sql.Blob) inputs[i]).length();
+ } catch (java.sql.SQLException e) {
+ if (!(e instanceof org.apache.derby.client.am.SqlException)) {
+ SqlException toThrow = new SqlException(netAgent_.logWriter_,
+ "Error obtaining length of external blob object, exception follows. ");
+ toThrow.setNextException(e);
+ throw toThrow;
+ } else {
+ throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
+ }
+ }
+ } else { // use promoted Blob
+ dataLength = ((Blob) o).length();
+ }
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ break;
+ case FdocaConstants.PROTOCOL_TYPE_NLOBCMIXED:
+ // check for a promoted Clob
+ o = retrievePromotedParameterIfExists(i);
+ if (o == null) {
+ if (((Clob) inputs[i]).isString()) {
+ dataLength = ((Clob) inputs[i]).getUTF8Length();
+ } else // must be a Unicode stream
+ {
+ dataLength = ((Clob) inputs[i]).length();
+ }
+ } else { // use promoted Clob
+ dataLength = ((Clob) o).length();
+ }
+ setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
+ break;
+ default:
+ throw new SqlException(netAgent_.logWriter_, "unrecognized jdbc type. " +
+ " type: " + protocolTypesAndLengths[i][0] +
+ ", columnCount: " + numVars +
+ ", columnIndex: " + i);
+ }
}
- }
- else { // use promoted Blob
- dataLength = ((Blob) o).length();
- }
- setFDODTALobLength (protocolTypesAndLengths, i, dataLength);
- break;
- case FdocaConstants.PROTOCOL_TYPE_NLOBCMIXED :
- // check for a promoted Clob
- o = retrievePromotedParameterIfExists(i);
- if (o == null) {
- if (((Clob) inputs[i]).isString())
- dataLength = ((Clob) inputs[i]).getUTF8Length();
- else // must be a Unicode stream
- dataLength = ((Clob) inputs[i]).length();
- }
- else { // use promoted Clob
- dataLength = ((Clob) o).length();
- }
- setFDODTALobLength(protocolTypesAndLengths, i, dataLength);
- break;
- default:
- throw new SqlException (netAgent_.logWriter_, "unrecognized jdbc type. " +
- " type: " + protocolTypesAndLengths[i][0] +
- ", columnCount: " + numVars +
- ", columnIndex: " + i);
}
- }
+ updateLengthBytes(); // for fdodta
}
- updateLengthBytes(); // for fdodta
- }
- // preconditions:
- private void buildEXTDTA(ColumnMetaData parameterMetaData,
- Object[] inputRow,
- boolean chained
- ) throws SqlException
- {
- // build the EXTDTA data, if necessary
- if (extdtaPositions_ != null) {
- boolean chainFlag, chainedWithSameCorrelator;
-
- for (int i = 0; i < extdtaPositions_.size(); i++) {
- int index = ((Integer) extdtaPositions_.get(i)).intValue();
-
- // is this the last EXTDTA to be built?
- if (i != extdtaPositions_.size() - 1) { // no
- chainFlag = true;
- chainedWithSameCorrelator = true;
- }
- else { // yes
- chainFlag = chained;
- chainedWithSameCorrelator = false;
- }
-
- // do we have to write a null byte?
- boolean writeNullByte = false;
- if (parameterMetaData.nullable_[index])
- writeNullByte = true;
- // Use the type of the input parameter rather than the input
- // column if possible.
- int parameterType = parameterMetaData.clientParamtertype_[index];
- if (parameterType == 0)
- parameterType = parameterMetaData.types_[index];
-
- // the follow types are possible due to promotion to BLOB
- if (parameterType == Types.BLOB
- || parameterType == Types.BINARY
- || parameterType == Types.VARBINARY
- || parameterType == Types.LONGVARBINARY) {
- Blob o = (Blob) retrievePromotedParameterIfExists(index);
- java.sql.Blob b = (o == null) ? (java.sql.Blob) inputRow[index] : o;
- boolean isExternalBlob = !(b instanceof org.apache.derby.client.am.Blob);
- if (isExternalBlob) {
- try {
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) b.length(),
- b.getBinaryStream(),
- writeNullByte,
- index + 1);
- }
- catch (java.sql.SQLException e) {
- if (!(e instanceof org.apache.derby.client.am.SqlException)) {
- SqlException toThrow = new SqlException(netAgent_.logWriter_, "Error occurred while streaming from external blob object, exception follows. ");
- toThrow.setNextException(e);
- throw toThrow;
- }
- else
- throw new SqlException( netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
- }
- }
- else if (((Blob) b).isBinaryStream()) {
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Blob) b).length(),
- ((Blob) b).getBinaryStream(),
- writeNullByte,
- index + 1);
- }
- else { // must be a binary string
- // note: a possible optimization is to use writeScalarLobBytes
- // when the input is small
- // use this: if (b.length () < DssConstants.MAX_DSS_LEN - 6 - 4)
- // writeScalarLobBytes (...)
- // Yes, this would avoid having to new up a java.io.ByteArrayInputStream
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Blob) b).length(),
- ((Blob) b).getBinaryStream(),
- writeNullByte,
- index + 1);
- }
- }
- // the follow types are possible due to promotion to CLOB
- else if (
- parameterType == Types.CLOB
- || parameterType == Types.CHAR
- || parameterType == Types.VARCHAR
- || parameterType == Types.LONGVARCHAR) {
- Clob o = (Clob) retrievePromotedParameterIfExists(index);
- java.sql.Clob c = (o == null) ? (java.sql.Clob) inputRow[index] : o;
- boolean isExternalClob = !(c instanceof org.apache.derby.client.am.Clob);
-
- if (isExternalClob) {
- try {
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) c.length (),
- c.getCharacterStream(),
- writeNullByte,
- index + 1);
- }
- catch (java.sql.SQLException e) {
- if (!(e instanceof org.apache.derby.client.am.SqlException)) {
- SqlException toThrow = new SqlException(netAgent_.logWriter_, "Error occurred while streaming from external clob object, exception follows. ");
- toThrow.setNextException(e);
- throw toThrow;
+ // preconditions:
+ private void buildEXTDTA(ColumnMetaData parameterMetaData,
+ Object[] inputRow,
+ boolean chained) throws SqlException {
+ // build the EXTDTA data, if necessary
+ if (extdtaPositions_ != null) {
+ boolean chainFlag, chainedWithSameCorrelator;
+
+ for (int i = 0; i < extdtaPositions_.size(); i++) {
+ int index = ((Integer) extdtaPositions_.get(i)).intValue();
+
+ // is this the last EXTDTA to be built?
+ if (i != extdtaPositions_.size() - 1) { // no
+ chainFlag = true;
+ chainedWithSameCorrelator = true;
+ } else { // yes
+ chainFlag = chained;
+ chainedWithSameCorrelator = false;
+ }
+
+ // do we have to write a null byte?
+ boolean writeNullByte = false;
+ if (parameterMetaData.nullable_[index]) {
+ writeNullByte = true;
+ }
+ // Use the type of the input parameter rather than the input
+ // column if possible.
+ int parameterType = parameterMetaData.clientParamtertype_[index];
+ if (parameterType == 0) {
+ parameterType = parameterMetaData.types_[index];
+ }
+
+ // the following types are possible due to promotion to BLOB
+ if (parameterType == Types.BLOB
+ || parameterType == Types.BINARY
+ || parameterType == Types.VARBINARY
+ || parameterType == Types.LONGVARBINARY) {
+ Blob o = (Blob) retrievePromotedParameterIfExists(index);
+ java.sql.Blob b = (o == null) ? (java.sql.Blob) inputRow[index] : o;
+ boolean isExternalBlob = !(b instanceof org.apache.derby.client.am.Blob);
+ if (isExternalBlob) {
+ try {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) b.length(),
+ b.getBinaryStream(),
+ writeNullByte,
+ index + 1);
+ } catch (java.sql.SQLException e) {
+ if (!(e instanceof org.apache.derby.client.am.SqlException)) {
+ SqlException toThrow = new SqlException(netAgent_.logWriter_, "Error occurred while streaming from external blob object, exception follows. ");
+ toThrow.setNextException(e);
+ throw toThrow;
+ } else {
+ throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
+ }
+ }
+ } else if (((Blob) b).isBinaryStream()) {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Blob) b).length(),
+ ((Blob) b).getBinaryStream(),
+ writeNullByte,
+ index + 1);
+ } else { // must be a binary string
+ // note: a possible optimization is to use writeScalarLobBytes
+ // when the input is small
+ // use this: if (b.length () < DssConstants.MAX_DSS_LEN - 6 - 4)
+ // writeScalarLobBytes (...)
+ // Yes, this would avoid having to new up a java.io.ByteArrayInputStream
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Blob) b).length(),
+ ((Blob) b).getBinaryStream(),
+ writeNullByte,
+ index + 1);
+ }
+ }
+ // the following types are possible due to promotion to CLOB
+ else if (
+ parameterType == Types.CLOB
+ || parameterType == Types.CHAR
+ || parameterType == Types.VARCHAR
+ || parameterType == Types.LONGVARCHAR) {
+ Clob o = (Clob) retrievePromotedParameterIfExists(index);
+ java.sql.Clob c = (o == null) ? (java.sql.Clob) inputRow[index] : o;
+ boolean isExternalClob = !(c instanceof org.apache.derby.client.am.Clob);
+
+ if (isExternalClob) {
+ try {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) c.length(),
+ c.getCharacterStream(),
+ writeNullByte,
+ index + 1);
+ } catch (java.sql.SQLException e) {
+ if (!(e instanceof org.apache.derby.client.am.SqlException)) {
+ SqlException toThrow = new SqlException(netAgent_.logWriter_, "Error occurred while streaming from external clob object, exception follows. ");
+ toThrow.setNextException(e);
+ throw toThrow;
+ } else {
+ throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
+ }
+ }
+ } else if (((Clob) c).isCharacterStream()) {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Clob) c).length(),
+ ((Clob) c).getCharacterStream(),
+ writeNullByte,
+ index + 1);
+ } else if (((Clob) c).isAsciiStream()) {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Clob) c).length(),
+ ((Clob) c).getAsciiStream(),
+ writeNullByte,
+ index + 1);
+ } else if (((Clob) c).isUnicodeStream()) {
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Clob) c).length(),
+ ((Clob) c).getUnicodeStream(),
+ writeNullByte,
+ index + 1);
+ } else { // must be a String
+ // note: a possible optimization is to use writeScalarLobBytes
+ // when the input is small.
+ // use this: if (c.length () < DssConstants.MAX_DSS_LEN - 6 - 4)
+ // writeScalarLobBytes (...)
+ writeScalarStream(chainFlag,
+ chainedWithSameCorrelator,
+ CodePoint.EXTDTA,
+ (int) ((Clob) c).getUTF8Length(),
+ new java.io.ByteArrayInputStream(((Clob) c).getUtf8String()),
+ writeNullByte,
+ index + 1);
+ }
}
- else
- throw new SqlException(netAgent_.logWriter_, e, "Error obtaining length of blob object, exception follows. ");
- }
- }
- else if (((Clob) c).isCharacterStream()) {
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Clob) c).length (),
- ((Clob) c).getCharacterStream(),
- writeNullByte,
- index + 1);
- }
- else if (((Clob) c).isAsciiStream ())
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Clob) c).length (),
- ((Clob) c).getAsciiStream (),
- writeNullByte,
- index + 1);
- else if (((Clob) c).isUnicodeStream ())
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Clob) c).length (),
- ((Clob) c).getUnicodeStream (),
- writeNullByte,
- index + 1);
- else { // must be a String
- // note: a possible optimization is to use writeScalarLobBytes
- // when the input is small.
- // use this: if (c.length () < DssConstants.MAX_DSS_LEN - 6 - 4)
- // writeScalarLobBytes (...)
- writeScalarStream (chainFlag,
- chainedWithSameCorrelator,
- CodePoint.EXTDTA,
- (int) ((Clob) c).getUTF8Length (),
- new java.io.ByteArrayInputStream (((Clob) c).getUtf8String ()),
- writeNullByte,
- index + 1);
}
}
- }
}
- }
- //-------------------------helper methods-------------------------------------
- // returns the a promototedParameter object for index or null if it does not exist
- private Object retrievePromotedParameterIfExists (int index) {
-
- // consider using a nonsynchronized container or array
- if (promototedParameters_.isEmpty()) return null;
- return promototedParameters_.get(new Integer(index));
- }
+ //-------------------------helper methods-------------------------------------
+ // returns the promoted parameter object for index, or null if it does not exist
+ private Object retrievePromotedParameterIfExists(int index) {
+
+ // consider using a nonsynchronized container or array
+ if (promototedParameters_.isEmpty()) {
+ return null;
+ }
+ return promototedParameters_.get(new Integer(index));
+ }
- private int calculateColumnsInSQLDTAGRPtriplet (int numVars) {
- if (numVars > FdocaConstants.MAX_VARS_IN_NGDA) //rename to MAX_VARS_IN_SQLDTAGRP_TRIPLET
- return FdocaConstants.MAX_VARS_IN_NGDA;
- return numVars;
- }
+ private int calculateColumnsInSQLDTAGRPtriplet(int numVars) {
+ if (numVars > FdocaConstants.MAX_VARS_IN_NGDA) //rename to MAX_VARS_IN_SQLDTAGRP_TRIPLET
+ {
+ return FdocaConstants.MAX_VARS_IN_NGDA;
+ }
+ return numVars;
+ }
- // Consider refacctor so that this does not even have to look
- // at the actual object data, and only uses tags from the meta data
- // only have to call this once, rather than calling this for every input row
- // backburner: after refactoring this, later on, think about replacing case statements with table lookups
- private java.util.Hashtable computeProtocolTypesAndLengths (Object[] inputRow,
- ColumnMetaData parameterMetaData,
- int[][] lidAndLengths,
- java.util.Hashtable overrideMap
- ) throws SqlException
- {
- int numVars = parameterMetaData.columns_;
- String s = null;
- if (!promototedParameters_.isEmpty()) promototedParameters_.clear();
-
- for (int i = 0; i < numVars; i++) {
-
- int jdbcType;
- // Send the input type unless it is not available.
- // (e.g an output parameter)
- jdbcType = parameterMetaData.clientParamtertype_[i];
- if (jdbcType == 0)
- jdbcType = parameterMetaData.types_[i];
-
- // jdbc semantics - This should happen outside of the build methods
- // if describe input is not supported, we require the user to at least
- // call setNull() and provide the type information. Otherwise, we won't
- // be able to guess the right PROTOCOL type to send to the server, and an
- // exception is thrown.
-
- if (jdbcType == 0)
- throw new SqlException (netAgent_.logWriter_, "Invalid JDBC Type for parameter " + i);
-
- switch (jdbcType) {
- case java.sql.Types.CHAR :
- case java.sql.Types.VARCHAR :
- // lid: PROTOCOL_TYPE_NVARMIX, length override: 32767 (max)
- // dataFormat: String
- // this won't work if 1208 is not supported
- s = (String) inputRow[i];
- // assumes UTF-8 characters at most 3 bytes long
- // Flow the String as a VARCHAR
- if (s == null || s.length() <= 32767 / 3) {
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NVARMIX;
- lidAndLengths[i][1] = 32767;
- }
- else {
- // Flow the data as CLOB data if the data too large to for LONGVARCHAR
- java.io.ByteArrayInputStream bais = null;
- byte[] ba = null;
- try {
- ba = s.getBytes("UTF-8");
- bais = new java.io.ByteArrayInputStream(ba);
- Clob c = new Clob(netAgent_, bais, "UTF-8", ba.length);
- // inputRow[i] = c;
- // Place the new Lob in the promototedParameter_ collection for
- // NetStatementRequest use
- promototedParameters_.put(new Integer(i), c);
+ // Consider refactoring so that this does not even have to look
+ // at the actual object data, and only uses tags from the meta data
+ // only have to call this once, rather than calling this for every input row
+ // backburner: after refactoring this, later on, think about replacing case statements with table lookups
+ private java.util.Hashtable computeProtocolTypesAndLengths(Object[] inputRow,
+ ColumnMetaData parameterMetaData,
+ int[][] lidAndLengths,
+ java.util.Hashtable overrideMap) throws SqlException {
+ int numVars = parameterMetaData.columns_;
+ String s = null;
+ if (!promototedParameters_.isEmpty()) {
+ promototedParameters_.clear();
+ }
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NLOBCMIXED;
- lidAndLengths[i][1] = buildPlaceholderLength(c.length());
- }
- catch (java.io.UnsupportedEncodingException e) {
- throw new SqlException(netAgent_.logWriter_, e, "Error in building String parameter: throwable attached");
- }
- }
- break;
- case java.sql.Types.INTEGER :
- // lid: PROTOCOL_TYPE_NINTEGER, length override: 4
- // dataFormat: Integer
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NINTEGER;
- lidAndLengths[i][1] = 4;
- break;
- case java.sql.Types.SMALLINT :
- case java.sql.Types.TINYINT:
- case java.sql.Types.BOOLEAN:
- case java.sql.Types.BIT:
- // lid: PROTOCOL_TYPE_NSMALL, length override: 2
- // dataFormat: Short
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NSMALL;
- lidAndLengths[i][1] = 2;
- break;
- case java.sql.Types.REAL :
- // lid: PROTOCOL_TYPE_NFLOAT4, length override: 4
- // dataFormat: Float
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NFLOAT4;
- lidAndLengths[i][1] = 4;
- break;
- case java.sql.Types.DOUBLE :
- case java.sql.Types.FLOAT :
- // lid: PROTOCOL_TYPE_NFLOAT8, length override: 8
- // dataFormat: Double
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NFLOAT8;
- lidAndLengths[i][1] = 8;
- break;
- case java.sql.Types.NUMERIC :
- case java.sql.Types.DECIMAL :
- // lid: PROTOCOL_TYPE_NDECIMAL
- // dataFormat: java.math.BigDecimal
- // input only:
- // if null and describe input - use describe input precision and scale
- // if not null and describe input - calculate precision and actual scale from data
- // if null and no describe input - guess with precision 1 scale 0
- // if not null and no describe input - calculate precision and actual scale from data
- // output only:
- // use largest precision/scale based on registered scale from registerOutParameter
- // inout:
- // if null - use largest precision/scale based on scale from registerOutParameter
- // if not null - write bigDecimal () pass registered scale so it can pad, you don't even
- // have to look at the actual scale at this level.
- /*
- if (parameterMetaData.isGuessed) {
- java.math.BigDecimal bigDecimal = (java.math.BigDecimal) inputRow[i];
- int precision = Utils.computeBigDecimalPrecision (bigDecimal);
- lidAndLengths[i][1] = (precision << 8) + // use precision above
- (bigDecimal.scale() << 0);
- }
- */
- // Split this entire method into two parts, the first method is called only once and the inputRow is not passed,!!
- // the second method is called for every inputRow and overrides inputDA lengths/scales based upon the acutal data!
- // for decimal and blob columns only
- int precision = parameterMetaData.sqlPrecision_[i];
- int scale = parameterMetaData.sqlScale_[i];
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NDECIMAL;
- lidAndLengths[i][1] = (precision << 8) + (scale << 0);
- break;
- case java.sql.Types.DATE :
- // for input, output, and inout parameters
- // lid: PROTOCOL_TYPE_NDATE, length override: 8
- // dataFormat: java.sql.Date
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NDATE;
- lidAndLengths[i][1] = 10;
- break;
- case java.sql.Types.TIME :
- // for input, output, and inout parameters
- // lid: PROTOCOL_TYPE_NTIME, length override: 8
- // dataFormat: java.sql.Time
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NTIME;
- lidAndLengths[i][1] = 8;
- break;
- case java.sql.Types.TIMESTAMP :
- // for input, output, and inout parameters
- // lid: PROTOCOL_TYPE_NTIME, length overrid: 26
- // dataFormat: java.sql.Timestamp
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NTIMESTAMP;
- lidAndLengths[i][1] = 26;
- break;
- case java.sql.Types.BIGINT :
- // if SQLAM < 6 this should be mapped to decimal (19,0) in common layer
- // if SQLAM >=6, lid: PROTOCOL_TYPE_NINTEGER8, length override: 8
- // dataFormat: Long
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NINTEGER8;
- lidAndLengths[i][1] = 8;
- break;
- case java.sql.Types.LONGVARCHAR :
- // Is this the right thing to do // should this be 32700
- s = (String) inputRow[i];
- if (s == null || s.length() <= 32767 / 3) {
- lidAndLengths[i][0] = FdocaConstants.PROTOCOL_TYPE_NLONGMIX;
- lidAndLengths[i][1] = 32767;
- }
- else {
[... 987 lines stripped ...]