package org.postgresql.jdbc2;


import java.io.*;
import java.math.*;
import java.sql.*;
import java.util.Vector;
import org.postgresql.largeobject.*;
import org.postgresql.util.PSQLException;

/* $Header: /cvsroot/pgsql/src/interfaces/jdbc/org/postgresql/jdbc2/Attic/AbstractJdbc2Statement.java,v 1.8 2002/10/30 04:33:29 barry Exp $
 * This class defines methods of the jdbc2 specification.  This class extends
 * org.postgresql.jdbc1.AbstractJdbc1Statement which provides the jdbc1
 * methods.  The real Statement class (for jdbc2) is org.postgresql.jdbc2.Jdbc2Statement
 */
public abstract class AbstractJdbc2Statement extends org.postgresql.jdbc1.AbstractJdbc1Statement
{

	protected Vector batch = null;
	protected int resultsettype;		 // the ResultSet type to return (ResultSet.TYPE_*)
	protected int concurrency;		 // the ResultSet concurrency mode (read only or updateable)

	public AbstractJdbc2Statement (AbstractJdbc2Connection c)
	{
		super(c);
		resultsettype = java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE;
		concurrency = java.sql.ResultSet.CONCUR_READ_ONLY;
	}

	public AbstractJdbc2Statement(AbstractJdbc2Connection connection, String sql) throws SQLException
	{
		super(connection, sql);
		resultsettype = java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE;
		concurrency = java.sql.ResultSet.CONCUR_READ_ONLY;
	}

	/*
	 * Execute a SQL statement that may return multiple results. We
	 * don't have to worry about this since we do not support multiple
	 * ResultSets.  You can use getResultSet or getUpdateCount to
	 * retrieve the result.
	 *
	 * @return true if the next result is a ResultSet, false if it is
	 *	an update count or there are no more results
	 * @exception SQLException if a database access error occurs
	 */
	public boolean execute() throws SQLException
	{
		boolean l_return = super.execute();
		//Now do the jdbc2 specific stuff
		//required for ResultSet.getStatement() to work and for updateable resultsets
		((AbstractJdbc2ResultSet)result).setStatement((Statement)this);

		return l_return;
	}
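	/*
	 * Illustrative example (not part of the driver) of how a caller consumes
	 * the boolean returned by execute(), using getResultSet()/getUpdateCount()
	 * as described above.  'conn' stands for any java.sql.Connection obtained
	 * from this driver:
	 *
	 *   PreparedStatement ps = conn.prepareStatement("SELECT datname FROM pg_database");
	 *   if (ps.execute())
	 *   {
	 *       ResultSet rs = ps.getResultSet();
	 *       while (rs.next())
	 *           System.out.println(rs.getString(1));
	 *       rs.close();
	 *   }
	 *   else
	 *   {
	 *       int updateCount = ps.getUpdateCount();
	 *   }
	 *   ps.close();
	 */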

	// ** JDBC 2 Extensions **

	public void addBatch(String p_sql) throws SQLException
	{
		if (batch == null)
			batch = new Vector();
		batch.addElement(p_sql);
	}

	public void clearBatch() throws SQLException
	{
		batch = null;
	}

	public int[] executeBatch() throws SQLException
	{
		if (batch == null)
			batch = new Vector();
		int size = batch.size();
		int[] result = new int[size];
		int i = 0;
		try
		{
			for (i = 0;i < size;i++)
				result[i] = this.executeUpdate((String)batch.elementAt(i));
		}
		catch (SQLException e)
		{
			int[] resultSucceeded = new int[i];
			System.arraycopy(result, 0, resultSucceeded, 0, i);

			PBatchUpdateException updex =
				new PBatchUpdateException("postgresql.stat.batch.error",
										  new Integer(i), batch.elementAt(i), resultSucceeded);
			updex.setNextException(e);

			throw updex;
		}
		finally
		{
			batch.removeAllElements();
		}
		return result;
	}
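	/*
	 * Illustrative usage of the batch methods above ('stmt', the table and the
	 * values are examples only).  Each queued statement is executed in turn via
	 * executeUpdate(); if one fails, the update counts of the statements that
	 * did succeed are carried by the PBatchUpdateException thrown above.
	 *
	 *   stmt.addBatch("INSERT INTO mytable VALUES (1)");
	 *   stmt.addBatch("INSERT INTO mytable VALUES (2)");
	 *   int[] counts = stmt.executeBatch();
	 */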

	public void cancel() throws SQLException
	{
		((AbstractJdbc2Connection)connection).cancelQuery();
	}

	public java.sql.Connection getConnection() throws SQLException
	{
		return (java.sql.Connection)connection;
	}

	public int getFetchDirection() throws SQLException
	{
		throw new PSQLException("postgresql.psqlnotimp");
	}

	public int getFetchSize() throws SQLException
	{
		return 0;
	}

	public int getResultSetConcurrency() throws SQLException
	{
		return concurrency;
	}

	public int getResultSetType() throws SQLException
	{
		return resultsettype;
	}

	public void setFetchDirection(int direction) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public void setFetchSize(int rows) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public void setResultSetConcurrency(int value) throws SQLException
	{
		concurrency = value;
	}

	public void setResultSetType(int value) throws SQLException
	{
		resultsettype = value;
	}

	public void addBatch() throws SQLException
	{
		// toString() on a prepared statement returns its SQL with the current
		// parameter values substituted, so the statement can be queued as plain SQL
		addBatch(this.toString());
	}

	public java.sql.ResultSetMetaData getMetaData() throws SQLException
	{
		java.sql.ResultSet rs = getResultSet();
		if (rs != null)
			return rs.getMetaData();

		// There is no ResultSet to describe (for example the statement has
		// not been executed yet), so there is no metadata to return.
		return null;
	}

	public void setArray(int i, java.sql.Array x) throws SQLException
	{
		setString(i, x.toString());
	}

	public void setBlob(int i, Blob x) throws SQLException
	{
		InputStream l_inStream = x.getBinaryStream();
		LargeObjectManager lom = connection.getLargeObjectAPI();
		int oid = lom.create();
		LargeObject lob = lom.open(oid);
		OutputStream los = lob.getOutputStream();
		byte[] buf = new byte[4096];
		try
		{
			// could be buffered, but the OutputStream returned by LargeObject
			// is buffered internally anyhow, so there would be no performance
			// gain; if anything it would be worse
			int bytesRemaining = (int)x.length();
			int numRead = l_inStream.read(buf, 0, Math.min(buf.length, bytesRemaining));
			while (numRead != -1 && bytesRemaining > 0)
			{
				bytesRemaining -= numRead;
				if ( numRead == buf.length )
					los.write(buf); // saves a buffer creation and copy in LargeObject since it's full
				else
					los.write(buf,0,numRead);
				numRead = l_inStream.read(buf, 0, Math.min(buf.length, bytesRemaining));
			}
		}
		catch (IOException se)
		{
			throw new PSQLException("postgresql.unusual", se);
		}
		finally
		{
			try
			{
				los.close();
				l_inStream.close();
			}
			catch (Exception e) {}
		}
		setInt(i, oid);
	}
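	/*
	 * Note on setBlob() above: the Blob's binary stream is copied into a newly
	 * created large object and the parameter is then bound to that object's
	 * OID via setInt().  Illustrative caller-side usage ('conn', 'myBlob' and
	 * the table/column names are examples only):
	 *
	 *   PreparedStatement ps =
	 *       conn.prepareStatement("INSERT INTO images (data) VALUES (?)");
	 *   ps.setBlob(1, myBlob);
	 *   ps.executeUpdate();
	 */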

	public void setCharacterStream(int i, java.io.Reader x, int length) throws SQLException
	{
		if (connection.haveMinimumCompatibleVersion("7.2"))
		{
			//Version 7.2 supports CharacterStream for the PG text types
			//As the spec/javadoc for this method indicate this is to be used for
			//large text values (i.e. LONGVARCHAR).  PG doesn't have a separate
			//long varchar datatype, but with toast all the text datatypes are capable of
			//handling very large values.  Thus the implementation ends up calling
			//setString() since there is no current way to stream the value to the server
			char[] l_chars = new char[length];
			int l_charsRead = 0;
			try
			{
				//Reader.read() may return fewer characters than requested, so
				//loop until 'length' characters have been read or EOF is reached
				while (l_charsRead < length)
				{
					int n = x.read(l_chars, l_charsRead, length - l_charsRead);
					if (n < 0)
						break;
					l_charsRead += n;
				}
			}
			catch (IOException l_ioe)
			{
				throw new PSQLException("postgresql.unusual", l_ioe);
			}
			setString(i, new String(l_chars, 0, l_charsRead));
		}
		else
		{
			//Version 7.1 only supported streams for LargeObjects
			//but the jdbc spec indicates that streams should be
			//available for LONGVARCHAR instead
			LargeObjectManager lom = connection.getLargeObjectAPI();
			int oid = lom.create();
			LargeObject lob = lom.open(oid);
			OutputStream los = lob.getOutputStream();
			try
			{
				// could be buffered, but the OutputStream returned by LargeObject
				// is buffered internally anyhow, so there would be no performance
				// gain; if anything it would be worse
				int c = x.read();
				int p = 0;
				while (c > -1 && p < length)
				{
					los.write(c);
					c = x.read();
					p++;
				}
				los.close();
			}
			catch (IOException se)
			{
				throw new PSQLException("postgresql.unusual", se);
			}
			// lob is closed by the stream so don't call lob.close()
			setInt(i, oid);
		}
	}

	public void setClob(int i, Clob x) throws SQLException
	{
		InputStream l_inStream = x.getAsciiStream();
		int l_length = (int) x.length();
		LargeObjectManager lom = connection.getLargeObjectAPI();
		int oid = lom.create();
		LargeObject lob = lom.open(oid);
		OutputStream los = lob.getOutputStream();
		try
		{
			// could be buffered, but the OutputStream returned by LargeObject
			// is buffered internally anyhow, so there would be no performance
			// gain; if anything it would be worse
			int c = l_inStream.read();
			int p = 0;
			while (c > -1 && p < l_length)
			{
				los.write(c);
				c = l_inStream.read();
				p++;
			}
			los.close();
		}
		catch (IOException se)
		{
			throw new PSQLException("postgresql.unusual", se);
		}
		// lob is closed by the stream so don't call lob.close()
		setInt(i, oid);
	}
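	/*
	 * Note on setClob() above: as with setBlob(), the Clob's contents are
	 * copied into a newly created large object and the parameter is bound
	 * to that object's OID via setInt().
	 */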

	public void setNull(int i, int t, String s) throws SQLException
	{
		setNull(i, t);
	}

	public void setRef(int i, Ref x) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public void setDate(int i, java.sql.Date d, java.util.Calendar cal) throws SQLException
	{
		if (cal == null)
			setDate(i, d);
		else
		{
			cal.setTime(d);
			setDate(i, new java.sql.Date(cal.getTime().getTime()));
		}
	}

	public void setTime(int i, Time t, java.util.Calendar cal) throws SQLException
	{
		if (cal == null)
			setTime(i, t);
		else
		{
			cal.setTime(t);
			setTime(i, new java.sql.Time(cal.getTime().getTime()));
		}
	}

	public void setTimestamp(int i, Timestamp t, java.util.Calendar cal) throws SQLException
	{
		if (cal == null)
			setTimestamp(i, t);
		else
		{
			cal.setTime(t);
			setTimestamp(i, new java.sql.Timestamp(cal.getTime().getTime()));
		}
	}
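	/*
	 * Note on the three Calendar variants above: passing the value through the
	 * supplied Calendar with setTime()/getTime() preserves the underlying
	 * millisecond value, so the Calendar's time zone does not currently alter
	 * the value that gets bound.
	 */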

	// ** JDBC 2 Extensions for CallableStatement **

	public java.sql.Array getArray(int i) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public java.math.BigDecimal getBigDecimal(int parameterIndex) throws SQLException
	{
		checkIndex (parameterIndex, Types.NUMERIC, "BigDecimal");
		return ((BigDecimal)callResult);
	}

	public Blob getBlob(int i) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public Clob getClob(int i) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public Object getObject(int i, java.util.Map map) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public Ref getRef(int i) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public java.sql.Date getDate(int i, java.util.Calendar cal) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public Time getTime(int i, java.util.Calendar cal) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	public Timestamp getTimestamp(int i, java.util.Calendar cal) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}

	// no custom types allowed yet..
	public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException
	{
		throw org.postgresql.Driver.notImplemented();
	}


	//This is needed by AbstractJdbc2ResultSet to determine if the query is updateable or not
	protected String[] getSqlFragments()
	{
		return m_sqlFragments;
	}

}