From c3f6b46d8a4f73722439b4755ef5df42adff81d9 Mon Sep 17 00:00:00 2001 From: Michael Kleehammer Date: Mon, 6 Oct 2008 21:07:44 -0500 Subject: [PATCH] Import from Subversion 2.0.63; reworked versioning --- .gitignore | 9 + MANIFEST.in | 13 + README.txt | 143 +++ setup.py | 165 ++++ src/buffer.cpp | 58 ++ src/buffer.h | 55 ++ src/connection.cpp | 751 ++++++++++++++ src/connection.h | 55 ++ src/cursor.cpp | 2046 +++++++++++++++++++++++++++++++++++++++ src/cursor.h | 113 +++ src/errors.cpp | 300 ++++++ src/errors.h | 52 + src/getdata.cpp | 573 +++++++++++ src/getdata.h | 9 + src/params.cpp | 746 ++++++++++++++ src/params.h | 11 + src/pyodbc.h | 136 +++ src/pyodbc.rc | 100 ++ src/pyodbcmodule.cpp | 813 ++++++++++++++++ src/pyodbcmodule.h | 62 ++ src/resource.h | 14 + src/row.cpp | 343 +++++++ src/row.h | 34 + src/wrapper.h | 50 + tests/accesstests.py | 648 +++++++++++++ tests/dbapi20.py | 850 ++++++++++++++++ tests/dbapitests.py | 43 + tests/empty.accdb | Bin 0 -> 311296 bytes tests/empty.mdb | Bin 0 -> 188416 bytes tests/pgtests.py | 422 ++++++++ tests/sqlservertests.py | 972 +++++++++++++++++++ tests/testutils.py | 99 ++ web/docs.html | 1166 ++++++++++++++++++++++ web/index.html | 234 +++++ web/license.html | 48 + web/styles.css | 131 +++ web/tutorial.html | 122 +++ 37 files changed, 11386 insertions(+) create mode 100644 .gitignore create mode 100644 MANIFEST.in create mode 100644 README.txt create mode 100644 setup.py create mode 100644 src/buffer.cpp create mode 100644 src/buffer.h create mode 100644 src/connection.cpp create mode 100644 src/connection.h create mode 100644 src/cursor.cpp create mode 100644 src/cursor.h create mode 100644 src/errors.cpp create mode 100644 src/errors.h create mode 100644 src/getdata.cpp create mode 100644 src/getdata.h create mode 100644 src/params.cpp create mode 100644 src/params.h create mode 100644 src/pyodbc.h create mode 100644 src/pyodbc.rc create mode 100644 src/pyodbcmodule.cpp create mode 100644 src/pyodbcmodule.h create 
mode 100644 src/resource.h create mode 100644 src/row.cpp create mode 100644 src/row.h create mode 100644 src/wrapper.h create mode 100644 tests/accesstests.py create mode 100644 tests/dbapi20.py create mode 100644 tests/dbapitests.py create mode 100644 tests/empty.accdb create mode 100644 tests/empty.mdb create mode 100644 tests/pgtests.py create mode 100644 tests/sqlservertests.py create mode 100644 tests/testutils.py create mode 100644 web/docs.html create mode 100644 web/index.html create mode 100644 web/license.html create mode 100644 web/styles.css create mode 100644 web/tutorial.html diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..d3fb18ae --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +setup.cfg +MANIFEST +build +dist +*.pdb +*.pyc +*.pyo +tmp +web/*.cmd diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..0a3d154b --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,13 @@ +include src\*.h +include src\*.cpp +include tests\* +include README.txt +prune setup.cfg + +include web\* +prune web\*.cmd + +# For some reason, I keep getting setup.PY. Probably +# because I use PATHEXT on Windows. +prune setup.PY +include setup.py diff --git a/README.txt b/README.txt new file mode 100644 index 00000000..95ff5d21 --- /dev/null +++ b/README.txt @@ -0,0 +1,143 @@ + +Overview +======== + +This project is a Python database module for ODBC that implements the Python DB API 2.0 +specification. + + homepage: http://sourceforge.net/projects/pyodbc + source: http://github.com/mkleehammer/pyodbc + +This module requires: + + * Python 2.4 or greater + * ODBC 3.0 or greater + +On Windows, the easiest way to install is to use the Windows installer program available at +http://sourceforge.net/projects/pyodbc. 
+ +Source can be obtained at + +To build from source, either check the source out of version control or download a source +extract and run: + + python setup.py build install + +Module Specific Behavior +======================= + +General +------- + +* The pyodbc.connect function accepts a single parameter: the ODBC connection string. This + string is not read or modified by pyodbc, so consult the ODBC documentation or your ODBC + driver's documentation for details. The general format is: + + cnxn = pyodbc.connect('DSN=mydsn;UID=userid;PWD=pwd') + +* Connection caching in the ODBC driver manager is automatically enabled. + +* Autocommit is not supported. Always call cnxn.commit() since the DB API specification + requires a rollback when a connection is closed that was not specifically committed. + +* When a connection is closed, all cursors created from the connection are closed. + + +Data Types +---------- + +* Dates, times, and timestamps use the Python datetime module's date, time, and datetime + classes. These classes can be passed directly as parameters and will be returned when + querying date/time columns. + +* Binary data is passed and returned in Python buffer objects. + +* Decimal and numeric columns are passed and returned using the Python 2.4 decimal class. + + +Convenience Methods +------------------- + +* Cursors are iterable and returns Row objects. + + cursor.execute("select a,b from tmp") + for row in cursor: + print row + + +* The DB API PEP does not specify the return type for Cursor.execute, so pyodbc tries to be + maximally convenient: + + 1) If a SELECT is executed, the Cursor itself is returned to allow code like the following: + + for row in cursor.execute("select a,b from tmp"): + print row + + 2) If an UPDATE, INSERT, or DELETE statement is issued, the number of rows affected is + returned: + + count = cursor.execute("delete from tmp where a in (1,2,3)") + + 3) Otherwise (CREATE TABLE, etc.), None is returned. 
+ + +* An execute method has been added to the Connection class. It creates a Cursor and returns + whatever Cursor.execute returns. This allows for the following: + + for row in cnxn.execute("select a,b from tmp"): + print row + + or + + rows = cnxn.execute("select * from tmp where a in (1,2,3)").fetchall() + + Since each call creates a new Cursor, only use this when executing a single statement. + + +* Both Cursor.execute and Connection.execute allow parameters to be passed as additional + parameters following the query. + + cnxn.execute("select a,b from tmp where a=? or a=?", 1, 2) + + The specification is not entirely clear, but most other drivers require parameters to be + passed in a sequence. To ensure compatibility, pyodbc will also accept this format: + + cnxn.execute("select a,b from tmp where a=? or a=?", (1, 2)) + + +* Row objects are derived from tuple to match the API specification, but they also support + accessing columns by name. + + for row in cnxn.execute("select A,b from tmp"): + print row.a, row.b + + +* The following are not supported or are ignored: nextset, setinputsizes, setoutputsizes. + + +* Values in Row objects can be replaced, either by name or index. Sometimes it is convenient + to "preprocess" values. + + row = cursor.execute("select a,b from tmp").fetchone() + + row.a = calc(row.a) + row[1] = calc(row.b) + + +Goals / Design +============== + +* This module should not require any 3rd party modules other than ODBC. + +* Only built-in data types should be used where possible. + + a) Reduces the number of libraries to learn. + + b) Reduces the number of modules and libraries to install. + + c) Eventually a standard is usually introduced. For example, many previous database drivers + used the mxDate classes. Now that Python 2.3 has introduced built-in date/time classes, + using those modules is more complicated than using the built-ins. + +* It should adhere to the DB API specification, but be maximally convenient where possible. 
+ The most common usages should be optimized for convenience and speed. diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..ecd7f06f --- /dev/null +++ b/setup.py @@ -0,0 +1,165 @@ +#!/usr/bin/python + +import sys, os, re +from distutils.core import setup, Command +from distutils.extension import Extension +from distutils.errors import * +from os.path import exists, abspath, dirname, join, isdir + +OFFICIAL_BUILD = 9999 + +def main(): + + version_str, version = get_version() + + files = [ 'pyodbcmodule.cpp', 'cursor.cpp', 'row.cpp', 'connection.cpp', 'buffer.cpp', 'params.cpp', 'errors.cpp', 'getdata.cpp' ] + files = [ join('src', f) for f in files ] + libraries = [] + + extra_compile_args = None + extra_link_args = None + + if os.name == 'nt': + # Windows native + files.append(join('src', 'pyodbc.rc')) + libraries.append('odbc32') + extra_compile_args = [ '/W4' ] + + # Add debugging symbols + extra_compile_args = [ '/W4', '/Zi', '/Od' ] + extra_link_args = [ '/DEBUG' ] + + elif os.environ.get("OS", '').lower().startswith('windows'): + # Windows Cygwin (posix on windows) + # OS name not windows, but still on Windows + libraries.append('odbc32') + + elif sys.platform == 'darwin': + # OS/X now ships with iODBC. + libraries.append('iodbc') + + else: + # Other posix-like: Linux, Solaris, etc. + # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.? + libraries.append('odbc') + + if exists('MANIFEST'): + os.remove('MANIFEST') + + setup (name = "pyodbc", + version = version_str, + description = "DB API Module for ODBC", + + long_description = ('A Python DB API 2 module for ODBC. 
This project provides an up-to-date, ' + 'convenient interface to ODBC using native data types like datetime and decimal.'), + + maintainer = "Michael Kleehammer", + maintainer_email = "michael@kleehammer.com", + + ext_modules = [ Extension('pyodbc', files, + libraries=libraries, + define_macros = [ ('PYODBC_%s' % name, value) for name,value in zip(['MAJOR', 'MINOR', 'MICRO', 'BUILD'], version) ], + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args + ) ], + + classifiers = [ 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: MIT License', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'Programming Language :: Python', + 'Topic :: Database', + ], + + url = 'http://pyodbc.sourceforge.net', + download_url = 'http://github.com/pyodbc/pyodbc/tree/master') + + +def get_version(): + """ + Returns the version of the product as (description, [major,minor,micro,beta]). + + If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). + + 1. If in a git repository, use the latest tag (git describe). + 2. If in an unzipped source directory (from setup.py sdist), + read the version from the PKG-INFO file. + 3. Use 2.1.0.0 and complain a lot. + """ + # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test + # release. + # + # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce + # the version using just these pieces, such as 2.1.4. + # + # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a + # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use + # this count as the beta id (beta1, beta2, etc.) 
+ # + # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *after* the + # official, so we set the final build number to 9999, but we don't show it. + + name = None # branch/feature name. Should be None for official builds. + numbers = None # The 4 integers that make up the version. + + # If this is a source release the version will have already been assigned and be in the PKG-INFO file. + + name, numbers = _get_version_pkginfo() + + # If not a source release, we should be in a git repository. Look for the latest tag. + + if not numbers: + name, numbers = _get_version_git() + + if not numbers: + print 'WARNING: Unable to determine version. Using 2.1.0.0' + name, numbers = '2.1.0-unsupported', [2,1,0,0] + + return name, numbers + + +def _get_version_pkginfo(): + filename = join(dirname(abspath(__file__)), 'PKG-INFO') + if exists(filename): + re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: -beta(\d+))?', re.VERBOSE) + for line in open(filename): + match = re_ver.search(line) + if match: + name = line.split(':', 1)[1].strip() + numbers = [ int(n or 0) for n in match.groups() ] + return name, numbers + + return None, None + + +def _get_version_git(): + n, result = getoutput('git describe --tags') + if n: + print 'WARNING: git describe failed with: %s %s' % (n, result) + return None, None + + match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) + if not match: + return None, None + + numbers = [ int(n or OFFICIAL_BUILD) for n in match.groups() ] + if numbers[-1] == OFFICIAL_BUILD: + name = '%s.%s.%s' % tuple(numbers[:3]) + if numbers[-1] != OFFICIAL_BUILD: + # This is a beta of the next micro release, so increment the micro number to reflect this. 
+ numbers[-2] += 1 + name = '%s.%s.%s-beta%s' % tuple(numbers) + return name, numbers + + + +def getoutput(cmd): + pipe = os.popen(cmd, 'r') + text = pipe.read().rstrip('\n') + status = pipe.close() or 0 + return status, text + +if __name__ == '__main__': + main() diff --git a/src/buffer.cpp b/src/buffer.cpp new file mode 100644 index 00000000..e0706789 --- /dev/null +++ b/src/buffer.cpp @@ -0,0 +1,58 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +#include "pyodbc.h" +#include "buffer.h" +#include "pyodbcmodule.h" + +Py_ssize_t +PyBuffer_GetMemory(PyObject* buffer, const char** pp) +{ + PyBufferProcs* procs = buffer->ob_type->tp_as_buffer; + + if (!procs || !PyType_HasFeature(buffer->ob_type, Py_TPFLAGS_HAVE_GETCHARBUFFER)) + { + // Can't access the memory directly because the buffer object doesn't support it. + return -1; + } + + if (procs->bf_getsegcount(buffer, 0) != 1) + { + // Can't access the memory directly because there is more than one segment. 
+ return -1; + } + +#if PY_VERSION_HEX >= 0x02050000 + char* pT = 0; +#else + const char* pT = 0; +#endif + Py_ssize_t cb = procs->bf_getcharbuffer(buffer, 0, &pT); + + if (pp) + *pp = pT; + + return cb; +} + +Py_ssize_t +PyBuffer_Size(PyObject* self) +{ + if (!PyBuffer_Check(self)) + { + PyErr_SetString(PyExc_TypeError, "Not a buffer!"); + return 0; + } + + Py_ssize_t total_len = 0; + self->ob_type->tp_as_buffer->bf_getsegcount(self, &total_len); + return total_len; +} diff --git a/src/buffer.h b/src/buffer.h new file mode 100644 index 00000000..a8c94e25 --- /dev/null +++ b/src/buffer.h @@ -0,0 +1,55 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +#ifndef _BUFFER_H +#define _BUFFER_H + +// If the buffer object has a single, accessible segment, returns the length of the buffer. If 'pp' is not NULL, the +// address of the segment is also returned. If there is more than one segment or if it cannot be accessed, -1 is +// returned and 'pp' is not modified. +Py_ssize_t +PyBuffer_GetMemory(PyObject* buffer, const char** pp); + +// Returns the size of a Python buffer. 
+// +// If an error occurs, zero is returned, but zero is a valid buffer size (I guess), so use PyErr_Occurred to determine +// if it represents a failure. +Py_ssize_t +PyBuffer_Size(PyObject* self); + + +class BufferSegmentIterator +{ + PyObject* pBuffer; + Py_ssize_t iSegment; + Py_ssize_t cSegments; + +public: + BufferSegmentIterator(PyObject* _pBuffer) + { + pBuffer = _pBuffer; + PyBufferProcs* procs = pBuffer->ob_type->tp_as_buffer; + iSegment = 0; + cSegments = procs->bf_getsegcount(pBuffer, 0); + } + + bool Next(byte*& pb, SQLLEN &cb) + { + if (iSegment >= cSegments) + return false; + + PyBufferProcs* procs = pBuffer->ob_type->tp_as_buffer; + cb = procs->bf_getreadbuffer(pBuffer, iSegment++, (void**)&pb); + return true; + } +}; + +#endif diff --git a/src/connection.cpp b/src/connection.cpp new file mode 100644 index 00000000..a5de31e4 --- /dev/null +++ b/src/connection.cpp @@ -0,0 +1,751 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +#include "pyodbc.h" +#include "connection.h" +#include "cursor.h" +#include "pyodbcmodule.h" +#include "errors.h" + +static char connection_doc[] = + "Connection objects manage connections to the database.\n" + "\n" + "Each manages a single ODBC HDBC."; + +static Connection* +Connection_Validate(PyObject* self) +{ + Connection* cnxn; + + if (self == 0 || !Connection_Check(self)) + { + PyErr_SetString(PyExc_TypeError, "Connection object required"); + return 0; + } + + cnxn = (Connection*)self; + + if (cnxn->hdbc == SQL_NULL_HANDLE) + { + PyErr_SetString(ProgrammingError, "Attempt to use a closed connection."); + return 0; + } + + return cnxn; +} + +static bool Connect(PyObject* pConnectString, HDBC hdbc, bool fAnsi) +{ + // This should have been checked by the global connect function. + I(PyString_Check(pConnectString) || PyUnicode_Check(pConnectString)); + + const int cchMax = 600; + + if (PySequence_Length(pConnectString) >= cchMax) + { + PyErr_SetString(PyExc_TypeError, "connection string too long"); + return false; + } + + // The driver manager determines if the app is a Unicode app based on whether we call SQLDriverConnectA or + // SQLDriverConnectW. Some drivers, notably Microsoft Access/Jet, change their behavior based on this, so we try + // the Unicode version first. (The Access driver only supports Unicode text, but SQLDescribeCol returns SQL_CHAR + // instead of SQL_WCHAR if we connect with the ANSI version. Obviously this causes lots of errors since we believe + // what it tells us (SQL_CHAR).) + + // Python supports only UCS-2 and UCS-4, so we shouldn't need to worry about receiving surrogate pairs. However, + // Windows does use UCS-16, so it is possible something would be misinterpreted as one. We may need to examine + // this more. 
+ + SQLRETURN ret; + + if (!fAnsi) + { + SQLWCHAR szConnectW[cchMax]; + if (PyUnicode_Check(pConnectString)) + { + Py_UNICODE* p = PyUnicode_AS_UNICODE(pConnectString); + for (int i = 0, c = PyUnicode_GET_SIZE(pConnectString); i <= c; i++) + szConnectW[i] = (wchar_t)p[i]; + } + else + { + const char* p = PyString_AS_STRING(pConnectString); + for (int i = 0, c = PyString_GET_SIZE(pConnectString); i <= c; i++) + szConnectW[i] = (wchar_t)p[i]; + } + + Py_BEGIN_ALLOW_THREADS + ret = SQLDriverConnectW(hdbc, 0, szConnectW, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); + Py_END_ALLOW_THREADS + if (SQL_SUCCEEDED(ret)) + return true; + + // The Unicode function failed. If the error is that the driver doesn't have a Unicode version (IM001), continue + // to the ANSI version. + + PyObject* error = GetErrorFromHandle("SQLDriverConnectW", hdbc, SQL_NULL_HANDLE); + if (!HasSqlState(error, "IM001")) + { + PyErr_SetObject(PyObject_Type(error), error); + return false; + } + Py_XDECREF(error); + } + + SQLCHAR szConnect[cchMax]; + if (PyUnicode_Check(pConnectString)) + { + Py_UNICODE* p = PyUnicode_AS_UNICODE(pConnectString); + for (int i = 0, c = PyUnicode_GET_SIZE(pConnectString); i <= c; i++) + { + if (p[i] > 0xFF) + { + PyErr_SetString(PyExc_TypeError, "A Unicode connection string was supplied but the driver does " + "not have a Unicode connect function"); + return false; + } + szConnect[i] = (char)p[i]; + } + } + else + { + const char* p = PyString_AS_STRING(pConnectString); + memcpy(szConnect, p, PyString_GET_SIZE(pConnectString) + 1); + } + + Py_BEGIN_ALLOW_THREADS + ret = SQLDriverConnect(hdbc, 0, szConnect, SQL_NTS, 0, 0, 0, SQL_DRIVER_NOPROMPT); + Py_END_ALLOW_THREADS + if (SQL_SUCCEEDED(ret)) + return true; + + RaiseErrorFromHandle("SQLDriverConnect", hdbc, SQL_NULL_HANDLE); + + return false; +} + + +PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi) +{ + // pConnectString + // A string or unicode object. (This must be checked by the caller.) 
+ // + // fAnsi + // If true, do not attempt a Unicode connection. + + // + // Allocate HDBC and connect + // + + HDBC hdbc = SQL_NULL_HANDLE; + if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc))) + return RaiseErrorFromHandle("SQLAllocHandle", SQL_NULL_HANDLE, SQL_NULL_HANDLE); + + if (!Connect(pConnectString, hdbc, fAnsi)) + { + // Connect has already set an exception. + SQLFreeHandle(SQL_HANDLE_DBC, hdbc); + return 0; + } + + // + // Connected, so allocate the Connection object. + // + + // Set all variables to something valid, so we don't crash in dealloc if this function fails. + + Connection* cnxn = PyObject_NEW(Connection, &ConnectionType); + + if (cnxn == 0) + { + SQLFreeHandle(SQL_HANDLE_DBC, hdbc); + return 0; + } + + cnxn->hdbc = hdbc; + cnxn->searchescape = 0; + cnxn->odbc_major = 3; + cnxn->odbc_minor = 50; + cnxn->nAutoCommit = fAutoCommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; + cnxn->supports_describeparam = false; + cnxn->datetime_precision = 19; // default: "yyyy-mm-dd hh:mm:ss" + + // + // Initialize autocommit mode. + // + + // The DB API says we have to default to manual-commit, but ODBC defaults to auto-commit. We also provide a + // keyword parameter that allows the user to override the DB API and force us to start in auto-commit (in which + // case we don't have to do anything). + + if (fAutoCommit == false && !SQL_SUCCEEDED(SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)cnxn->nAutoCommit, SQL_IS_UINTEGER))) + { + RaiseErrorFromHandle("SQLSetConnnectAttr(SQL_ATTR_AUTOCOMMIT)", cnxn->hdbc, SQL_NULL_HANDLE); + Py_DECREF(cnxn); + return 0; + } + +#ifdef TRACE_ALL + printf("cnxn.new cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); +#endif + + // + // Gather connection-level information we'll need later. + // + + // FUTURE: Measure performance here. Consider caching by connection string if necessary. 
+ + char szVer[20]; + SQLSMALLINT cch = 0; + if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_DRIVER_ODBC_VER, szVer, _countof(szVer), &cch))) + { + char* dot = strchr(szVer, '.'); + if (dot) + { + *dot = '\0'; + cnxn->odbc_major=(char)atoi(szVer); + cnxn->odbc_minor=(char)atoi(dot + 1); + } + } + + char szYN[2]; + if (SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, SQL_DESCRIBE_PARAMETER, szYN, _countof(szYN), &cch))) + { + cnxn->supports_describeparam = szYN[0] == 'Y'; + } + + // What is the datetime precision? This unfortunately requires a cursor (HSTMT). + + HSTMT hstmt = 0; + if (SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &hstmt))) + { + if (SQL_SUCCEEDED(SQLGetTypeInfo(hstmt, SQL_TYPE_TIMESTAMP)) && SQL_SUCCEEDED(SQLFetch(hstmt))) + { + SQLINTEGER columnsize; + if (SQL_SUCCEEDED(SQLGetData(hstmt, 3, SQL_INTEGER, &columnsize, sizeof(columnsize), 0))) + { + cnxn->datetime_precision = columnsize; + } + } + + SQLFreeStmt(hstmt, SQL_CLOSE); + } + + return reinterpret_cast(cnxn); +} + + +static int +Connection_clear(Connection* cnxn) +{ + // Internal method for closing the connection. (Not called close so it isn't confused with the external close + // method.) + + if (cnxn->hdbc != SQL_NULL_HANDLE) + { + // REVIEW: Release threads? (But make sure you zero out hdbc *first*! 
+ +#ifdef TRACE_ALL + printf("cnxn.clear cnxn=%p hdbc=%d\n", cnxn, cnxn->hdbc); +#endif + + if (cnxn->nAutoCommit == SQL_AUTOCOMMIT_OFF) + SQLEndTran(SQL_HANDLE_DBC, cnxn->hdbc, SQL_ROLLBACK); + SQLDisconnect(cnxn->hdbc); + SQLFreeHandle(SQL_HANDLE_DBC, cnxn->hdbc); + cnxn->hdbc = SQL_NULL_HANDLE; + } + + Py_XDECREF(cnxn->searchescape); + cnxn->searchescape = 0; + + return 0; +} + +static void +Connection_dealloc(PyObject* self) +{ + Connection* cnxn = (Connection*)self; + Connection_clear(cnxn); + PyObject_Del(self); +} + +static char close_doc[] = + "Close the connection now (rather than whenever __del__ is called).\n" + "\n" + "The connection will be unusable from this point forward and a ProgrammingError\n" + "will be raised if any operation is attempted with the connection. The same\n" + "applies to all cursor objects trying to use the connection.\n" + "\n" + "Note that closing a connection without committing the changes first will cause\n" + "an implicit rollback to be performed."; + +static PyObject* +Connection_close(PyObject* self, PyObject* args) +{ + UNUSED(args); + + Connection* cnxn = Connection_Validate(self); + if (!cnxn) + return 0; + + Connection_clear(cnxn); + + Py_RETURN_NONE; +} + +static PyObject* +Connection_cursor(PyObject* self, PyObject* args) +{ + UNUSED(args); + + Connection* cnxn = Connection_Validate(self); + if (!cnxn) + return 0; + + return (PyObject*)Cursor_New(cnxn); +} + +static PyObject* +Connection_execute(PyObject* self, PyObject* args) +{ + PyObject* result = 0; + + Cursor* cursor; + Connection* cnxn = Connection_Validate(self); + + if (!cnxn) + return 0; + + cursor = Cursor_New(cnxn); + if (!cursor) + return 0; + + result = Cursor_execute((PyObject*)cursor, args); + + Py_DECREF((PyObject*)cursor); + + return result; +} + +enum +{ + GI_YESNO, + GI_STRING, + GI_UINTEGER, + GI_USMALLINT, +}; + +struct GetInfoType +{ + SQLUSMALLINT infotype; + int datatype; // GI_XXX +}; + +static const GetInfoType aInfoTypes[] = { + { 
SQL_ACCESSIBLE_PROCEDURES, GI_YESNO }, + { SQL_ACCESSIBLE_TABLES, GI_YESNO }, + { SQL_ACTIVE_ENVIRONMENTS, GI_USMALLINT }, + { SQL_AGGREGATE_FUNCTIONS, GI_UINTEGER }, + { SQL_ALTER_DOMAIN, GI_UINTEGER }, + { SQL_ALTER_TABLE, GI_UINTEGER }, + { SQL_ASYNC_MODE, GI_UINTEGER }, + { SQL_BATCH_ROW_COUNT, GI_UINTEGER }, + { SQL_BATCH_SUPPORT, GI_UINTEGER }, + { SQL_BOOKMARK_PERSISTENCE, GI_UINTEGER }, + { SQL_CATALOG_LOCATION, GI_USMALLINT }, + { SQL_CATALOG_NAME, GI_YESNO }, + { SQL_CATALOG_NAME_SEPARATOR, GI_STRING }, + { SQL_CATALOG_TERM, GI_STRING }, + { SQL_CATALOG_USAGE, GI_UINTEGER }, + { SQL_COLLATION_SEQ, GI_STRING }, + { SQL_COLUMN_ALIAS, GI_YESNO }, + { SQL_CONCAT_NULL_BEHAVIOR, GI_USMALLINT }, + { SQL_CONVERT_FUNCTIONS, GI_UINTEGER }, + { SQL_CONVERT_VARCHAR, GI_UINTEGER }, + { SQL_CORRELATION_NAME, GI_USMALLINT }, + { SQL_CREATE_ASSERTION, GI_UINTEGER }, + { SQL_CREATE_CHARACTER_SET, GI_UINTEGER }, + { SQL_CREATE_COLLATION, GI_UINTEGER }, + { SQL_CREATE_DOMAIN, GI_UINTEGER }, + { SQL_CREATE_SCHEMA, GI_UINTEGER }, + { SQL_CREATE_TABLE, GI_UINTEGER }, + { SQL_CREATE_TRANSLATION, GI_UINTEGER }, + { SQL_CREATE_VIEW, GI_UINTEGER }, + { SQL_CURSOR_COMMIT_BEHAVIOR, GI_USMALLINT }, + { SQL_CURSOR_ROLLBACK_BEHAVIOR, GI_USMALLINT }, + { SQL_DATABASE_NAME, GI_STRING }, + { SQL_DATA_SOURCE_NAME, GI_STRING }, + { SQL_DATA_SOURCE_READ_ONLY, GI_YESNO }, + { SQL_DATETIME_LITERALS, GI_UINTEGER }, + { SQL_DBMS_NAME, GI_STRING }, + { SQL_DBMS_VER, GI_STRING }, + { SQL_DDL_INDEX, GI_UINTEGER }, + { SQL_DEFAULT_TXN_ISOLATION, GI_UINTEGER }, + { SQL_DESCRIBE_PARAMETER, GI_YESNO }, + { SQL_DM_VER, GI_STRING }, + { SQL_DRIVER_NAME, GI_STRING }, + { SQL_DRIVER_ODBC_VER, GI_STRING }, + { SQL_DRIVER_VER, GI_STRING }, + { SQL_DROP_ASSERTION, GI_UINTEGER }, + { SQL_DROP_CHARACTER_SET, GI_UINTEGER }, + { SQL_DROP_COLLATION, GI_UINTEGER }, + { SQL_DROP_DOMAIN, GI_UINTEGER }, + { SQL_DROP_SCHEMA, GI_UINTEGER }, + { SQL_DROP_TABLE, GI_UINTEGER }, + { SQL_DROP_TRANSLATION, GI_UINTEGER }, + { 
SQL_DROP_VIEW, GI_UINTEGER }, + { SQL_DYNAMIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, + { SQL_DYNAMIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, + { SQL_EXPRESSIONS_IN_ORDERBY, GI_YESNO }, + { SQL_FILE_USAGE, GI_USMALLINT }, + { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1, GI_UINTEGER }, + { SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2, GI_UINTEGER }, + { SQL_GETDATA_EXTENSIONS, GI_UINTEGER }, + { SQL_GROUP_BY, GI_USMALLINT }, + { SQL_IDENTIFIER_CASE, GI_USMALLINT }, + { SQL_IDENTIFIER_QUOTE_CHAR, GI_STRING }, + { SQL_INDEX_KEYWORDS, GI_UINTEGER }, + { SQL_INFO_SCHEMA_VIEWS, GI_UINTEGER }, + { SQL_INSERT_STATEMENT, GI_UINTEGER }, + { SQL_INTEGRITY, GI_YESNO }, + { SQL_KEYSET_CURSOR_ATTRIBUTES1, GI_UINTEGER }, + { SQL_KEYSET_CURSOR_ATTRIBUTES2, GI_UINTEGER }, + { SQL_KEYWORDS, GI_STRING }, + { SQL_LIKE_ESCAPE_CLAUSE, GI_YESNO }, + { SQL_MAX_ASYNC_CONCURRENT_STATEMENTS, GI_UINTEGER }, + { SQL_MAX_BINARY_LITERAL_LEN, GI_UINTEGER }, + { SQL_MAX_CATALOG_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_CHAR_LITERAL_LEN, GI_UINTEGER }, + { SQL_MAX_COLUMNS_IN_GROUP_BY, GI_USMALLINT }, + { SQL_MAX_COLUMNS_IN_INDEX, GI_USMALLINT }, + { SQL_MAX_COLUMNS_IN_ORDER_BY, GI_USMALLINT }, + { SQL_MAX_COLUMNS_IN_SELECT, GI_USMALLINT }, + { SQL_MAX_COLUMNS_IN_TABLE, GI_USMALLINT }, + { SQL_MAX_COLUMN_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_CONCURRENT_ACTIVITIES, GI_USMALLINT }, + { SQL_MAX_CURSOR_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_DRIVER_CONNECTIONS, GI_USMALLINT }, + { SQL_MAX_IDENTIFIER_LEN, GI_USMALLINT }, + { SQL_MAX_INDEX_SIZE, GI_UINTEGER }, + { SQL_MAX_PROCEDURE_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_ROW_SIZE, GI_UINTEGER }, + { SQL_MAX_ROW_SIZE_INCLUDES_LONG, GI_YESNO }, + { SQL_MAX_SCHEMA_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_STATEMENT_LEN, GI_UINTEGER }, + { SQL_MAX_TABLES_IN_SELECT, GI_USMALLINT }, + { SQL_MAX_TABLE_NAME_LEN, GI_USMALLINT }, + { SQL_MAX_USER_NAME_LEN, GI_USMALLINT }, + { SQL_MULTIPLE_ACTIVE_TXN, GI_YESNO }, + { SQL_MULT_RESULT_SETS, GI_YESNO }, + { SQL_NEED_LONG_DATA_LEN, GI_YESNO }, + { 
SQL_NON_NULLABLE_COLUMNS, GI_USMALLINT }, + { SQL_NULL_COLLATION, GI_USMALLINT }, + { SQL_NUMERIC_FUNCTIONS, GI_UINTEGER }, + { SQL_ODBC_INTERFACE_CONFORMANCE, GI_UINTEGER }, + { SQL_ODBC_VER, GI_STRING }, + { SQL_OJ_CAPABILITIES, GI_UINTEGER }, + { SQL_ORDER_BY_COLUMNS_IN_SELECT, GI_YESNO }, + { SQL_PARAM_ARRAY_ROW_COUNTS, GI_UINTEGER }, + { SQL_PARAM_ARRAY_SELECTS, GI_UINTEGER }, + { SQL_PROCEDURES, GI_YESNO }, + { SQL_PROCEDURE_TERM, GI_STRING }, + { SQL_QUOTED_IDENTIFIER_CASE, GI_USMALLINT }, + { SQL_ROW_UPDATES, GI_YESNO }, + { SQL_SCHEMA_TERM, GI_STRING }, + { SQL_SCHEMA_USAGE, GI_UINTEGER }, + { SQL_SCROLL_OPTIONS, GI_UINTEGER }, + { SQL_SEARCH_PATTERN_ESCAPE, GI_STRING }, + { SQL_SERVER_NAME, GI_STRING }, + { SQL_SPECIAL_CHARACTERS, GI_STRING }, + { SQL_SQL92_DATETIME_FUNCTIONS, GI_UINTEGER }, + { SQL_SQL92_FOREIGN_KEY_DELETE_RULE, GI_UINTEGER }, + { SQL_SQL92_FOREIGN_KEY_UPDATE_RULE, GI_UINTEGER }, + { SQL_SQL92_GRANT, GI_UINTEGER }, + { SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, GI_UINTEGER }, + { SQL_SQL92_PREDICATES, GI_UINTEGER }, + { SQL_SQL92_RELATIONAL_JOIN_OPERATORS, GI_UINTEGER }, + { SQL_SQL92_REVOKE, GI_UINTEGER }, + { SQL_SQL92_ROW_VALUE_CONSTRUCTOR, GI_UINTEGER }, + { SQL_SQL92_STRING_FUNCTIONS, GI_UINTEGER }, + { SQL_SQL92_VALUE_EXPRESSIONS, GI_UINTEGER }, + { SQL_SQL_CONFORMANCE, GI_UINTEGER }, + { SQL_STANDARD_CLI_CONFORMANCE, GI_UINTEGER }, + { SQL_STATIC_CURSOR_ATTRIBUTES1, GI_UINTEGER }, + { SQL_STATIC_CURSOR_ATTRIBUTES2, GI_UINTEGER }, + { SQL_STRING_FUNCTIONS, GI_UINTEGER }, + { SQL_SUBQUERIES, GI_UINTEGER }, + { SQL_SYSTEM_FUNCTIONS, GI_UINTEGER }, + { SQL_TABLE_TERM, GI_STRING }, + { SQL_TIMEDATE_ADD_INTERVALS, GI_UINTEGER }, + { SQL_TIMEDATE_DIFF_INTERVALS, GI_UINTEGER }, + { SQL_TIMEDATE_FUNCTIONS, GI_UINTEGER }, + { SQL_TXN_CAPABLE, GI_USMALLINT }, + { SQL_TXN_ISOLATION_OPTION, GI_UINTEGER }, + { SQL_UNION, GI_UINTEGER }, + { SQL_USER_NAME, GI_STRING }, + { SQL_XOPEN_CLI_YEAR, GI_STRING }, +}; + +static PyObject* 
+Connection_getinfo(PyObject* self, PyObject* args) +{ + Connection* cnxn = Connection_Validate(self); + if (!cnxn) + return 0; + + SQLUSMALLINT infotype; + if (!PyArg_ParseTuple(args, "l", &infotype)) + return 0; + + unsigned int i = 0; + for (; i < _countof(aInfoTypes); i++) + { + if (aInfoTypes[i].infotype == infotype) + break; + } + + if (i == _countof(aInfoTypes)) + return RaiseErrorV(0, ProgrammingError, "Invalid getinfo value: %d", infotype); + + char szBuffer[0x1000]; + SQLSMALLINT cch = 0; + + if (!SQL_SUCCEEDED(SQLGetInfo(cnxn->hdbc, infotype, szBuffer, sizeof(szBuffer), &cch))) + { + RaiseErrorFromHandle("SQLGetInfo", cnxn->hdbc, SQL_NULL_HANDLE); + return 0; + } + + PyObject* result = 0; + + switch (aInfoTypes[i].datatype) + { + case GI_YESNO: + result = (szBuffer[0] == 'Y') ? Py_True : Py_False; + Py_INCREF(result); + break; + + case GI_STRING: + result = PyString_FromStringAndSize(szBuffer, (Py_ssize_t)cch); + break; + + case GI_UINTEGER: + { + SQLUINTEGER n = *(SQLUINTEGER*)szBuffer; // Does this work on PPC or do we need a union? + if (n <= (SQLUINTEGER)PyInt_GetMax()) + result = PyInt_FromLong((long)n); + else + result = PyLong_FromUnsignedLong(n); + break; + } + + case GI_USMALLINT: + result = PyInt_FromLong(*(SQLUSMALLINT*)szBuffer); + break; + } + + return result; +} + + +static PyObject* +Connection_endtrans(PyObject* self, PyObject* args, SQLSMALLINT type) +{ + UNUSED(args); + + Connection* cnxn = Connection_Validate(self); + if (!cnxn) + return 0; + +#ifdef TRACE_ALL + printf("%s: cnxn=%p hdbc=%d\n", (type == SQL_COMMIT) ? 
static PyObject*
Connection_commit(PyObject* self, PyObject* args)
{
    // Connection.commit() -- commit the current transaction by delegating to
    // Connection_endtrans with SQL_COMMIT (which calls SQLEndTran on the HDBC).
    return Connection_endtrans(self, args, SQL_COMMIT);
}
Py_True : Py_False; + Py_INCREF(result); + return result; +} + +static int +Connection_setautocommit(PyObject* self, PyObject* value, void* closure) +{ + UNUSED(closure); + + Connection* cnxn = Connection_Validate(self); + if (!cnxn) + return -1; + + if (value == 0) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the autocommit attribute."); + return -1; + } + + int nAutoCommit = PyObject_IsTrue(value) ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; + if (!SQL_SUCCEEDED(SQLSetConnectAttr(cnxn->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)nAutoCommit, SQL_IS_UINTEGER))) + { + RaiseErrorFromHandle("SQLSetConnectAttr", cnxn->hdbc, SQL_NULL_HANDLE); + return -1; + } + + cnxn->nAutoCommit = nAutoCommit; + + return 0; +} + + +PyObject* +Connection_getsearchescape(Connection* self, void* closure) +{ + UNUSED(closure); + + if (!self->searchescape) + { + char sz[8] = { 0 }; + SQLSMALLINT cch = 0; + + if (!SQL_SUCCEEDED(SQLGetInfo(self->hdbc, SQL_SEARCH_PATTERN_ESCAPE, &sz, _countof(sz), &cch))) + return RaiseErrorFromHandle("SQLGetInfo", self->hdbc, SQL_NULL_HANDLE); + + self->searchescape = PyString_FromStringAndSize(sz, (Py_ssize_t)cch); + } + + Py_INCREF(self->searchescape); + return self->searchescape; +} + +static struct PyMethodDef Connection_methods[] = +{ + { "cursor", (PyCFunction)Connection_cursor, METH_NOARGS, cursor_doc }, + { "close", (PyCFunction)Connection_close, METH_NOARGS, close_doc }, + { "execute", (PyCFunction)Connection_execute, METH_VARARGS, execute_doc }, + { "commit", (PyCFunction)Connection_commit, METH_NOARGS, commit_doc }, + { "rollback", (PyCFunction)Connection_rollback, METH_NOARGS, rollback_doc }, + { "getinfo", (PyCFunction)Connection_getinfo, METH_VARARGS, getinfo_doc }, + { 0, 0, 0, 0 } +}; + +static PyGetSetDef Connection_getseters[] = { + { "searchescape", (getter)Connection_getsearchescape, 0, + "The ODBC search pattern escape character, as returned by\n" + "SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE). 
// Python type object for pyodbc.Connection.  Most slots are unused (0); behavior comes
// from Connection_methods and Connection_getseters.  tp_new is 0, so instances cannot be
// created from Python -- they are created only by the module's connect() via Connection_New.
PyTypeObject ConnectionType =
{
    PyObject_HEAD_INIT(0)
    0,                                                      // ob_size
    "pyodbc.Connection",                                    // tp_name
    sizeof(Connection),                                     // tp_basicsize
    0,                                                      // tp_itemsize
    (destructor)Connection_dealloc,                         // destructor tp_dealloc
    0,                                                      // tp_print
    0,                                                      // tp_getattr
    0,                                                      // tp_setattr
    0,                                                      // tp_compare
    0,                                                      // tp_repr
    0,                                                      // tp_as_number
    0,                                                      // tp_as_sequence
    0,                                                      // tp_as_mapping
    0,                                                      // tp_hash
    0,                                                      // tp_call
    0,                                                      // tp_str
    0,                                                      // tp_getattro
    0,                                                      // tp_setattro
    0,                                                      // tp_as_buffer
    Py_TPFLAGS_DEFAULT,                                     // tp_flags
    connection_doc,                                         // tp_doc
    0,                                                      // tp_traverse
    0,                                                      // tp_clear
    0,                                                      // tp_richcompare
    0,                                                      // tp_weaklistoffset
    0,                                                      // tp_iter
    0,                                                      // tp_iternext
    Connection_methods,                                     // tp_methods
    0,                                                      // tp_members
    Connection_getseters,                                   // tp_getset
    0,                                                      // tp_base
    0,                                                      // tp_dict
    0,                                                      // tp_descr_get
    0,                                                      // tp_descr_set
    0,                                                      // tp_dictoffset
    0,                                                      // tp_init
    0,                                                      // tp_alloc
    0,                                                      // tp_new
    0,                                                      // tp_free
    0,                                                      // tp_is_gc
    0,                                                      // tp_bases
    0,                                                      // tp_mro
    0,                                                      // tp_cache
    0,                                                      // tp_subclasses
    0,                                                      // tp_weaklist
};
// Per-connection state backing a pyodbc.Connection object: one ODBC connection handle
// plus driver capabilities cached at connect time.
struct Connection
{
    PyObject_HEAD

    // Set to SQL_NULL_HANDLE when the connection is closed.
    HDBC hdbc;

    // Will be SQL_AUTOCOMMIT_ON or SQL_AUTOCOMMIT_OFF.
    int nAutoCommit;

    // The ODBC version the driver supports, from SQLGetInfo(DRIVER_ODBC_VER).  This is set after connecting.
    char odbc_major;
    char odbc_minor;

    // The escape character from SQLGetInfo.  This is not initialized until requested, so this may be zero!
    PyObject* searchescape;

    // Will be true if SQLDescribeParam is supported.  If false, we'll have to guess but the user will not be able
    // to insert NULLs into binary columns.
    bool supports_describeparam;

    // The column size of datetime columns, obtained from SQLGetInfo(), used to determine the datetime precision.
    int datetime_precision;
};
+ */ +PyObject* Connection_New(PyObject* pConnectString, bool fAutoCommit, bool fAnsi); + +#endif diff --git a/src/cursor.cpp b/src/cursor.cpp new file mode 100644 index 00000000..2f095d81 --- /dev/null +++ b/src/cursor.cpp @@ -0,0 +1,2046 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Note: This project has gone from C++ (when it was ported from pypgdb) to C, back to C++ (where it will stay). If +// you are making modifications, feel free to move variable declarations from the top of functions to where they are +// actually used. 
// True only when the whole chain is live: the cursor is still attached to a connection,
// that connection's HDBC has not been closed, and the cursor still owns a statement
// handle.  Any closed layer makes the statement unusable.
inline bool
StatementIsValid(Cursor* cursor)
{
    return cursor->cnxn != 0 && ((Connection*)cursor->cnxn)->hdbc != SQL_NULL_HANDLE && cursor->hstmt != SQL_NULL_HANDLE;
}
+ + Connection* cnxn = 0; + Cursor* cursor = 0; + + if (!Cursor_Check(obj)) + { + if (flags & CURSOR_RAISE_ERROR) + PyErr_SetString(ProgrammingError, "Invalid cursor object."); + return 0; + } + + cursor = (Cursor*)obj; + cnxn = (Connection*)cursor->cnxn; + + if (cnxn == 0) + { + if (flags & CURSOR_RAISE_ERROR) + PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); + return 0; + } + + if (IsSet(flags, CURSOR_REQUIRE_OPEN)) + { + if (cursor->hstmt == SQL_NULL_HANDLE) + { + if (flags & CURSOR_RAISE_ERROR) + PyErr_SetString(ProgrammingError, "Attempt to use a closed cursor."); + return 0; + } + + if (cnxn->hdbc == SQL_NULL_HANDLE) + { + if (flags & CURSOR_RAISE_ERROR) + PyErr_SetString(ProgrammingError, "The cursor's connection has been closed."); + return 0; + } + } + + if (IsSet(flags, CURSOR_REQUIRE_RESULTS) && cursor->colinfos == 0) + { + if (flags & CURSOR_RAISE_ERROR) + PyErr_SetString(ProgrammingError, "No results. Previous SQL was not a query."); + return 0; + } + + return cursor; +} + + +inline bool IsNumericType(SQLSMALLINT sqltype) +{ + switch (sqltype) + { + case SQL_DECIMAL: + case SQL_NUMERIC: + case SQL_REAL: + case SQL_FLOAT: + case SQL_DOUBLE: + case SQL_SMALLINT: + case SQL_INTEGER: + case SQL_TINYINT: + case SQL_BIGINT: + return true; + } + + return false; +} + + +PyObject* +PythonTypeFromSqlType(const SQLCHAR* name, SQLSMALLINT type) +{ + // Returns a type object ('int', 'str', etc.) for the given ODBC C type. This is used to populate + // Cursor.description with the type of Python object that will be returned for each column. + // + // name + // The name of the column, only used to create error messages. + // + // type + // The ODBC C type (SQL_C_CHAR, etc.) of the column. + // + // The returned object does not have its reference count incremented! 
+ + PyObject* pytype = 0; + + switch (type) + { + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + case SQL_GUID: + pytype = (PyObject*)&PyString_Type; + break; + + case SQL_DECIMAL: + case SQL_NUMERIC: + pytype = (PyObject*)decimal_type; + + case SQL_REAL: + case SQL_FLOAT: + case SQL_DOUBLE: + pytype = (PyObject*)&PyFloat_Type; + break; + + case SQL_SMALLINT: + case SQL_INTEGER: + case SQL_TINYINT: + pytype = (PyObject*)&PyInt_Type; + break; + + case SQL_TYPE_DATE: + pytype = (PyObject*)PyDateTimeAPI->DateType; + break; + + case SQL_TYPE_TIME: + pytype = (PyObject*)PyDateTimeAPI->TimeType; + break; + + case SQL_TYPE_TIMESTAMP: + pytype = (PyObject*)PyDateTimeAPI->DateTimeType; + break; + + case SQL_BIGINT: + pytype = (PyObject*)&PyLong_Type; + break; + + case SQL_BIT: + pytype = (PyObject*)&PyBool_Type; + break; + + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + pytype = (PyObject*)&PyBuffer_Type; + break; + + + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + pytype = (PyObject*)&PyUnicode_Type; + break; + + default: + return RaiseErrorV(0, 0, "ODBC data type %d is not supported. Cannot read column %s.", type, (const char*)name); + } + + Py_INCREF(pytype); + return pytype; +} + + +static bool +create_name_map(Cursor* cur, SQLSMALLINT field_count, bool lower) +{ + // Called after an execute to construct the map shared by rows. + + bool success = false; + PyObject *desc = 0, *colmap = 0, *colinfo = 0, *type = 0, *index = 0, *nullable_obj=0; + SQLRETURN ret; + + I(cur->hstmt != SQL_NULL_HANDLE && cur->colinfos != 0); + + // These are the values we expect after free_results. If this function fails, we do not modify any members, so + // they should be set to something Cursor_close can deal with. 
+ I(cur->description == Py_None); + I(cur->map_name_to_index == 0); + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + return false; + } + + desc = PyTuple_New((Py_ssize_t)field_count); + colmap = PyDict_New(); + if (!desc || !colmap) + goto done; + + for (int i = 0; i < field_count; i++) + { + SQLCHAR name[300]; + SQLSMALLINT nDataType; + SQLULEN nColSize; + SQLSMALLINT cDecimalDigits; + SQLSMALLINT nullable; + + SQLWCHAR name2[300]; + + Py_BEGIN_ALLOW_THREADS + ret = SQLDescribeCol(cur->hstmt, (SQLUSMALLINT)(i + 1), name, _countof(name), 0, &nDataType, &nColSize, &cDecimalDigits, &nullable); + ret = SQLDescribeColW(cur->hstmt, (SQLUSMALLINT)(i + 1), name2, _countof(name), 0, &nDataType, &nColSize, &cDecimalDigits, &nullable); + Py_END_ALLOW_THREADS + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + goto done; + } + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLDescribeCol", cur->cnxn->hdbc, cur->hstmt); + goto done; + } + +#ifdef TRACE_ALL + printf("Col %d: type=%d colsize=%d\n", (i+1), (int)nDataType, (int)nColSize); +#endif + if (lower) + _strlwr((char*)name); + + type = PythonTypeFromSqlType(name, nDataType); + if (!type) + goto done; + + switch (nullable) + { + case SQL_NO_NULLS: + nullable_obj = Py_False; + break; + case SQL_NULLABLE: + nullable_obj = Py_True; + break; + case SQL_NULLABLE_UNKNOWN: + default: + nullable_obj = Py_None; + break; + } + + // The Oracle ODBC driver has a bug (I call it) that it returns a data size of 0 when a numeric value is + // retrieved from a UNION: http://support.microsoft.com/?scid=kb%3Ben-us%3B236786&x=13&y=6 + // + // Unfortunately, I don't have a test system for this yet, so I'm *trying* something. (Not a good sign.) 
If + // the size is zero and it appears to be a numeric type, we'll try to come up with our own length using any + // other data we can get. + + if (nColSize == 0 && IsNumericType(nDataType)) + { + // I'm not sure how + if (cDecimalDigits != 0) + { + nColSize = cDecimalDigits + 3; + } + else + { + // I'm not sure if this is a good idea, but ... + nColSize = 42; + } + } + + colinfo = Py_BuildValue("(sOOiOOO)", + (char*)name, + type, // type_code + Py_None, // display size + (int)nColSize, // internal_size + Py_None, // precision + Py_None, // scale + nullable_obj); // null_ok + if (!colinfo) + goto done; + + + nullable_obj = 0; + + index = PyInt_FromLong(i); + if (!index) + goto done; + + PyDict_SetItemString(colmap, (const char*)name, index); + Py_DECREF(index); // SetItemString increments + index = 0; + + PyTuple_SET_ITEM(desc, i, colinfo); + colinfo = 0; // reference stolen by SET_ITEM + } + + Py_XDECREF(cur->description); + cur->description = desc; + desc = 0; + cur->map_name_to_index = colmap; + colmap = 0; + + success = true; + + done: + Py_XDECREF(nullable_obj); + Py_XDECREF(desc); + Py_XDECREF(colmap); + Py_XDECREF(index); + Py_XDECREF(colinfo); + + return success; +} + +enum free_results_type +{ + FREE_STATEMENT, + KEEP_STATEMENT +}; + +static bool +free_results(Cursor* self, free_results_type free_statement) +{ + // Internal function called any time we need to free the memory associated with query results. It is safe to call + // this even when a query has not been executed. + + // If we ran out of memory, it is possible that we have a cursor but colinfos is zero. However, we should be + // deleting this object, so the cursor will be freed when the HSTMT is destroyed. 
static void
closeimpl(Cursor* cur)
{
    // An internal function for the shared 'closing' code used by Cursor_close and Cursor_dealloc.
    //
    // This method releases the GIL lock while closing, so verify the HDBC still exists if you use it.

    // Drop any pending result set and column metadata first.
    free_results(cur, FREE_STATEMENT);

    FreeParameterInfo(cur);
    FreeParameterData(cur);

    if (StatementIsValid(cur))
    {
        // Null the member *before* releasing the GIL so no other thread can see (and use)
        // a statement handle that is in the middle of being freed.
        HSTMT hstmt = cur->hstmt;
        cur->hstmt = SQL_NULL_HANDLE;
        Py_BEGIN_ALLOW_THREADS
        SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
        Py_END_ALLOW_THREADS
    }

    // Release every owned Python reference and zero the members so a second call
    // (e.g. close() followed by dealloc) is a harmless no-op.
    Py_XDECREF(cur->pPreparedSQL);
    Py_XDECREF(cur->description);
    Py_XDECREF(cur->map_name_to_index);
    Py_XDECREF(cur->cnxn);

    cur->pPreparedSQL = 0;
    cur->description = 0;
    cur->map_name_to_index = 0;
    cur->cnxn = 0;
}
The cursor will\n" + "be unusable from this point forward; a ProgrammingError exception will be\n" + "raised if any operation is attempted with the cursor."; + +static PyObject* +Cursor_close(PyObject* self, PyObject* args) +{ + UNUSED(args); + + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + closeimpl(cursor); + + Py_INCREF(Py_None); + return Py_None; +} + +static void +Cursor_dealloc(Cursor* cursor) +{ + if (Cursor_Validate((PyObject*)cursor, CURSOR_REQUIRE_CNXN)) + { + closeimpl(cursor); + } + + PyObject_Del(cursor); +} + + + +bool +InitColumnInfo(Cursor* cursor, SQLUSMALLINT iCol, ColumnInfo* pinfo) +{ + // Initializes ColumnInfo from result set metadata. + + SQLRETURN ret; + + // REVIEW: This line fails on OS/X with the FileMaker driver : http://www.filemaker.com/support/updaters/xdbc_odbc_mac.html + // + // I suspect the problem is that it doesn't allow NULLs in some of the parameters, so I'm going to supply them all + // to see what happens. + + SQLCHAR ColumnName[200]; + SQLSMALLINT BufferLength = _countof(ColumnName); + SQLSMALLINT NameLength = 0; + SQLSMALLINT DataType = 0; + SQLULEN ColumnSize = 0; + SQLSMALLINT DecimalDigits = 0; + SQLSMALLINT Nullable = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLDescribeCol(cursor->hstmt, iCol, + ColumnName, + BufferLength, + &NameLength, + &DataType, + &ColumnSize, + &DecimalDigits, + &Nullable); + Py_END_ALLOW_THREADS + + pinfo->sql_type = DataType; + pinfo->column_size = ColumnSize; + + if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + return false; + } + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLDescribeCol", cursor->cnxn->hdbc, cursor->hstmt); + return false; + } + + // If it is an integer type, determine if it is signed or unsigned. 
The buffer size is the same but we'll need to + // know when we convert to a Python integer. + + switch (pinfo->sql_type) + { + case SQL_TINYINT: + case SQL_SMALLINT: + case SQL_INTEGER: + case SQL_BIGINT: + { + SQLLEN f; + Py_BEGIN_ALLOW_THREADS + ret = SQLColAttribute(cursor->hstmt, iCol, SQL_DESC_UNSIGNED, 0, 0, 0, &f); + Py_END_ALLOW_THREADS + + if (cursor->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + return false; + } + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLColAttribute", cursor->cnxn->hdbc, cursor->hstmt); + return false; + } + pinfo->is_unsigned = (f == SQL_TRUE); + break; + } + + default: + pinfo->is_unsigned = false; + } + + return true; +} + + +static bool +PrepareResults(Cursor* cur, int cCols) +{ + // Called after a SELECT has been executed to perform pre-fetch work. + // + // Allocates the ColumnInfo structures describing the returned data. + + int i; + I(cur->colinfos == 0); + + cur->colinfos = (ColumnInfo*)malloc(sizeof(ColumnInfo) * cCols); + if (cur->colinfos == 0) + { + PyErr_NoMemory(); + return false; + } + + for (i = 0; i < cCols; i++) + { + if (!InitColumnInfo(cur, (SQLSMALLINT)(i + 1), &cur->colinfos[i])) + { + free(cur->colinfos); + cur->colinfos = 0; + return false; + } + } + + return true; +} + +static PyObject* +execute(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first) +{ + // Internal function to execute SQL, called by .execute and .executemany. + // + // pSql + // A PyString, PyUnicode, or derived object containing the SQL. + // + // params + // Pointer to an optional sequence of parameters, and possibly the SQL statement (see skip_first): + // (SQL, param1, param2) or (param1, param2). + // + // skip_first + // If true, the first element in `params` is ignored. 
(It will be the SQL statement and `params` will be the + // entire tuple passed to Cursor.execute.) Otherwise all of the params are used. (This case occurs when called + // from Cursor.executemany, in which case the sequences do not contain the SQL statement.) Ignored if params is + // zero. + + // Normalize the parameter variables. + + int params_offset = skip_first ? 1 : 0; + Py_ssize_t cParams = params == 0 ? 0 : PySequence_Length(params) - params_offset; + + SQLRETURN ret = 0; + + free_results(cur, FREE_STATEMENT); + + const char* szLastFunction = ""; + + if (cParams > 0) + { + // There are parameters, so we'll need to prepare the SQL statement and bind the parameters. (We need to + // prepare the statement because we can't bind a NULL (None) object without knowing the target datatype. There + // is no one data type that always maps to the others (no, not even varchar)). + + if (!PrepareAndBind(cur, pSql, params, skip_first)) + return 0; + + szLastFunction = "SQLExecute"; + Py_BEGIN_ALLOW_THREADS + ret = SQLExecute(cur->hstmt); + Py_END_ALLOW_THREADS + } + else + { + // REVIEW: Why don't we always prepare? It is highly unlikely that a user would need to execute the same SQL + // repeatedly if it did not have parameters, so we are not losing performance, but it would simplify the code. + + Py_XDECREF(cur->pPreparedSQL); + cur->pPreparedSQL = 0; + + szLastFunction = "SQLExecDirect"; + if (PyString_Check(pSql)) + { + Py_BEGIN_ALLOW_THREADS + ret = SQLExecDirect(cur->hstmt, (SQLCHAR*)PyString_AS_STRING(pSql), SQL_NTS); + Py_END_ALLOW_THREADS + } + else + { + Py_BEGIN_ALLOW_THREADS + ret = SQLExecDirectW(cur->hstmt, (SQLWCHAR*)PyUnicode_AsUnicode(pSql), SQL_NTS); + Py_END_ALLOW_THREADS + } + } + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. 
+ + FreeParameterData(cur); + + return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + } + + if (!SQL_SUCCEEDED(ret) && ret != SQL_NEED_DATA && ret != SQL_NO_DATA) + { + // We could try dropping through the while and if below, but if there is an error, we need to raise it before + // FreeParameterData calls more ODBC functions. + return RaiseErrorFromHandle("SQLExecDirectW", cur->cnxn->hdbc, cur->hstmt); + } + + while (ret == SQL_NEED_DATA) + { + // We have bound a `buffer` object using SQL_DATA_AT_EXEC, so ODBC is asking us for the data now. We gave the + // buffer pointer to ODBC in SQLBindParameter -- SQLParamData below gives the pointer back to us. + + szLastFunction = "SQLParamData"; + PyObject* pParam; + ret = SQLParamData(cur->hstmt, (SQLPOINTER*)&pParam); + + if (ret == SQL_NEED_DATA) + { + szLastFunction = "SQLPutData"; + if (PyBuffer_Check(pParam)) + { + // Buffers can have multiple segments, so we might need multiple writes. Looping through buffers isn't + // difficult, but we've wrapped it up in an iterator object to keep this loop simple. 
+ + BufferSegmentIterator it(pParam); + byte* pb; + SQLLEN cb; + while (it.Next(pb, cb)) + SQLPutData(cur->hstmt, pb, cb); + } + else if (PyUnicode_Check(pParam)) + { + // REVIEW: This will fail if PyUnicode != wchar_t + Py_UNICODE* p = PyUnicode_AS_UNICODE(pParam); + SQLLEN offset = 0; + SQLLEN cb = (SQLLEN)PyUnicode_GET_SIZE(pParam); + while (offset < cb) + { + SQLLEN remaining = min(MAX_VARCHAR_BUFFER, cb - offset); + SQLPutData(cur->hstmt, &p[offset], remaining * 2); + offset += remaining; + } + } + else if (PyString_Check(pParam)) + { + const char* p = PyString_AS_STRING(pParam); + SQLLEN offset = 0; + SQLLEN cb = (SQLLEN)PyString_GET_SIZE(pParam); + while (offset < cb) + { + SQLLEN remaining = min(MAX_VARCHAR_BUFFER, cb - offset); + SQLPutData(cur->hstmt, (SQLPOINTER)&p[offset], remaining); + offset += remaining; + } + } + } + } + + FreeParameterData(cur); + + if (ret == SQL_NO_DATA) + { + // Example: A delete statement that did not delete anything. + cur->rowcount = 0; + return PyInt_FromLong(cur->rowcount); + } + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle(szLastFunction, cur->cnxn->hdbc, cur->hstmt); + + SQLLEN cRows = -1; + Py_BEGIN_ALLOW_THREADS + ret = SQLRowCount(cur->hstmt, &cRows); + Py_END_ALLOW_THREADS + + cur->rowcount = (int)cRows; + +#ifdef TRACE_ALL + printf("SQLRowCount: %d\n", cRows); +#endif + + SQLSMALLINT cCols = 0; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + { + // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were + // submitted. This is not documented, but I've seen it with multiple successful inserts. + + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + } + +#ifdef TRACE_ALL + printf("SQLNumResultCols: %d\n", cCols); +#endif + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. 
+ return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + } + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLRowCount", cur->cnxn->hdbc, cur->hstmt); + + if (cCols != 0) + { + // A result set was created. + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, lowercase())) + return 0; + + // Return the cursor so the results can be iterated over directly. + Py_INCREF(cur); + return (PyObject*)cur; + } + + return PyInt_FromLong(cur->rowcount); +} + +inline bool +IsSequence(PyObject* p) +{ + return PySequence_Check(p) && !PyString_Check(p) && !PyBuffer_Check(p) && !PyUnicode_Check(p); +} + +static char execute_doc[] = + "C.execute(sql, [params]) --> None | Cursor | count\n" + "\n" + "Prepare and execute a database query or command.\n" + "\n" + "Parameters may be provided as a sequence (as specified by the DB API) or\n" + "simply passed in one after another (non-standard):\n" + "\n" + " cursor.execute(sql, (param1, param2))\n" + "\n" + " or\n" + "\n" + " cursor.execute(sql, param1, param2)\n" + "\n" + "The return value for this method is not specified in the API, so any use is\n" + "non-standard. 
PyObject*
Cursor_execute(PyObject* self, PyObject* args)
{
    // Cursor.execute(sql, [params]) -- prepare and execute a query or command.
    //
    // Parameters may be passed DB API style as a single sequence, or (non-standard)
    // inline one after another; see execute_doc.  Returns whatever the internal
    // execute() returns: the cursor for queries, a row count for updates/deletes,
    // or 0 with an exception set on error.

    // Number of arguments after the SQL statement itself; -1 means no arguments at all.
    Py_ssize_t cParams = PyTuple_Size(args) - 1;

    bool skip_first = false;
    PyObject *pSql, *params = 0, *result = 0;

    Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR);
    if (!cursor)
        return 0;

    if (cParams < 0)
    {
        PyErr_SetString(PyExc_TypeError, "execute() takes at least 1 argument (0 given)");
        goto done;
    }

    pSql = PyTuple_GET_ITEM(args, 0);

    if (!PyString_Check(pSql) && !PyUnicode_Check(pSql))
    {
        PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query.");
        goto done;
    }

    // Figure out if there were parameters and how they were passed.  Our optional parameter passing complicates this slightly.

    if (cParams == 1 && IsSequence(PyTuple_GET_ITEM(args, 1)))
    {
        // There is a single argument and it is a sequence, so we must treat it as a sequence of parameters.  (This is
        // the normal Cursor.execute behavior.)

        params = PyTuple_GET_ITEM(args, 1);
        skip_first = false;
    }
    else if (cParams > 0)
    {
        // Parameters were passed inline; hand the whole args tuple down and have
        // execute() skip the SQL statement in slot 0.
        params = args;
        skip_first = true;
    }

    // Execute.

    result = execute(cursor, pSql, params, skip_first);

  done:

    return result;
}
+ + result = execute(cursor, pSql, params, skip_first); + + done: + + return result; +} + +static PyObject* +Cursor_executemany(PyObject* self, PyObject* args) +{ + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + cursor->rowcount = -1; + + PyObject *pSql, *param_seq; + if (!PyArg_ParseTuple(args, "OO", &pSql, ¶m_seq)) + return 0; + + if (!PyString_Check(pSql) && !PyUnicode_Check(pSql)) + { + PyErr_SetString(PyExc_TypeError, "The first argument to execute must be a string or unicode query."); + return 0; + } + + if (!IsSequence(param_seq)) + { + PyErr_SetString(ProgrammingError, "The second parameter to executemany must be a sequence."); + return 0; + } + + Py_ssize_t c = PySequence_Size(param_seq); + + if (c == 0) + { + PyErr_SetString(ProgrammingError, "The second parameter to executemany must not be empty."); + return 0; + } + + for (Py_ssize_t i = 0; i < c; i++) + { + PyObject* params = PySequence_GetItem(param_seq, i); + PyObject* result = execute(cursor, pSql, params, false); + bool success = result != 0; + Py_XDECREF(result); + Py_DECREF(params); + if (!success) + { + cursor->rowcount = -1; + return 0; + } + } + + cursor->rowcount = -1; + Py_RETURN_NONE; +} + + +static PyObject* +Cursor_fetch(Cursor* cur) +{ + // Internal function to fetch a single row and construct a Row object from it. Used by all of the fetching + // functions. + // + // Returns a Row object if successful. If there are no more rows, zero is returned. If an error occurs, an + // exception is set and zero is returned. (To differentiate between the last two, use PyErr_Occurred.) + + SQLRETURN ret = 0; + int field_count, i; + PyObject** apValues; + + Py_BEGIN_ALLOW_THREADS + ret = SQLFetch(cur->hstmt); + Py_END_ALLOW_THREADS + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. 
+ return RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + } + + if (ret == SQL_NO_DATA) + return 0; + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLFetch", cur->cnxn->hdbc, cur->hstmt); + + field_count = PyTuple_GET_SIZE(cur->description); + + apValues = (PyObject**)malloc(sizeof(PyObject*) * field_count); + + if (apValues == 0) + return PyErr_NoMemory(); + + for (i = 0; i < field_count; i++) + { + PyObject* value = GetData(cur, i); + + if (!value) + { + FreeRowValues(i, apValues); + return 0; + } + + apValues[i] = value; + } + + return (PyObject*)Row_New(cur->description, cur->map_name_to_index, field_count, apValues); +} + + +static PyObject* +Cursor_fetchlist(Cursor* cur, Py_ssize_t max) +{ + // max + // The maximum number of rows to fetch. If -1, fetch all rows. + // + // Returns a list of Rows. If there are no rows, an empty list is returned. + + PyObject* results; + PyObject* row; + + results = PyList_New(0); + if (!results) + return 0; + + while (max == -1 || max > 0) + { + row = Cursor_fetch(cur); + + if (!row) + { + if (PyErr_Occurred()) + { + Py_DECREF(results); + return 0; + } + break; + } + + PyList_Append(results, row); + Py_DECREF(row); + + if (max != -1) + max--; + } + + return results; +} + +static PyObject* +Cursor_iter(PyObject* self) +{ + Py_INCREF(self); + return self; +} + + +static PyObject* +Cursor_iternext(PyObject* self) +{ + // Implements the iterator protocol for cursors. Fetches the next row. Returns zero without setting an exception + // when there are no rows. 
+ + PyObject* result; + + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); + + if (!cursor) + return 0; + + result = Cursor_fetch(cursor); + + return result; +} + +static PyObject* +Cursor_fetchone(PyObject* self, PyObject* args) +{ + UNUSED(args); + + PyObject* row; + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + row = Cursor_fetch(cursor); + + if (!row) + { + if (PyErr_Occurred()) + return 0; + Py_RETURN_NONE; + } + + return row; +} + +static PyObject* +Cursor_fetchall(PyObject* self, PyObject* args) +{ + UNUSED(args); + + PyObject* result; + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + result = Cursor_fetchlist(cursor, -1); + + return result; +} + +static PyObject* +Cursor_fetchmany(PyObject* self, PyObject* args) +{ + long rows; + PyObject* result; + + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_RESULTS | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + rows = cursor->arraysize; + if (!PyArg_ParseTuple(args, "|l", &rows)) + return 0; + + result = Cursor_fetchlist(cursor, rows); + + return result; +} + +static char tables_doc[] = + "C.tables(table=None, catalog=None, schema=None, tableType=None) --> self\n" + "\n" + "Executes SQLTables and creates a results set of tables defined in the data\n" + "source.\n" + "\n" + "The table, catalog, and schema interpret the '_' and '%' characters as\n" + "wildcards. 
The escape character is driver specific, so use\n" + "`Connection.searchescape`.\n" + "\n" + "Each row fetched has the following columns:\n" + " 0) table_cat: The catalog name.\n" + " 1) table_schem: The schema name.\n" + " 2) table_name: The table name.\n" + " 3) table_type: One of 'TABLE', 'VIEW', SYSTEM TABLE', 'GLOBAL TEMPORARY'\n" + " 'LOCAL TEMPORARY', 'ALIAS', 'SYNONYM', or a data source-specific type name."; + +char* Cursor_tables_kwnames[] = { "table", "catalog", "schema", "tableType", 0 }; + +static PyObject* +Cursor_tables(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szCatalog = 0; + const char* szSchema = 0; + const char* szTableName = 0; + const char* szTableType = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssss", Cursor_tables_kwnames, &szTableName, &szCatalog, &szSchema, &szTableType)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLTables(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, + (SQLCHAR*)szTableName, SQL_NTS, (SQLCHAR*)szTableType, SQL_NTS); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLTables", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. 
+ Py_INCREF(cur); + return (PyObject*)cur; +} + + +static char columns_doc[] = + "C.columns(table=None, catalog=None, schema=None, column=None)\n\n" + "Creates a results set of column names in specified tables by executing the ODBC SQLColumns function.\n" + "Each row fetched has the following columns:\n" + " 0) table_cat\n" + " 1) table_schem\n" + " 2) table_name\n" + " 3) column_name\n" + " 4) data_type\n" + " 5) type_name\n" + " 6) column_size\n" + " 7) buffer_length\n" + " 8) decimal_digits\n" + " 9) num_prec_radix\n" + " 10) nullable\n" + " 11) remarks\n" + " 12) column_def\n" + " 13) sql_data_type\n" + " 14) sql_datetime_sub\n" + " 15) char_octet_length\n" + " 16) ordinal_position\n" + " 17) is_nullable"; + +char* Cursor_column_kwnames[] = { "table", "catalog", "schema", "column", 0 }; + +static PyObject* +Cursor_columns(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szCatalog = 0; + const char* szSchema = 0; + const char* szTable = 0; + const char* szColumn = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssss", Cursor_column_kwnames, &szTable, &szCatalog, &szSchema, &szColumn)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, (SQLCHAR*)szColumn, SQL_NTS); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLColumns", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. 
+ Py_INCREF(cur); + return (PyObject*)cur; +} + + +static char statistics_doc[] = + "C.statistics(catalog=None, schema=None, unique=False, quick=True) --> self\n\n" + "Creates a results set of statistics about a single table and the indexes associated with \n" + "the table by executing SQLStatistics.\n" + "unique\n" + " If True, only unique indexes are retured. Otherwise all indexes are returned.\n" + "quick\n" + " If True, CARDINALITY and PAGES are returned only if they are readily available\n" + " from the server\n" + "\n" + "Each row fetched has the following columns:\n\n" + " 0) table_cat\n" + " 1) table_schem\n" + " 2) table_name\n" + " 3) non_unique\n" + " 4) index_qualifier\n" + " 5) index_name\n" + " 6) type\n" + " 7) ordinal_position\n" + " 8) column_name\n" + " 9) asc_or_desc\n" + " 10) cardinality\n" + " 11) pages\n" + " 12) filter_condition"; + +char* Cursor_statistics_kwnames[] = { "table", "catalog", "schema", "unique", "quick", 0 }; + +static PyObject* +Cursor_statistics(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szCatalog = 0; + const char* szSchema = 0; + const char* szTable = 0; + PyObject* pUnique = Py_False; + PyObject* pQuick = Py_True; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ssOO", Cursor_statistics_kwnames, &szTable, &szCatalog, &szSchema, + &pUnique, &pQuick)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLUSMALLINT nUnique = PyObject_IsTrue(pUnique) ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL; + SQLUSMALLINT nReserved = PyObject_IsTrue(pQuick) ? 
SQL_QUICK : SQL_ENSURE; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLStatistics(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, + nUnique, nReserved); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLStatistics", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. + Py_INCREF(cur); + return (PyObject*)cur; +} + + +static char rowIdColumns_doc[] = + "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) -->\n\n" + "Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a result set of columns that\n" + "uniquely identify a row\n\n" + "Each row fetched has the following columns:\n" + " 0) scope\n" + " 1) column_name\n" + " 2) data_type\n" + " 3) type_name\n" + " 4) column_size\n" + " 5) buffer_length\n" + " 6) decimal_digits\n" + " 7) pseudo_column"; + +static char rowVerColumns_doc[] = + "C.rowIdColumns(table, catalog=None, schema=None, nullable=True) --> self\n\n" + "Executes SQLSpecialColumns with SQL_ROWVER which creates a result set of columns that\n" + "are automatically updated when any value in the row is updated.\n\n" + "Each row fetched has the following columns:\n" + " 0) scope\n" + " 1) column_name\n" + " 2) data_type\n" + " 3) type_name\n" + " 4) column_size\n" + " 5) buffer_length\n" + " 6) decimal_digits\n" + " 7) pseudo_column"; + +char* Cursor_specialColumn_kwnames[] = { "table", "catalog", "schema", "nullable", 0 }; + +static PyObject* +_specialColumns(PyObject* self, PyObject* args, PyObject* kwargs, SQLUSMALLINT nIdType) +{ + const char* szTable; + const char* szCatalog = 0; + const char* szSchema = 0; + PyObject* 
pNullable = Py_True; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ssO", Cursor_specialColumn_kwnames, &szTable, &szCatalog, &szSchema, &pNullable)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + SQLSMALLINT nNullable = PyObject_IsTrue(pNullable) ? SQL_NULLABLE : SQL_NO_NULLS; + + Py_BEGIN_ALLOW_THREADS + ret = SQLSpecialColumns(cur->hstmt, nIdType, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, + SQL_SCOPE_TRANSACTION, nNullable); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLSpecialColumns", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. 
+ Py_INCREF(cur); + return (PyObject*)cur; +} + +static PyObject* +Cursor_rowIdColumns(PyObject* self, PyObject* args, PyObject* kwargs) +{ + return _specialColumns(self, args, kwargs, SQL_BEST_ROWID); +} + +static PyObject* +Cursor_rowVerColumns(PyObject* self, PyObject* args, PyObject* kwargs) +{ + return _specialColumns(self, args, kwargs, SQL_ROWVER); +} + + +static char primaryKeys_doc[] = + "C.primaryKeys(table, catalog=None, schema=None) --> self\n\n" + "Creates a results set of column names that make up the primary key for a table\n" + "by executing the SQLPrimaryKeys function.\n" + "Each row fetched has the following columns:\n" + " 0) table_cat\n" + " 1) table_schem\n" + " 2) table_name\n" + " 3) column_name\n" + " 4) key_seq\n" + " 5) pk_name"; + +char* Cursor_primaryKeys_kwnames[] = { "table", "catalog", "schema", 0 }; + +static PyObject* +Cursor_primaryKeys(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szTable; + const char* szCatalog = 0; + const char* szSchema = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|ss", Cursor_primaryKeys_kwnames, &szTable, &szCatalog, &szSchema)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLPrimaryKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLPrimaryKeys", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. 
+ Py_INCREF(cur); + return (PyObject*)cur; +} + + +static char foreignKeys_doc[] = + "C.foreignKeys(table=None, catalog=None, schema=None,\n" + " foreignTable=None, foreignCatalog=None, foreignSchema=None) --> self\n\n" + "Executes the SQLForeignKeys function and creates a results set of column names\n" + "that are foreign keys in the specified table (columns in the specified table\n" + "that refer to primary keys in other tables) or foreign keys in other tables\n" + "that refer to the primary key in the specified table.\n\n" + "Each row fetched has the following columns:\n" + " 0) pktable_cat\n" + " 1) pktable_schem\n" + " 2) pktable_name\n" + " 3) pkcolumn_name\n" + " 4) fktable_cat\n" + " 5) fktable_schem\n" + " 6) fktable_name\n" + " 7) fkcolumn_name\n" + " 8) key_seq\n" + " 9) update_rule\n" + " 10) delete_rule\n" + " 11) fk_name\n" + " 12) pk_name\n" + " 13) deferrability"; + +char* Cursor_foreignKeys_kwnames[] = { "table", "catalog", "schema", "foreignTable", "foreignCatalog", "foreignSchema", 0 }; + +static PyObject* +Cursor_foreignKeys(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szTable = 0; + const char* szCatalog = 0; + const char* szSchema = 0; + const char* szForeignTable = 0; + const char* szForeignCatalog = 0; + const char* szForeignSchema = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ssssss", Cursor_foreignKeys_kwnames, &szTable, &szCatalog, &szSchema, + &szForeignTable, &szForeignCatalog, &szForeignSchema)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLForeignKeys(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szTable, SQL_NTS, + (SQLCHAR*)szForeignCatalog, SQL_NTS, (SQLCHAR*)szForeignSchema, SQL_NTS, (SQLCHAR*)szForeignTable, SQL_NTS); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLForeignKeys", 
cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. + Py_INCREF(cur); + return (PyObject*)cur; +} + +static char getTypeInfo_doc[] = + "C.getTypeInfo(sqlType=None) --> self\n\n" + "Executes SQLGetTypeInfo a creates a result set with information about the\n" + "specified data type or all data types supported by the ODBC driver if not\n" + "specified.\n\n" + "Each row fetched has the following columns:\n" + " 0) type_name\n" + " 1) data_type\n" + " 2) column_size\n" + " 3) literal_prefix\n" + " 4) literal_suffix\n" + " 5) create_params\n" + " 6) nullable\n" + " 7) case_sensitive\n" + " 8) searchable\n" + " 9) unsigned_attribute\n" + "10) fixed_prec_scale\n" + "11) auto_unique_value\n" + "12) local_type_name\n" + "13) minimum_scale\n" + "14) maximum_scale\n" + "15) sql_data_type\n" + "16) sql_datetime_sub\n" + "17) num_prec_radix\n" + "18) interval_precision"; + +static PyObject* +Cursor_getTypeInfo(PyObject* self, PyObject* args, PyObject* kwargs) +{ + UNUSED(kwargs); + + SQLSMALLINT nDataType = SQL_ALL_TYPES; + + if (!PyArg_ParseTuple(args, "|i", &nDataType)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLGetTypeInfo(cur->hstmt, nDataType); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLGetTypeInfo", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, 
true)) + return 0; + + // Return the cursor so the results can be iterated over directly. + Py_INCREF(cur); + return (PyObject*)cur; +} + +static PyObject* +Cursor_nextset(PyObject* self, PyObject* args) +{ + UNUSED(args); + + Cursor* cur = Cursor_Validate(self, 0); + + if (!cur) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLMoreResults(cur->hstmt); + Py_END_ALLOW_THREADS + + if (ret == SQL_NO_DATA) + { + //#ifdef TRACE_ALL + //printf("Cursor_nextset: SQL_NO_DATA\r\n"); + //#endif + free_results(cur, FREE_STATEMENT); + Py_RETURN_FALSE; + } + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + { + // Note: The SQL Server driver sometimes returns HY007 here if multiple statements (separated by ;) were + // submitted. This is not documented, but I've seen it with multiple successful inserts. + + free_results(cur, FREE_STATEMENT); + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + } + free_results(cur, KEEP_STATEMENT); + + if (cCols != 0) + { + // A result set was created. 
+ + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, lowercase())) + return 0; + } + + SQLLEN cRows; + Py_BEGIN_ALLOW_THREADS + ret = SQLRowCount(cur->hstmt, &cRows); + Py_END_ALLOW_THREADS + cur->rowcount = (int)cRows; + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLRowCount", cur->cnxn->hdbc, cur->hstmt); + + Py_RETURN_TRUE; +} + + +static char procedureColumns_doc[] = + "C.procedureColumns(procedure=None, catalog=None, schema=None) --> self\n\n" + "Executes SQLProcedureColumns and creates a result set of information\n" + "about stored procedure columns and results.\n" + " 0) procedure_cat\n" + " 1) procedure_schem\n" + " 2) procedure_name\n" + " 3) column_name\n" + " 4) column_type\n" + " 5) data_type\n" + " 6) type_name\n" + " 7) column_size\n" + " 8) buffer_length\n" + " 9) decimal_digits\n" + " 10) num_prec_radix\n" + " 11) nullable\n" + " 12) remarks\n" + " 13) column_def\n" + " 14) sql_data_type\n" + " 15) sql_datetime_sub\n" + " 16) char_octet_length\n" + " 17) ordinal_position\n" + " 18) is_nullable"; + +char* Cursor_procedureColumns_kwnames[] = { "procedure", "catalog", "schema", 0 }; + +static PyObject* +Cursor_procedureColumns(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szProcedure = 0; + const char* szCatalog = 0; + const char* szSchema = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sss", Cursor_procedureColumns_kwnames, &szProcedure, &szCatalog, &szSchema)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLProcedureColumns(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, + (SQLCHAR*)szProcedure, SQL_NTS, 0, 0); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLProcedureColumns", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if 
(!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. + Py_INCREF(cur); + return (PyObject*)cur; +} + + +static char procedures_doc[] = + "C.procedures(procedure=None, catalog=None, schema=None) --> self\n\n" + "Executes SQLProcedures and creates a result set of information about the\n" + "procedures in the data source.\n" + "Each row fetched has the following columns:\n" + " 0) procedure_cat\n" + " 1) procedure_schem\n" + " 2) procedure_name\n" + " 3) num_input_params\n" + " 4) num_output_params\n" + " 5) num_result_sets\n" + " 6) remarks\n" + " 7) procedure_type"; + +char* Cursor_procedures_kwnames[] = { "procedure", "catalog", "schema", 0 }; + +static PyObject* +Cursor_procedures(PyObject* self, PyObject* args, PyObject* kwargs) +{ + const char* szProcedure = 0; + const char* szCatalog = 0; + const char* szSchema = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sss", Cursor_procedures_kwnames, &szProcedure, &szCatalog, &szSchema)) + return 0; + + Cursor* cur = Cursor_Validate(self, CURSOR_REQUIRE_OPEN); + + if (!free_results(cur, FREE_STATEMENT)) + return 0; + + SQLRETURN ret = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLProcedures(cur->hstmt, (SQLCHAR*)szCatalog, SQL_NTS, (SQLCHAR*)szSchema, SQL_NTS, (SQLCHAR*)szProcedure, SQL_NTS); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + return RaiseErrorFromHandle("SQLProcedures", cur->cnxn->hdbc, cur->hstmt); + + SQLSMALLINT cCols; + if (!SQL_SUCCEEDED(SQLNumResultCols(cur->hstmt, &cCols))) + return RaiseErrorFromHandle("SQLNumResultCols", cur->cnxn->hdbc, cur->hstmt); + + if (!PrepareResults(cur, cCols)) + return 0; + + if (!create_name_map(cur, cCols, true)) + return 0; + + // Return the cursor so the results can be iterated over directly. 
+ Py_INCREF(cur); + return (PyObject*)cur; +} + +static PyObject* +Cursor_ignored(PyObject* self, PyObject* args) +{ + UNUSED(self, args); + Py_RETURN_NONE; +} + +static char rowcount_doc[] = + "This read-only attribute specifies the number of rows the last DML statement\n" + " (INSERT, UPDATE, DELETE) affected. This is set to -1 for SELECT statements."; + +static char description_doc[] = + "This read-only attribute is a sequence of 7-item sequences. Each of these\n" \ + "sequences contains information describing one result column: (name, type_code,\n" \ + "display_size, internal_size, precision, scale, null_ok). All values except\n" \ + "name, type_code, and internal_size are None. The type_code entry will be the\n" \ + "type object used to create values for that column (e.g. `str` or\n" \ + "`datetime.datetime`).\n" \ + "\n" \ + "This attribute will be None for operations that do not return rows or if the\n" \ + "cursor has not had an operation invoked via the execute() method yet.\n" \ + "\n" \ + "The type_code can be interpreted by comparing it to the Type Objects defined in\n" \ + "the DB API and defined the pyodbc module: Date, Time, Timestamp, Binary,\n" \ + "STRING, BINARY, NUMBER, and DATETIME."; + +static char arraysize_doc[] = + "This read/write attribute specifies the number of rows to fetch at a time with\n" \ + "fetchmany(). 
It defaults to 1 meaning to fetch a single row at a time."; + +static char connection_doc[] = + "This read-only attribute return a reference to the Connection object on which\n" \ + "the cursor was created.\n" \ + "\n" \ + "The attribute simplifies writing polymorph code in multi-connection\n" \ + "environments."; + +static PyMemberDef Cursor_members[] = +{ + {"rowcount", T_INT, offsetof(Cursor, rowcount), READONLY, rowcount_doc }, + {"description", T_OBJECT_EX, offsetof(Cursor, description), READONLY, description_doc }, + {"arraysize", T_INT, offsetof(Cursor, arraysize), 0, arraysize_doc }, + {"connection", T_OBJECT_EX, offsetof(Cursor, cnxn), READONLY, connection_doc }, + { 0 } +}; + +static PyObject* Cursor_getnoscan(PyObject* self, void *closure) +{ + UNUSED(closure); + + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + SQLUINTEGER noscan = SQL_NOSCAN_OFF; + if (!SQL_SUCCEEDED(SQLGetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)&noscan, sizeof(SQLUINTEGER), 0))) + { + // Not supported? We're going to assume 'no'. + Py_RETURN_FALSE; + } + + if (noscan == SQL_NOSCAN_OFF) + Py_RETURN_FALSE; + + Py_RETURN_TRUE; +} + +static PyObject* Cursor_setnoscan(PyObject* self, PyObject* value, void *closure) +{ + UNUSED(closure); + + Cursor* cursor = Cursor_Validate(self, CURSOR_REQUIRE_OPEN | CURSOR_RAISE_ERROR); + if (!cursor) + return 0; + + if (value == 0) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the noscan attribute"); + return 0; + } + + SQLUINTEGER noscan = PyObject_IsTrue(value) ? 
SQL_NOSCAN_ON : SQL_NOSCAN_OFF; + if (!SQL_SUCCEEDED(SQLSetStmtAttr(cursor->hstmt, SQL_ATTR_NOSCAN, (SQLPOINTER)noscan, 0))) + { + printf("setattr failed!\n"); + return RaiseErrorFromHandle("SQLSetStmtAttr(SQL_ATTR_NOSCAN)", cursor->cnxn->hdbc, cursor->hstmt); + } + + return 0; +} + +static PyGetSetDef Cursor_getsetters[] = +{ + {"noscan", (getter)Cursor_getnoscan, (setter)Cursor_setnoscan, "NOSCAN statement attr", 0}, + { 0 } +}; + +static char executemany_doc[] = + "executemany(sql, seq_of_params) --> Cursor | count | None\n" \ + "\n" \ + "Prepare a database query or command and then execute it against all parameter\n" \ + "sequences found in the sequence seq_of_params.\n" \ + "\n" \ + "Only the result of the final execution is returned. See `execute` for a\n" \ + "description of parameter passing the return value."; + +static char nextset_doc[] = "nextset() --> True | None\n" \ + "\n" \ + "Jumps to the next resultset if the last sql has multiple resultset." \ + "Returns True if there is a next resultset otherwise None."; + +static char ignored_doc[] = "Ignored."; + +static char fetchone_doc[] = + "fetchone() --> Row | None\n" \ + "\n" \ + "Fetch the next row of a query result set, returning a single Row instance, or\n" \ + "None when no more data is available.\n" \ + "\n" \ + "A ProgrammingError exception is raised if the previous call to execute() did\n" \ + "not produce any result set or no call was issued yet."; + +static char fetchall_doc[] = + "fetchmany(size=cursor.arraysize) --> list of Rows\n" \ + "\n" \ + "Fetch the next set of rows of a query result, returning a list of Row\n" \ + "instances. An empty list is returned when no more rows are available.\n" \ + "\n" \ + "The number of rows to fetch per call is specified by the parameter. If it is\n" \ + "not given, the cursor's arraysize determines the number of rows to be\n" \ + "fetched. The method should try to fetch as many rows as indicated by the size\n" \ + "parameter. 
If this is not possible due to the specified number of rows not\n" \ + "being available, fewer rows may be returned.\n" \ + "\n" \ + "A ProgrammingError exception is raised if the previous call to execute() did\n" \ + "not produce any result set or no call was issued yet."; + +static char fetchmany_doc[] = + "fetchmany() --> list of Rows\n" \ + "\n" \ + "Fetch all remaining rows of a query result, returning them as a list of Rows.\n" \ + "An empty list is returned if there are no more rows.\n" \ + "\n" \ + "A ProgrammingError exception is raised if the previous call to execute() did\n" \ + "not produce any result set or no call was issued yet."; + +static PyMethodDef Cursor_methods[] = +{ + { "close", (PyCFunction)Cursor_close, METH_NOARGS, close_doc }, + { "execute", (PyCFunction)Cursor_execute, METH_VARARGS, execute_doc }, + { "executemany", (PyCFunction)Cursor_executemany, METH_VARARGS, executemany_doc }, + { "setinputsizes", (PyCFunction)Cursor_ignored, METH_VARARGS, ignored_doc }, + { "setoutputsize", (PyCFunction)Cursor_ignored, METH_VARARGS, ignored_doc }, + { "fetchone", (PyCFunction)Cursor_fetchone, METH_NOARGS, fetchone_doc }, + { "fetchall", (PyCFunction)Cursor_fetchall, METH_NOARGS, fetchall_doc }, + { "fetchmany", (PyCFunction)Cursor_fetchmany, METH_VARARGS, fetchmany_doc }, + { "nextset", (PyCFunction)Cursor_nextset, METH_NOARGS, nextset_doc }, + { "tables", (PyCFunction)Cursor_tables, METH_VARARGS|METH_KEYWORDS, tables_doc }, + { "columns", (PyCFunction)Cursor_columns, METH_VARARGS|METH_KEYWORDS, columns_doc }, + { "statistics", (PyCFunction)Cursor_statistics, METH_VARARGS|METH_KEYWORDS, statistics_doc }, + { "rowIdColumns", (PyCFunction)Cursor_rowIdColumns, METH_VARARGS|METH_KEYWORDS, rowIdColumns_doc }, + { "rowVerColumns", (PyCFunction)Cursor_rowVerColumns, METH_VARARGS|METH_KEYWORDS, rowVerColumns_doc }, + { "primaryKeys", (PyCFunction)Cursor_primaryKeys, METH_VARARGS|METH_KEYWORDS, primaryKeys_doc }, + { "foreignKeys", 
(PyCFunction)Cursor_foreignKeys, METH_VARARGS|METH_KEYWORDS, foreignKeys_doc }, + { "getTypeInfo", (PyCFunction)Cursor_getTypeInfo, METH_VARARGS|METH_KEYWORDS, getTypeInfo_doc }, + { "procedures", (PyCFunction)Cursor_procedures, METH_VARARGS|METH_KEYWORDS, procedures_doc }, + { "procedureColumns", (PyCFunction)Cursor_procedureColumns, METH_VARARGS|METH_KEYWORDS, procedureColumns_doc }, + { 0, 0, 0, 0 } +}; + +static char cursor_doc[] = + "Cursor objects represent a database cursor, which is used to manage the context\n" \ + "of a fetch operation. Cursors created from the same connection are not\n" \ + "isolated, i.e., any changes done to the database by a cursor are immediately\n" \ + "visible by the other cursors. Cursors created from different connections are\n" \ + "isolated.\n" \ + "\n" \ + "Cursors implement the iterator protocol, so results can be iterated:\n" \ + "\n" \ + " cursor.execute(sql)\n" \ + " for row in cursor:\n" \ + " print row[0]"; + +PyTypeObject CursorType = +{ + PyObject_HEAD_INIT(0) + 0, // ob_size + "pyodbc.Cursor", // tp_name + sizeof(Cursor), // tp_basicsize + 0, // tp_itemsize + (destructor)Cursor_dealloc, // destructor tp_dealloc + 0, // tp_print + 0, // tp_getattr + 0, // tp_setattr + 0, // tp_compare + 0, // tp_repr + 0, // tp_as_number + 0, // tp_as_sequence + 0, // tp_as_mapping + 0, // tp_hash + 0, // tp_call + 0, // tp_str + 0, // tp_getattro + 0, // tp_setattro + 0, // tp_as_buffer + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, // tp_flags + cursor_doc, // tp_doc + 0, // tp_traverse + 0, // tp_clear + 0, // tp_richcompare + 0, // tp_weaklistoffset + (getiterfunc)Cursor_iter, // tp_iter + (iternextfunc)Cursor_iternext, // tp_iternext + Cursor_methods, // tp_methods + Cursor_members, // tp_members + Cursor_getsetters, // tp_getset + 0, // tp_base + 0, // tp_dict + 0, // tp_descr_get + 0, // tp_descr_set + 0, // tp_dictoffset + 0, // tp_init + 0, // tp_alloc + 0, // tp_new + 0, // tp_free + 0, // tp_is_gc + 0, // tp_bases + 0, // 
tp_mro + 0, // tp_cache + 0, // tp_subclasses + 0, // tp_weaklist +}; + +Cursor* +Cursor_New(Connection* cnxn) +{ + // Exported to allow the connection class to create cursors. + + Cursor* cur = PyObject_NEW(Cursor, &CursorType); + + if (cur) + { + cur->cnxn = cnxn; + cur->hstmt = SQL_NULL_HANDLE; + cur->description = Py_None; + cur->pPreparedSQL = 0; + cur->paramcount = 0; + cur->paramdescs = 0; + cur->paramdata = 0; + cur->colinfos = 0; + cur->arraysize = 1; + cur->rowcount = -1; + cur->map_name_to_index = 0; + + Py_INCREF(cnxn); + Py_INCREF(cur->description); + + if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_STMT, cnxn->hdbc, &cur->hstmt))) + { + RaiseErrorFromHandle("SQLAllocHandle", cnxn->hdbc, SQL_NULL_HANDLE); + Py_DECREF(cur); + return 0; + } + +#ifdef TRACE_ALL + printf("cursor.new cnxn=%p hdbc=%d cursor=%p hstmt=%d\n", (Connection*)cur->cnxn, ((Connection*)cur->cnxn)->hdbc, cur, cur->hstmt); +#endif + } + + return cur; +} + +void +Cursor_init() +{ + PyDateTime_IMPORT; +} + diff --git a/src/cursor.h b/src/cursor.h new file mode 100644 index 00000000..8a6173b1 --- /dev/null +++ b/src/cursor.h @@ -0,0 +1,113 @@ + +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS + * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef CURSOR_H +#define CURSOR_H + +struct Connection; +struct ParamDesc; + +struct ColumnInfo +{ + SQLSMALLINT sql_type; + + // The column size from SQLDescribeCol. For character types, this is the maximum length, not including the NULL + // terminator. For binary values, this is the maximum length. For numeric and decimal values, it is the defined + // number of digits. For example, the precision of a column defined as NUMERIC(10,3) is 10. + // + // This value can be SQL_NO_TOTAL in which case the driver doesn't know the maximum length, such as for LONGVARCHAR + // fields. + SQLULEN column_size; + + // Tells us if an integer type is signed or unsigned. This is determined after a query using SQLColAttribute. All + // of the integer types are the same size whether signed and unsigned, so we can allocate memory ahead of time + // without knowing this. We use this during the fetch when converting to a Python integer or long. + bool is_unsigned; +}; + + +struct Cursor +{ + PyObject_HEAD + + // The Connection object (which is a PyObject) that created this cursor. + Connection* cnxn; + + // Set to SQL_NULL_HANDLE when the cursor is closed. + HSTMT hstmt; + + // + // SQL Parameters + // + + // If non-zero, a pointer to the previously prepared SQL string, allowing us to skip the prepare and gathering of + // parameter data. + PyObject* pPreparedSQL; + + // The number of parameter markers in pPreparedSQL and the number of entries in paraminfos. This will be zero when + // pPreparedSQL is zero. + int paramcount; + + // If non-zero, a pointer to an array of ParamDescs, allocated via malloc. This will be zero when pPreparedSQL is + // zero. 
+ ParamDesc* paramdescs; + + // If non-zero, a pointer to a buffer containing the actual parameters bound. If pPreparedSQL is zero, this should + // be freed using free and set to zero. + // + // Even if the same SQL statement is executed twice, the parameter bindings are redone from scratch since we try to + // bind into the Python objects directly. + byte* paramdata; + + // + // Result Information + // + + // An array of ColumnInfos, allocated via malloc. This will be zero when closed or when there are no query + // results. + ColumnInfo* colinfos; + + // The description tuple described in the DB API 2.0 specification. Set to None when there are no results. + PyObject* description; + + int arraysize; + + // The Cursor.rowcount attribute from the DB API specification. + int rowcount; + + // A dictionary that maps from column name (PyString) to index into the result columns (PyInteger). This is + // constructued during an execute and shared with each row (reference counted) to implement accessing results by + // column name. + // + // This duplicates some ODBC functionality, but allows us to use Row objects after the statement is closed and + // should use less memory than putting each column into the Row's __dict__. + // + // Since this is shared by Row objects, it cannot be reused. New dictionaries are created for every execute. This + // will be zero whenever there are no results. + PyObject* map_name_to_index; +}; + +void Cursor_init(); + +Cursor* Cursor_New(Connection* cnxn); +PyObject* Cursor_execute(PyObject* self, PyObject* args); + +enum +{ + // The parameter size we'll try to bind. If a buffer is larger than this, we'll use SQLPutData. 
+ + MAX_VARCHAR_BUFFER = 255, // MS Access + MAX_VARBINARY_BUFFER = 510, // MS Access +}; + +#endif diff --git a/src/errors.cpp b/src/errors.cpp new file mode 100644 index 00000000..1cd351ca --- /dev/null +++ b/src/errors.cpp @@ -0,0 +1,300 @@ + +#include "pyodbc.h" +#include "errors.h" +#include "pyodbcmodule.h" + +// Exceptions + +struct SqlStateMapping +{ + char* prefix; + int prefix_len; + PyObject** pexc_class; // Note: Double indirection (pexc_class) necessary because the pointer values are not + // initialized during startup +}; + +static const struct SqlStateMapping sql_state_mapping[] = +{ + { "0A000", 5, &NotSupportedError }, + { "40002", 5, &IntegrityError }, + { "22", 2, &DataError }, + { "23", 2, &IntegrityError }, + { "24", 2, &ProgrammingError }, + { "25", 2, &ProgrammingError }, + { "42", 2, &ProgrammingError }, +}; + +static PyObject* +ExceptionFromSqlState(const char* sqlstate) +{ + // Returns the appropriate Python exception class given a SQLSTATE value. + + if (sqlstate && *sqlstate) + { + for (size_t i = 0; i < _countof(sql_state_mapping); i++) + if (memcmp(sqlstate, sql_state_mapping[i].prefix, sql_state_mapping[i].prefix_len) == 0) + return *sql_state_mapping[i].pexc_class; + } + + return Error; +} + +PyObject* +RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...) +{ + PyObject *pAttrs = 0, *pError = 0; + + if (!sqlstate || !*sqlstate) + sqlstate = "HY000"; + + if (!exc_class) + exc_class = ExceptionFromSqlState(sqlstate); + + // Note: Don't use any native strprintf routines. With Py_ssize_t, we need "%zd", but VC .NET doesn't support it. + // PyString_FromFormatV already takes this into account. 
+ + va_list marker; + va_start(marker, format); + PyObject* pMsg = PyString_FromFormatV(format, marker); + va_end(marker); + if (!pMsg) + { + PyErr_NoMemory(); + return 0; + } + + // Create an exception with a 'sqlstate' attribute (set to None if we don't have one) whose 'args' attribute is a + // tuple containing the message and sqlstate value. The 'sqlstate' attribute ensures it is easy to access in + // Python (and more understandable to the reader than ex.args[1]), but putting it in the args ensures it shows up + // in logs because of the default repr/str. + + pAttrs = Py_BuildValue("(Os)", pMsg, sqlstate); + if (pAttrs) + { + pError = PyEval_CallObject(exc_class, pAttrs); + if (pError) + PyErr_SetObject(exc_class, pError); + } + + Py_DECREF(pMsg); + Py_XDECREF(pAttrs); + Py_XDECREF(pError); + + return 0; +} + +bool HasSqlState(PyObject* ex, const char* szSqlState) +{ + // Returns true if `ex` is an exception and has the given SQLSTATE. It is safe to pass 0 for ex. + + bool has = false; + + if (ex) + { + PyObject* args = PyObject_GetAttrString(ex, "args"); + if (args != 0) + { + PyObject* s = PySequence_GetItem(args, 1); + if (s != 0 && PyString_Check(s)) + { + const char* sz = PyString_AsString(s); + if (sz && strcmpi(sz, szSqlState) == 0) + has = true; + } + Py_XDECREF(s); + Py_DECREF(args); + } + } + + return has; +} + + +static PyObject* GetError(const char* sqlstate, PyObject* exc_class, PyObject* pMsg) +{ + // pMsg + // The error message. This function takes ownership of this object, so we'll free it if we fail to create an + // error. 
+ + PyObject *pSqlState=0, *pAttrs=0, *pError=0; + + if (!sqlstate || !*sqlstate) + sqlstate = "HY000"; + + if (!exc_class) + exc_class = ExceptionFromSqlState(sqlstate); + + pAttrs = PyTuple_New(2); + if (!pAttrs) + { + Py_DECREF(pMsg); + return 0; + } + + PyTuple_SetItem(pAttrs, 1, pMsg); // pAttrs now owns the pMsg reference; steals a reference, does not increment + + pSqlState = PyString_FromString(sqlstate); + if (!pSqlState) + { + Py_DECREF(pAttrs); + return 0; + } + + PyTuple_SetItem(pAttrs, 0, pSqlState); // pAttrs now owns the pSqlState reference + + pError = PyEval_CallObject(exc_class, pAttrs); // pError will incref pAttrs + + Py_XDECREF(pAttrs); + + return pError; +} + +static const char* DEFAULT_ERROR = "The driver did not supply an error!"; + +PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt) +{ + // The exception is "set" in the interpreter. This function returns 0 so this can be used in a return statement. + + PyObject* pError = GetErrorFromHandle(szFunction, hdbc, hstmt); + + if (pError) + { + PyErr_SetObject(PyObject_Type(pError), pError); + Py_DECREF(pError); + } + + return 0; +} + +PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt) +{ +#ifdef TRACE_ALL + printf("In RaiseError!\n"); +#endif + // Creates and returns an exception from ODBC error information. + // + // ODBC can generate a chain of errors which we concatenate into one error message. We use the SQLSTATE from the + // first message, which seems to be the most detailed, to determine the class of exception. + // + // If the function fails, for example, if it runs out of memory, zero is returned. + // + // szFunction + // The name of the function that failed. Python generates a useful stack trace, but we often don't know where in + // the C++ code we failed. 
+ + SQLSMALLINT nHandleType; + SQLHANDLE h; + + char sqlstate[6] = ""; + SQLINTEGER nNativeError; + SQLSMALLINT cchMsg; + + char sqlstateT[6]; + char szMsg[1024]; + + PyObject* pMsg = 0; + PyObject* pMsgPart = 0; + + if (hstmt != SQL_NULL_HANDLE) + { + nHandleType = SQL_HANDLE_STMT; + h = hstmt; + } + else if (hdbc != SQL_NULL_HANDLE) + { + nHandleType = SQL_HANDLE_DBC; + h = hdbc; + } + else + { + nHandleType = SQL_HANDLE_ENV; + h = henv; + } + + // unixODBC + PostgreSQL driver 07.01.0003 (Fedora 8 binaries from RPMs) crash if you call SQLGetDiagRec more + // than once. I hate to do this, but I'm going to only call it once for non-Windows platforms for now... + + SQLSMALLINT iRecord = 1; + + for (;;) + { + szMsg[0] = 0; + sqlstateT[0] = 0; + nNativeError = 0; + cchMsg = 0; + + SQLRETURN r = SQLGetDiagRec(nHandleType, h, iRecord, (SQLCHAR*)sqlstateT, &nNativeError, (SQLCHAR*)szMsg, (short)(_countof(szMsg)-1), &cchMsg); + if (!SQL_SUCCEEDED(r)) + break; + + // Not always NULL terminated (MS Access) + sqlstateT[5] = 0; + + if (cchMsg != 0) + { + if (iRecord == 1) + { + // This is the first error message, so save the SQLSTATE for determining the exception class and append + // the calling function name. + + memcpy(sqlstate, sqlstateT, sizeof(sqlstate[0]) * _countof(sqlstate)); + + pMsg = PyString_FromFormat("[%s] %s (%ld) (%s)", sqlstateT, szMsg, nNativeError, szFunction); + if (pMsg == 0) + return 0; + } + else + { + // This is not the first error message, so append to the existing one. + pMsgPart = PyString_FromFormat("; [%s] %s (%ld)", sqlstateT, szMsg, nNativeError); + if (pMsgPart == 0) + { + Py_XDECREF(pMsg); + return 0; + } + PyString_ConcatAndDel(&pMsg, pMsgPart); + } + } + + iRecord++; + +#ifndef _MSC_VER + // See non-Windows comment above + break; +#endif + } + + if (pMsg == 0) + { + // This only happens using unixODBC. (Haven't tried iODBC yet.) 
Either the driver or the driver manager is + // buggy and has signaled a fault without recording error information. + sqlstate[0] = '\0'; + pMsg = PyString_FromString(DEFAULT_ERROR); + if (pMsg == 0) + { + PyErr_NoMemory(); + return 0; + } + } + + return GetError(sqlstate, 0, pMsg); +} + +static bool GetSqlState(HSTMT hstmt, char* szSqlState) +{ + SQLCHAR szMsg[300]; + SQLSMALLINT cbMsg = (SQLSMALLINT)(_countof(szMsg) - 1); + SQLINTEGER nNative; + SQLSMALLINT cchMsg; + + SQLRETURN ret = SQLGetDiagRec(SQL_HANDLE_STMT, hstmt, 1, (SQLCHAR*)szSqlState, &nNative, szMsg, cbMsg, &cchMsg); + return SQL_SUCCEEDED(ret); +} + +bool HasSqlState(HSTMT hstmt, const char* szSqlState) +{ + char szActual[6]; + if (!GetSqlState(hstmt, szActual)) + return false; + return memcmp(szActual, szSqlState, 5) == 0; +} diff --git a/src/errors.h b/src/errors.h new file mode 100644 index 00000000..805cab30 --- /dev/null +++ b/src/errors.h @@ -0,0 +1,52 @@ + +#ifndef _ERRORS_H_ +#define _ERRORS_H_ + +// Sets an exception based on the ODBC SQLSTATE and error message and returns zero. If either handle is not available, +// pass SQL_NULL_HANDLE. +// +// szFunction +// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the +// C++ code we failed. +// +PyObject* RaiseErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt); + +// Sets an exception using a printf-like error message. +// +// szSqlState +// The optional SQLSTATE reported by ODBC. If not provided (sqlstate is NULL or sqlstate[0] is NULL), "HY000" +// (General Error) is used. Note that HY000 causes Error to be used if exc_class is not provided. +// +// exc_class +// The optional exception class (DatabaseError, etc.) to construct. If NULL, the appropriate class will be +// determined from the SQLSTATE. +// +PyObject* RaiseErrorV(const char* sqlstate, PyObject* exc_class, const char* format, ...); + + +// Constructs an exception and returns it. 
+// +// This function is like RaiseErrorFromHandle, but gives you the ability to examine the error first (in particular, +// used to examine the SQLSTATE using HasSqlState). If you want to use the error, call PyErr_SetObject(ex->ob_type, +// ex). Otherwise, dispose of the error using Py_DECREF(ex). +// +// szFunction +// The name of the function that failed. Python generates a useful stack trace, but we often don't know where in the +// C++ code we failed. +// +PyObject* GetErrorFromHandle(const char* szFunction, HDBC hdbc, HSTMT hstmt); + + +// Returns true if `ex` is a database exception with SQLSTATE `szSqlState`. Returns false otherwise. +// +// It is safe to call with ex set to zero. The SQLSTATE comparison is case-insensitive. +// +bool HasSqlState(PyObject* ex, const char* szSqlState); + + +// Returns true if the HSTMT has a diagnostic record with the given SQLSTATE. This is used after SQLGetData call that +// returned SQL_SUCCESS_WITH_INFO to see if it also has SQLSTATE 01004, indicating there is more data. +// +bool HasSqlState(HSTMT hstmt, const char* szSqlState); + +#endif // _ERRORS_H_ diff --git a/src/getdata.cpp b/src/getdata.cpp new file mode 100644 index 00000000..8f73c519 --- /dev/null +++ b/src/getdata.cpp @@ -0,0 +1,573 @@ + +// The functions for reading a single value from the database using SQLGetData. There is a different function for +// every data type. + +#include "pyodbc.h" +#include "pyodbcmodule.h" +#include "cursor.h" +#include "connection.h" +#include "errors.h" + +void GetData_init() +{ + PyDateTime_IMPORT; +} + +class DataBuffer +{ + // Manages memory that GetDataString uses to read data in chunks. We use the same function (GetDataString) to read + // variable length data for 3 different types of data: binary, ANSI, and Unicode. This class abstracts out the + // memory management details to keep the function simple. 
+ // + // There are 3 potential data buffer types we deal with in GetDataString: + // + // 1) Binary, which is a simple array of 8-bit bytes. + // 2) ANSI text, which is an array of chars with a NULL terminator. + // 3) Unicode text, which is an array of wchar_ts with a NULL terminator. + // + // When dealing with Unicode, there are two widths we have to be aware of: (1) wchar_t and (2) Py_UNICODE. If + // these are the same we can use a PyUnicode object so we don't have to allocate our own buffer and then the + // Unicode object. If they are not the same (e.g. OS/X where wchar_t-->4 Py_UNICODE-->2) then we need to maintain + // our own buffer and pass it to the PyUnicode object later. + // + // To reduce heap fragmentation, we perform the initial read into an array on the stack since we don't know the + // length of the data. If the data doesn't fit, this class then allocates new memory. + +private: + SQLSMALLINT dataType; + + char* buffer; + Py_ssize_t bufferSize; // How big is the buffer. + int bytesUsed; // How many elements have been read into the buffer? + + PyObject* bufferOwner; // If possible, we bind into a PyString or PyUnicode object. + int element_size; // How wide is each character: ASCII/ANSI -> 1, Unicode -> 2 or 4, binary -> 1 + + bool usingStack; // Is buffer pointing to the initial stack buffer? + +public: + int null_size; // How much room to add for null terminator: binary -> 0, other -> same as a element_size + + DataBuffer(SQLSMALLINT dataType, char* stackBuffer, SQLLEN stackBufferSize) + { + // dataType + // The type of data we will be reading: SQL_C_CHAR, SQL_C_WCHAR, or SQL_C_BINARY. + + this->dataType = dataType; + + element_size = (dataType == SQL_C_WCHAR) ? sizeof(wchar_t) : sizeof(char); + null_size = (dataType == SQL_C_BINARY) ? 
0 : element_size; + + buffer = stackBuffer; + bufferSize = stackBufferSize; + usingStack = true; + bufferOwner = 0; + bytesUsed = 0; + } + + ~DataBuffer() + { + if (!usingStack) + { + if (bufferOwner) + { + Py_DECREF(bufferOwner); + } + else + { + free(buffer); + } + } + } + + char* GetBuffer() + { + if (!buffer) + return 0; + + return buffer + bytesUsed; + } + + SQLLEN GetRemaining() + { + // Returns the amount of data remaining in the buffer, ready to be passed to SQLGetData. + return bufferSize - bytesUsed; + } + + void AddUsed(SQLLEN cbRead) + { + I(cbRead <= GetRemaining()); + bytesUsed += cbRead; + } + + bool AllocateMore(SQLLEN cbAdd) + { + if (cbAdd == 0) + return true; + + SQLLEN newSize = bufferSize + cbAdd; + + if (usingStack) + { + // This is the first call and `buffer` points to stack memory. Allocate a new object and copy the stack + // data into it. + + char* stackBuffer = buffer; + + if (dataType == SQL_C_CHAR || dataType == SQL_C_BINARY) + { + bufferOwner = PyString_FromStringAndSize(0, newSize); + buffer = bufferOwner ? PyString_AS_STRING(bufferOwner) : 0; + } + else if (sizeof(wchar_t) == Py_UNICODE_SIZE) + { + // Allocate directly into a Unicode object. + bufferOwner = PyUnicode_FromUnicode(0, newSize / element_size); + buffer = bufferOwner ? (char*)PyUnicode_AsUnicode(bufferOwner) : 0; + } + else + { + // We're Unicode, but wchar_t and Py_UNICODE don't match, so maintain our own wchar_t buffer. 
+ buffer = (char*)malloc(newSize); + } + + usingStack = false; + + if (buffer == 0) + return false; + + memcpy(buffer, stackBuffer, bufferSize); + bufferSize = newSize; + return true; + } + + if (PyString_CheckExact(bufferOwner)) + { + if (_PyString_Resize(&bufferOwner, newSize) == -1) + return false; + buffer = PyString_AS_STRING(bufferOwner); + } + else if (PyUnicode_CheckExact(bufferOwner)) + { + if (PyUnicode_Resize(&bufferOwner, newSize / element_size) == -1) + return false; + buffer = (char*)PyUnicode_AsUnicode(bufferOwner); + } + else + { + char* tmp = (char*)realloc(buffer, newSize); + if (tmp == 0) + return false; + buffer = tmp; + } + + bufferSize = newSize; + + return true; + } + + PyObject* DetachValue() + { + // At this point, Trim should have been called by PostRead. + + if (bytesUsed == SQL_NULL_DATA || buffer == 0) + Py_RETURN_NONE; + + if (usingStack) + { + if (dataType == SQL_C_CHAR || dataType == SQL_C_BINARY) + return PyString_FromStringAndSize(buffer, bytesUsed); + + if (sizeof(wchar_t) == Py_UNICODE_SIZE) + return PyUnicode_FromUnicode((const Py_UNICODE*)buffer, bytesUsed / element_size); + + return PyUnicode_FromWideChar((const wchar_t*)buffer, bytesUsed / element_size); + } + + if (PyString_CheckExact(bufferOwner)) + { + if (_PyString_Resize(&bufferOwner, bytesUsed) == -1) + return 0; + PyObject* tmp = bufferOwner; + bufferOwner = 0; + buffer = 0; + return tmp; + } + + if (PyUnicode_CheckExact(bufferOwner)) + { + if (PyUnicode_Resize(&bufferOwner, bytesUsed / element_size) == -1) + return 0; + PyObject* tmp = bufferOwner; + bufferOwner = 0; + buffer = 0; + return tmp; + } + + // We have allocated our own wchar_t buffer and must now copy it to a Unicode object. 
+ PyObject* result = PyUnicode_FromWideChar((const wchar_t*)buffer, bytesUsed / element_size); + if (result == 0) + return false; + free(buffer); + buffer = 0; + return result; + } +}; + +static PyObject* +GetDataString(Cursor* cur, int iCol) +{ + // Returns a String or Unicode object for character and binary data. + + // NULL terminator notes: + // + // * pinfo->column_size, from SQLDescribeCol, does not include a NULL terminator. For example, column_size for a + // char(10) column would be 10. (Also, when dealing with wchar_t, it is the number of *characters*, not bytes.) + // + // * When passing a length to PyString_FromStringAndSize and similar Unicode functions, do not add the NULL + // terminator -- it will be added automatically. See objects/stringobject.c + // + // * SQLGetData does not return the NULL terminator in the length indicator. (Therefore, you can pass this value + // directly to the Python string functions.) + // + // * SQLGetData will write a NULL terminator in the output buffer, so you must leave room for it. You must also + // include the NULL terminator in the buffer length passed to SQLGetData. + // + // ODBC generalization: + // 1) Include NULL terminators in input buffer lengths. + // 2) NULL terminators are not used in data lengths. + + ColumnInfo* pinfo = &cur->colinfos[iCol]; + + // Some Unix ODBC drivers do not return the correct length. 
+ if (pinfo->sql_type == SQL_GUID) + pinfo->column_size = 36; + + SQLSMALLINT nTargetType; + + switch (pinfo->sql_type) + { + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + case SQL_GUID: + nTargetType = SQL_C_CHAR; + break; + + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + nTargetType = SQL_C_WCHAR; + break; + + default: + nTargetType = SQL_C_BINARY; + break; + } + + char tempBuffer[1024]; + DataBuffer buffer(nTargetType, tempBuffer, sizeof(tempBuffer)); + + for (int iDbg = 0; iDbg < 10; iDbg++) // failsafe + { + SQLRETURN ret; + SQLLEN cbData = 0; + + Py_BEGIN_ALLOW_THREADS + ret = SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), nTargetType, buffer.GetBuffer(), buffer.GetRemaining(), &cbData); + Py_END_ALLOW_THREADS; + + if (cbData == SQL_NULL_DATA) + Py_RETURN_NONE; + + if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + // The SQLGetData behavior is incredibly quirky. It doesn't tell us the total, the total we've read, or even + // the amount just read. It returns the amount just read, plus any remaining. Unfortunately, the only way to + // pick them apart is to subtract out the amount of buffer we supplied. + + SQLLEN cbBuffer = buffer.GetRemaining(); // how much we gave SQLGetData + + if (ret == SQL_SUCCESS_WITH_INFO) + { + // There is more data than fits in the buffer. The amount of data equals the amount of data in the buffer + // minus a NULL terminator. + + SQLLEN cbRead; + SQLLEN cbMore; + + if (cbData == SQL_NO_TOTAL) + { + // We don't know how much more, so just guess. + cbRead = cbBuffer - buffer.null_size; + cbMore = 2048; + } + else if (cbData >= cbBuffer) + { + // There is more data. We supplied cbBuffer, but there was cbData (more). We received cbBuffer, so we + // need to subtract that, allocate enough to read the rest (cbData-cbBuffer). 
+ + cbRead = cbBuffer - buffer.null_size; + cbMore = cbData - cbRead; + } + else + { + // I'm not really sure why I would be here ... I would have expected SQL_SUCCESS + cbRead = cbData - buffer.null_size; + cbMore = 0; + } + + buffer.AddUsed(cbRead); + if (!buffer.AllocateMore(cbMore)) + return PyErr_NoMemory(); + } + else if (ret == SQL_SUCCESS) + { + // For some reason, the NULL terminator is used in intermediate buffers but not in this final one. + buffer.AddUsed(cbData); + } + + if (ret == SQL_SUCCESS || ret == SQL_NO_DATA) + return buffer.DetachValue(); + } + + // REVIEW: Add an error message. + return 0; +} + +static PyObject* +GetDataBuffer(Cursor* cur, Py_ssize_t iCol) +{ + PyObject* str = GetDataString(cur, iCol); + + if (str == Py_None) + return str; + + PyObject* buffer = 0; + + if (str) + { + buffer = PyBuffer_FromObject(str, 0, PyString_GET_SIZE(str)); + Py_DECREF(str); // If no buffer, release it. If buffer, the buffer owns it. + } + + return buffer; +} + +static PyObject* +GetDataDecimal(Cursor* cur, int iCol) +{ + // The SQL_NUMERIC_STRUCT support is hopeless (SQL Server ignores scale on input parameters and output columns), so + // we'll rely on the Decimal's string parsing. Unfortunately, the Decimal author does not pay attention to the + // locale, so we have to modify the string ourselves. + // + // Oracle inserts group separators (commas in US, periods in some countries), so leave room for that too. + + ColumnInfo* pinfo = &cur->colinfos[iCol]; + + SQLLEN cbNeeded = pinfo->column_size + 3 + // sign, decimal, NULL + (pinfo->column_size / 3) + 2; // grouping. I believe this covers all cases. 
+ + SQLLEN cbFetched = 0; + char* sz = (char*)_alloca(cbNeeded); + + if (sz == 0) + return PyErr_NoMemory(); + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), SQL_C_CHAR, sz, cbNeeded, &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + // The decimal class requires the decimal to be a period and does not allow thousands separators. Clean it up. + // + // Unfortunately this code only handles single-character values, which might be good enough for decimals and + // separators, but is certainly not good enough for currency symbols. + // + // Note: cbFetched does not include the NULL terminator. + + for (int i = cbFetched - 1; i >=0; i--) + { + if (sz[i] == chGroupSeparator || sz[i] == '$' || sz[i] == chCurrencySymbol) + { + memmove(&sz[i], &sz[i] + 1, cbFetched - i); + cbFetched--; + } + else if (sz[i] == chDecimal) + { + sz[i] = '.'; + } + } + + return PyObject_CallFunction(decimal_type, "s", sz); +} + +static PyObject* +GetDataBit(Cursor* cur, int iCol) +{ + SQLCHAR ch; + SQLLEN cbFetched; + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), SQL_C_BIT, &ch, sizeof(ch), &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + if (ch == SQL_TRUE) + Py_RETURN_TRUE; + + Py_RETURN_FALSE; +} + +static PyObject* +GetDataLong(Cursor* cur, int iCol) +{ + ColumnInfo* pinfo = &cur->colinfos[iCol]; + + SQLINTEGER value = 0; + SQLLEN cbFetched = 0; + + SQLSMALLINT nCType = pinfo->is_unsigned ? 
SQL_C_ULONG : SQL_C_LONG; + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + if (pinfo->is_unsigned) + return PyInt_FromLong(*(SQLINTEGER*)&value); + + return PyInt_FromLong(value); +} + +static PyObject* +GetDataLongLong(Cursor* cur, int iCol) +{ + ColumnInfo* pinfo = &cur->colinfos[iCol]; + + INT64 value = 0; + SQLLEN cbFetched = 0; + + SQLSMALLINT nCType = pinfo->is_unsigned ? SQL_C_UBIGINT : SQL_C_SBIGINT; + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), nCType, &value, sizeof(value), &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + if (pinfo->is_unsigned) + return PyLong_FromLongLong(*(UINT64*)&value); + + return PyLong_FromLongLong(*(INT64*)&value); +} + +static PyObject* +GetDataDouble(Cursor* cur, int iCol) +{ + double value; + SQLLEN cbFetched = 0; + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), SQL_C_DOUBLE, &value, sizeof(value), &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + return PyFloat_FromDouble(value); +} + + +static PyObject* +GetDataTimestamp(Cursor* cur, int iCol) +{ + TIMESTAMP_STRUCT value; + + SQLLEN cbFetched = 0; + + if (!SQL_SUCCEEDED(SQLGetData(cur->hstmt, (SQLSMALLINT)(iCol+1), SQL_C_TYPE_TIMESTAMP, &value, sizeof(value), &cbFetched))) + return RaiseErrorFromHandle("SQLGetData", cur->cnxn->hdbc, cur->hstmt); + + if (cbFetched == SQL_NULL_DATA) + Py_RETURN_NONE; + + switch (cur->colinfos[iCol].sql_type) + { + case SQL_TYPE_TIME: + return PyTime_FromTime(value.hour, value.minute, value.second, 0); + + case SQL_TYPE_DATE: + return PyDate_FromDate(value.year, value.month, value.day); + } + + // The fraction field is 
in nanoseconds. + int micros = value.fraction / 1000; + + return PyDateTime_FromDateAndTime(value.year, value.month, value.day, value.hour, value.minute, value.second, micros); +} + + +PyObject* +GetData(Cursor* cur, Py_ssize_t iCol) +{ + // Returns an object representing the value in the row/field. If 0 is returned, an exception has already been set. + // + // The data is assumed to be the default C type for the column's SQL type. + + ColumnInfo* pinfo = &cur->colinfos[iCol]; + + switch (pinfo->sql_type) + { + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + case SQL_GUID: + return GetDataString(cur, iCol); + + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + return GetDataBuffer(cur, iCol); + + case SQL_DECIMAL: + case SQL_NUMERIC: + { + if (decimal_type == 0) + break; + + return GetDataDecimal(cur, iCol); + } + + case SQL_BIT: + return GetDataBit(cur, iCol); + + case SQL_TINYINT: + case SQL_SMALLINT: + case SQL_INTEGER: + return GetDataLong(cur, iCol); + + case SQL_BIGINT: + return GetDataLongLong(cur, iCol); + + case SQL_REAL: + case SQL_FLOAT: + case SQL_DOUBLE: + return GetDataDouble(cur, iCol); + + + case SQL_TYPE_DATE: + case SQL_TYPE_TIME: + case SQL_TYPE_TIMESTAMP: + return GetDataTimestamp(cur, iCol); + } + + return RaiseErrorV("HY106", ProgrammingError, "ODBC SQL type %d is not yet supported. 
column-index=%zd type=%d", + (int)pinfo->sql_type, iCol, (int)pinfo->sql_type); +} diff --git a/src/getdata.h b/src/getdata.h new file mode 100644 index 00000000..99dfe8f6 --- /dev/null +++ b/src/getdata.h @@ -0,0 +1,9 @@ + +#ifndef _GETDATA_H_ +#define _GETDATA_H_ + +void GetData_init(); + +PyObject* GetData(Cursor* cur, Py_ssize_t iCol); + +#endif // _GETDATA_H_ diff --git a/src/params.cpp b/src/params.cpp new file mode 100644 index 00000000..b4ffe355 --- /dev/null +++ b/src/params.cpp @@ -0,0 +1,746 @@ + +#include "pyodbc.h" +#include "pyodbcmodule.h" +#include "params.h" +#include "cursor.h" +#include "connection.h" +#include "buffer.h" +#include "wrapper.h" +#include "errors.h" + +struct ParamDesc +{ + SQLSMALLINT sql_type; + SQLULEN column_size; + SQLSMALLINT decimal_digits; +}; + +inline Connection* GetConnection(Cursor* cursor) +{ + return (Connection*)cursor->cnxn; +} + +static bool CacheParamDesc(Cursor* cur); +static int GetParamBufferSize(PyObject* param, Py_ssize_t iParam); +static bool BindParam(Cursor* cur, int iParam, const ParamDesc* pDesc, PyObject* param, byte** ppbParam); + +void FreeParameterData(Cursor* cur) +{ + // Unbinds the parameters and frees the parameter buffer. + + if (cur->paramdata) + { + SQLFreeStmt(cur->hstmt, SQL_RESET_PARAMS); + free(cur->paramdata); + cur->paramdata = 0; + } +} + +void FreeParameterInfo(Cursor* cur) +{ + // Internal function to free just the cached parameter information. This is not used by the general cursor code + // since this information is also freed in the less granular free_results function that clears everything. 
+ + Py_XDECREF(cur->pPreparedSQL); + free(cur->paramdescs); + cur->pPreparedSQL = 0; + cur->paramdescs = 0; + cur->paramcount = 0; +} + + +struct ObjectArrayHolder +{ + Py_ssize_t count; + PyObject** objs; + ObjectArrayHolder(Py_ssize_t count, PyObject** objs) + { + this->count = count; + this->objs = objs; + } + ~ObjectArrayHolder() + { + for (Py_ssize_t i = 0; i < count; i++) + Py_XDECREF(objs[i]); + free(objs); + } +}; + +bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* original_params, bool skip_first) +{ + // + // Normalize the parameter variables. + // + + // Since we may replace parameters (we replace objects with Py_True/Py_False when writing to a bit/bool column), + // allocate an array and use it instead of the original sequence. Since we don't change ownership we don't bother + // with incref. (That is, PySequence_GetItem will INCREF and ~ObjectArrayHolder will DECREF.) + + int params_offset = skip_first ? 1 : 0; + Py_ssize_t cParams = original_params == 0 ? 0 : PySequence_Length(original_params) - params_offset; + + PyObject** params = (PyObject**)malloc(sizeof(PyObject*) * cParams); + if (!params) + { + PyErr_NoMemory(); + return 0; + } + + for (Py_ssize_t i = 0; i < cParams; i++) + params[i] = PySequence_GetItem(original_params, i + params_offset); + + ObjectArrayHolder holder(cParams, params); + + // + // Prepare the SQL if necessary. + // + + if (pSql == cur->pPreparedSQL) + { + // We've already prepared this SQL, so we don't need to do so again. We've also cached the parameter + // information in cur->paramdescs. 
+ + if (cParams != cur->paramcount) + { + RaiseErrorV(0, ProgrammingError, "The SQL contains %d parameter markers, but %d parameters were supplied", + cur->paramcount, cParams); + return false; + } + } + else + { + FreeParameterInfo(cur); + + SQLRETURN ret; + if (PyString_Check(pSql)) + { + Py_BEGIN_ALLOW_THREADS + ret = SQLPrepare(cur->hstmt, (SQLCHAR*)PyString_AS_STRING(pSql), SQL_NTS); + Py_END_ALLOW_THREADS + } + else + { + Py_BEGIN_ALLOW_THREADS + ret = SQLPrepareW(cur->hstmt, (SQLWCHAR*)PyUnicode_AsUnicode(pSql), SQL_NTS); + Py_END_ALLOW_THREADS + } + + if (cur->cnxn->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + return false; + } + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLPrepare", GetConnection(cur)->hdbc, cur->hstmt); + return false; + } + + if (!CacheParamDesc(cur)) + return false; + + cur->pPreparedSQL = pSql; + Py_INCREF(cur->pPreparedSQL); + } + + // + // Convert parameters if necessary + // + + // If we were able to get the parameter descriptions (the target columns), we'll convert objects being written to + // bit/bool columns. Drivers that don't give us the target descriptions will require users to pass in booleans or + // ints, but we hope drivers will add support. + + if (cur->paramdescs) + { + for (Py_ssize_t i = 0; i < cParams; i++) + { + if (cur->paramdescs[i].sql_type == SQL_BIT && !PyBool_Check(params[i])) + params[i] = PyObject_IsTrue(params[i]) ? Py_True : Py_False; + } + } + + // Calculate the amount of memory we need for param_buffer. We can't allow it to reallocate on the fly since + // we will bind directly into its memory. (We only use a vector so its destructor will free the memory.) + // We'll set aside one SQLLEN for each column to be used as the StrLen_or_IndPtr. 
+ + int cb = 0; + + for (Py_ssize_t i = 0; i < cParams; i++) + { + int cbT = GetParamBufferSize(params[i], i + 1) + sizeof(SQLLEN); // +1 to map to ODBC one-based index + + if (cbT < 0) + return 0; + + cb += cbT; + } + + cur->paramdata = reinterpret_cast(malloc(cb)); + if (cur->paramdata == 0) + { + PyErr_NoMemory(); + return false; + } + + // Bind each parameter. If possible, items will be bound directly into the Python object. Otherwise, + // param_buffer will be used and ibNext will be updated. + + byte* pbParam = cur->paramdata; + + for (Py_ssize_t i = 0; i < cParams; i++) + { + ParamDesc* pDesc = (cur->paramdescs != 0) ? &cur->paramdescs[i] : 0; + + if (!BindParam(cur, i + 1, pDesc, params[i], &pbParam)) + { + free(cur->paramdata); + cur->paramdata = 0; + return false; + } + } + + return true; +} + +static bool CacheParamDesc(Cursor* cur) +{ + // Called after a SQL statement is prepared to cache the number of parameters and some information about each. + // + // If successful, true is returned. Otherwise, the appropriate exception will be registered with the Python system and + // false is returned. + + cur->paramcount = 0; + cur->paramdescs = 0; + + SQLSMALLINT cT; + SQLRETURN ret; + Py_BEGIN_ALLOW_THREADS + ret = SQLNumParams(cur->hstmt, &cT); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLNumParams", GetConnection(cur)->hdbc, cur->hstmt); + return false; + } + + cur->paramcount = (int)cT; + + if (!GetConnection(cur)->supports_describeparam) + { + // The driver can't describe the parameters to us, so we'll do without. They are helpful, but are only really + // required when binding a None (NULL) since we don't know the required data type. 
+ return true; + } + + ParamDesc* pT = reinterpret_cast(malloc(sizeof(ParamDesc) * cT)); + if (pT == 0) + { + PyErr_NoMemory(); + return false; + } + + for (SQLSMALLINT i = 0; i < cT; i++) + { + SQLSMALLINT Nullable; + Py_BEGIN_ALLOW_THREADS + ret = SQLDescribeParam(cur->hstmt, static_cast(i + 1), &pT[i].sql_type, &pT[i].column_size, + &pT[i].decimal_digits, &Nullable); + Py_END_ALLOW_THREADS + + if (!SQL_SUCCEEDED(ret)) + { + // This used to trigger an error, but SQLDescribeParam just isn't as supported or robust as I had hoped. + // There are a couple of old bugs that cause "Invalid Descriptor index" that are supposed to be fixed, but + // one also mentions that SQLDescribeParam is not supported for subquery parameters (!). + // + // Once I find a way to bind None (NULL) consistently, I'll remove SQLDescribeParam completely. + + pT[i].sql_type = SQL_VARCHAR; + pT[i].column_size = 1; + pT[i].decimal_digits = 0; + } + } + + cur->paramdescs = pT; + + return true; +} + +static int GetParamBufferSize(PyObject* param, Py_ssize_t iParam) +{ + // Returns the size in bytes needed to hold the parameter in a format for binding, used to allocate the parameter + // buffer. (The value is not passed to ODBC. Values passed to ODBC are in BindParam.) + // + // If we can bind directly into the Python object (e.g., using PyString_AsString), zero is returned since no extra + // memory is required. If the data will be provided at execution time (e.g. SQL_DATA_AT_EXEC), zero is returned + // since the parameter value is not stored at all. If the data type is not recognized, -1 is returned. 
+ + if (param == Py_None) + return 0; + + if (PyString_Check(param) || PyUnicode_Check(param)) + return 0; + + if (param == Py_True || param == Py_False) + return 1; + + if (PyInt_Check(param)) + return sizeof(long int); + + if (PyLong_Check(param)) + return sizeof(INT64); + + if (PyFloat_Check(param)) + return sizeof(double); + + if (PyDecimal_Check(param)) + { + // There isn't an efficient way of getting the precision, but it's there and it's obvious. + + Object digits = PyObject_GetAttrString(param, "_int"); + if (digits) + return PySequence_Length(digits) + 3; // sign, decimal, null + + // _int doesn't exist any more? + return 42; + } + + if (PyBuffer_Check(param)) + { + // If the buffer has a single segment, we can bind directly to it, so we need 0 bytes. Otherwise, we'll use + // SQL_DATA_AT_EXEC, so we still need 0 bytes. + return 0; + } + + if (PyDateTime_Check(param)) + return sizeof(TIMESTAMP_STRUCT); + + if (PyDate_Check(param)) + return sizeof(DATE_STRUCT); + + if (PyTime_Check(param)) + return sizeof(TIME_STRUCT); + + RaiseErrorV("HY105", ProgrammingError, "Invalid parameter type. 
param-index=%zd param-type=%s", iParam, param->ob_type->tp_name); + + return -1; +} + +#ifdef TRACE_ALL +#define _MAKESTR(n) case n: return #n +static const char* SqlTypeName(SQLSMALLINT n) +{ + switch (n) + { + _MAKESTR(SQL_UNKNOWN_TYPE); + _MAKESTR(SQL_CHAR); + _MAKESTR(SQL_NUMERIC); + _MAKESTR(SQL_DECIMAL); + _MAKESTR(SQL_INTEGER); + _MAKESTR(SQL_SMALLINT); + _MAKESTR(SQL_FLOAT); + _MAKESTR(SQL_REAL); + _MAKESTR(SQL_DOUBLE); + _MAKESTR(SQL_DATETIME); + _MAKESTR(SQL_VARCHAR); + _MAKESTR(SQL_TYPE_DATE); + _MAKESTR(SQL_TYPE_TIME); + _MAKESTR(SQL_TYPE_TIMESTAMP); + } + return "unknown"; +} + +static const char* CTypeName(SQLSMALLINT n) +{ + switch (n) + { + _MAKESTR(SQL_C_CHAR); + _MAKESTR(SQL_C_LONG); + _MAKESTR(SQL_C_SHORT); + _MAKESTR(SQL_C_FLOAT); + _MAKESTR(SQL_C_DOUBLE); + _MAKESTR(SQL_C_NUMERIC); + _MAKESTR(SQL_C_DEFAULT); + _MAKESTR(SQL_C_DATE); + _MAKESTR(SQL_C_TIME); + _MAKESTR(SQL_C_TIMESTAMP); + _MAKESTR(SQL_C_TYPE_DATE); + _MAKESTR(SQL_C_TYPE_TIME); + _MAKESTR(SQL_C_TYPE_TIMESTAMP); + _MAKESTR(SQL_C_INTERVAL_YEAR); + _MAKESTR(SQL_C_INTERVAL_MONTH); + _MAKESTR(SQL_C_INTERVAL_DAY); + _MAKESTR(SQL_C_INTERVAL_HOUR); + _MAKESTR(SQL_C_INTERVAL_MINUTE); + _MAKESTR(SQL_C_INTERVAL_SECOND); + _MAKESTR(SQL_C_INTERVAL_YEAR_TO_MONTH); + _MAKESTR(SQL_C_INTERVAL_DAY_TO_HOUR); + _MAKESTR(SQL_C_INTERVAL_DAY_TO_MINUTE); + _MAKESTR(SQL_C_INTERVAL_DAY_TO_SECOND); + _MAKESTR(SQL_C_INTERVAL_HOUR_TO_MINUTE); + _MAKESTR(SQL_C_INTERVAL_HOUR_TO_SECOND); + _MAKESTR(SQL_C_INTERVAL_MINUTE_TO_SECOND); + _MAKESTR(SQL_C_BINARY); + _MAKESTR(SQL_C_BIT); + _MAKESTR(SQL_C_SBIGINT); + _MAKESTR(SQL_C_UBIGINT); + _MAKESTR(SQL_C_TINYINT); + _MAKESTR(SQL_C_SLONG); + _MAKESTR(SQL_C_SSHORT); + _MAKESTR(SQL_C_STINYINT); + _MAKESTR(SQL_C_ULONG); + _MAKESTR(SQL_C_USHORT); + _MAKESTR(SQL_C_UTINYINT); + _MAKESTR(SQL_C_GUID); + } + return "unknown"; +} + +#endif + +static bool BindParam(Cursor* cur, int iParam, const ParamDesc* pDesc, PyObject* param, byte** ppbParam) +{ + // Called to bind a single 
parameter. + // + // iParam + // The one-based index of the parameter being bound. + // + // param + // The parameter to bind. + // + // ppbParam + // On entry, *ppbParam points to the memory available for binding the current parameter. It should be + // incremented by the amount of memory used. + // + // Each parameter saves room for a length-indicator. If the Python object is not in a format that we can bind to + // directly, the memory immediately after the length indicator is used to copy the parameter data in a usable + // format. + // + // The memory used is determined by the type of PyObject. The total required is calculated, a buffer is + // allocated, and passed repeatedly to this function. It is essential that the amount pre-calculated (from + // GetParamBufferSize) match the amount used by this function. Any changes to either function must be + // coordinated. (It might be wise to build a table. I would do a lot more with assertions, but building a debug + // version of Python is a real pain. It would be great if the Python for Windows team provided a pre-built + // version.) + + // When binding, ODBC requires 2 values: the column size and the buffer size. The column size is related to the + // destination (the SQL type of the column being written to) and the buffer size refers to the source (the size of + // the C data being written). If you send the wrong column size, data may be truncated. For example, if you send + // sizeof(TIMESTAMP_STRUCT) as the column size when writing a timestamp, it will be rounded to the nearest minute + // because that is the precision that would fit into a string of that size. + + // Every parameter reserves space for a length-indicator. Either set *pcbData to the actual input data length or + // set pcbData to zero (not *pcbData) if you have a fixed-length parameter and don't need it. 
+ + SQLLEN* pcbValue = reinterpret_cast(*ppbParam); + *ppbParam += sizeof(SQLLEN); + + // (I've made the parameter a pointer-to-a-pointer (ergo, the "pp") so that it is obvious at the call-site that we + // are modifying it (&p). Here we save a pointer into the buffer which we can compare to pbValue later to see if + // we bound into the buffer (pbValue == pbParam) or bound directly into `param` (pbValue != pbParam). + // + // (This const means that the data the pointer points to is not const, you can change *pbParam, but the actual + // pointer itself is. We will be comparing the address to pbValue, not the contents.) + + byte* const pbParam = *ppbParam; + + SQLSMALLINT fCType = 0; + SQLSMALLINT fSqlType = 0; + SQLULEN cbColDef = 0; + SQLSMALLINT decimalDigits = 0; + SQLPOINTER pbValue = 0; // Set to the data to bind, either into `param` or set to pbParam. + SQLLEN cbValueMax = 0; + + if (pDesc != 0 && pDesc->sql_type == SQL_BIT) + { + // We know the target type is a Boolean, so we'll use Python semantics and ask the object if it is 'true'. + // (When using a database that won't give us the target types (pDesc == 0), users will have to pass a boolean + // or an int. I don't like having different behavior, but we hope all ODBC drivers are improving and will + // support the feature.) + // + // However, unlike Python, a database also supports NULL, so if the value is None, we'll keep it and write a + // NULL to the database. + + if (param != Py_None) + param = PyObject_IsTrue(param) ? Py_True : Py_False; + + // I'm not going to addref these since we aren't going to decref them either. Since they are global/singletons + // we know they'll be around for the duration of this function. + } + + if (param == Py_None) + { + fSqlType = pDesc ? 
pDesc->sql_type : SQL_VARCHAR; + fCType = SQL_C_DEFAULT; + *pcbValue = SQL_NULL_DATA; + cbColDef = 1; + } + else if (PyString_Check(param)) + { + char* pch = PyString_AS_STRING(param); + int len = PyString_GET_SIZE(param); + + if (len <= MAX_VARCHAR_BUFFER) + { + fSqlType = SQL_VARCHAR; + fCType = SQL_C_CHAR; + pbValue = pch; + cbColDef = max(len, 1); + cbValueMax = len + 1; + *pcbValue = (SQLLEN)len; + } + else + { + fSqlType = SQL_LONGVARCHAR; + fCType = SQL_C_CHAR; + pbValue = param; + cbColDef = max(len, 1); + cbValueMax = sizeof(PyObject*); + *pcbValue = SQL_LEN_DATA_AT_EXEC((SQLLEN)len); + } + } + else if (PyUnicode_Check(param)) + { + Py_UNICODE* pch = PyUnicode_AsUnicode(param); + int len = PyUnicode_GET_SIZE(param); + + if (len <= MAX_VARCHAR_BUFFER) + { + fSqlType = SQL_WVARCHAR; + fCType = SQL_C_WCHAR; + pbValue = pch; + cbColDef = max(len, 1); + cbValueMax = (len + 1) * Py_UNICODE_SIZE; + *pcbValue = (SQLLEN)(len * Py_UNICODE_SIZE); + } + else + { + fSqlType = SQL_WLONGVARCHAR; + fCType = SQL_C_WCHAR; + pbValue = param; + cbColDef = max(len, 1) * sizeof(SQLWCHAR); + cbValueMax = sizeof(PyObject*); + *pcbValue = SQL_LEN_DATA_AT_EXEC((SQLLEN)(len * Py_UNICODE_SIZE)); + } + } + else if (param == Py_True || param == Py_False) + { + *pbParam = (unsigned char)(param == Py_True ? 1 : 0); + + fSqlType = SQL_BIT; + fCType = SQL_C_BIT; + pbValue = pbParam; + cbValueMax = 1; + pcbValue = 0; + } + else if (PyDateTime_Check(param)) + { + TIMESTAMP_STRUCT* value = (TIMESTAMP_STRUCT*)pbParam; + + value->year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); + value->month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); + value->day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); + value->hour = (SQLUSMALLINT)PyDateTime_DATE_GET_HOUR(param); + value->minute = (SQLUSMALLINT)PyDateTime_DATE_GET_MINUTE(param); + value->second = (SQLUSMALLINT)PyDateTime_DATE_GET_SECOND(param); + + // SQL Server chokes if the fraction has more data than the database supports. 
We expect other databases to be + // the same, so we reduce the value to what the database supports. + // http://support.microsoft.com/kb/263872 + + int precision = ((Connection*)cur->cnxn)->datetime_precision - 20; // (20 includes a separating period) + if (precision <= 0) + { + value->fraction = 0; + } + else + { + value->fraction = (SQLUINTEGER)(PyDateTime_DATE_GET_MICROSECOND(param) * 1000); // 1000 == micro -> nano + + // (How many leading digits do we want to keep? With SQL Server 2005, this should be 3: 123000000) + int keep = (int)pow(10.0, 9-min(9, precision)); + value->fraction = value->fraction / keep * keep; + decimalDigits = precision; + } + + fSqlType = SQL_TIMESTAMP; + fCType = SQL_C_TIMESTAMP; + pbValue = pbParam; + cbColDef = ((Connection*)cur->cnxn)->datetime_precision; + cbValueMax = sizeof(TIMESTAMP_STRUCT); + pcbValue = 0; + } + else if (PyDate_Check(param)) + { + DATE_STRUCT* value = (DATE_STRUCT*)pbParam; + value->year = (SQLSMALLINT) PyDateTime_GET_YEAR(param); + value->month = (SQLUSMALLINT)PyDateTime_GET_MONTH(param); + value->day = (SQLUSMALLINT)PyDateTime_GET_DAY(param); + + fSqlType = SQL_TYPE_DATE; + fCType = SQL_C_TYPE_DATE; + pbValue = pbParam; + cbColDef = 10; // The size of date represented as a string (yyyy-mm-dd) + cbValueMax = sizeof(DATE_STRUCT); + pcbValue = 0; + } + else if (PyTime_Check(param)) + { + TIME_STRUCT* value = (TIME_STRUCT*)pbParam; + value->hour = (SQLUSMALLINT)PyDateTime_TIME_GET_HOUR(param); + value->minute = (SQLUSMALLINT)PyDateTime_TIME_GET_MINUTE(param); + value->second = (SQLUSMALLINT)PyDateTime_TIME_GET_SECOND(param); + + fSqlType = SQL_TYPE_TIME; + fCType = SQL_C_TIME; + pbValue = pbParam; + cbColDef = 8; + cbValueMax = sizeof(TIME_STRUCT); + pcbValue = 0; + } + else if (PyInt_Check(param)) + { + long* value = (long*)pbParam; + + *value = PyInt_AsLong(param); + + fSqlType = SQL_INTEGER; + fCType = SQL_C_LONG; + pbValue = pbParam; + cbValueMax = sizeof(long); + pcbValue = 0; + } + else if 
(PyLong_Check(param)) + { + INT64* value = (INT64*)pbParam; + + *value = PyLong_AsLongLong(param); + + fSqlType = SQL_BIGINT; + fCType = SQL_C_SBIGINT; + pbValue = pbParam; + cbValueMax = sizeof(INT64); + pcbValue = 0; + } + else if (PyFloat_Check(param)) + { + double* value = (double*)pbParam; + + *value = PyFloat_AsDouble(param); + + fSqlType = SQL_DOUBLE; + fCType = SQL_C_DOUBLE; + pbValue = pbParam; + cbValueMax = sizeof(double); + pcbValue = 0; + } + else if (PyDecimal_Check(param)) + { + // Using the ODBC binary format would eliminate issues of whether to use '.' vs ',', but I've had unending + // problems attempting to bind the decimal using the binary struct. In particular, the scale is never honored + // properly. It appears that drivers have lots of bugs. For now, we'll copy the value into a string, manually + // change '.' to the database's decimal value. (At this point, it appears that the decimal class *always* uses + // '.', regardless of the user's locale.) + + // GetParamBufferSize reserved room for the string length, which may include a sign and decimal. + + Object str = PyObject_CallMethod(param, "__str__", 0); + if (!str) + return false; + + char* pch = PyString_AS_STRING(str.Get()); + int len = PyString_GET_SIZE(str.Get()); + + *pcbValue = (SQLLEN)len; + + // Note: SQL_DECIMAL here works for SQL Server but is not handled by MS Access. SQL_NUMERIC seems to work for + // both, probably because I am providing exactly NUMERIC(p,s) anyway. 
+ fSqlType = SQL_NUMERIC; + fCType = SQL_C_CHAR; + pbValue = pbParam; + cbColDef = len; + memcpy(pbValue, pch, len + 1); + cbValueMax = len + 1; + + char* pchDecimal = strchr((char*)pbValue, '.'); + if (pchDecimal) + { + decimalDigits = (SQLSMALLINT)(len - (pchDecimal - (char*)pbValue) - 1); + + if (chDecimal != '.') + *pchDecimal = chDecimal; // (pointing into our own copy in pbValue) + } + } + else if (PyBuffer_Check(param)) + { + const char* pb; + int cb = PyBuffer_GetMemory(param, &pb); + + if (cb != -1 && cb <= MAX_VARBINARY_BUFFER) + { + // There is one segment, so we can bind directly into the buffer object. + + fCType = SQL_C_BINARY; + fSqlType = SQL_VARBINARY; + + pbValue = (SQLPOINTER)pb; + cbValueMax = cb; + cbColDef = max(cb, 1); + *pcbValue = cb; + } + else + { + // There are multiple segments, so we'll provide the data at execution time. Pass the PyObject pointer as + // the parameter value which will be passed back to us when the data is needed. (If we release threads, we + // need to up the refcount!) + + fCType = SQL_C_BINARY; + fSqlType = SQL_LONGVARBINARY; + + pbValue = param; + cbColDef = PyBuffer_Size(param); + cbValueMax = sizeof(PyObject*); // How big is pbValue; ODBC copies it and gives it back in SQLParamData + *pcbValue = SQL_LEN_DATA_AT_EXEC(PyBuffer_Size(param)); + } + } + else + { + RaiseErrorV("HY097", NotSupportedError, "Python type %s not supported. param=%d", param->ob_type->tp_name, iParam); + return false; + } + + #ifdef TRACE_ALL + printf("BIND: param=%d fCType=%d (%s) fSqlType=%d (%s) cbColDef=%d DecimalDigits=%d cbValueMax=%d *pcb=%d\n", iParam, + fCType, CTypeName(fCType), fSqlType, SqlTypeName(fSqlType), cbColDef, decimalDigits, cbValueMax, pcbValue ? 
*pcbValue : 0); + #endif + + SQLRETURN ret = -1; + Py_BEGIN_ALLOW_THREADS + ret = SQLBindParameter(cur->hstmt, (SQLUSMALLINT)iParam, SQL_PARAM_INPUT, fCType, fSqlType, cbColDef, decimalDigits, pbValue, cbValueMax, pcbValue); + Py_END_ALLOW_THREADS; + + if (GetConnection(cur)->hdbc == SQL_NULL_HANDLE) + { + // The connection was closed by another thread in the ALLOW_THREADS block above. + RaiseErrorV(0, ProgrammingError, "The cursor's connection was closed."); + return false; + } + + if (!SQL_SUCCEEDED(ret)) + { + RaiseErrorFromHandle("SQLBindParameter", GetConnection(cur)->hdbc, cur->hstmt); + return false; + } + + if (pbValue == pbParam) + { + // We are using the passed in buffer to bind; skip past the amount of buffer we used. + *ppbParam += cbValueMax; + } + + return true; +} diff --git a/src/params.h b/src/params.h new file mode 100644 index 00000000..d8869ff2 --- /dev/null +++ b/src/params.h @@ -0,0 +1,11 @@ + +#ifndef PARAMS_H +#define PARAMS_H + +struct Cursor; + +bool PrepareAndBind(Cursor* cur, PyObject* pSql, PyObject* params, bool skip_first); +void FreeParameterData(Cursor* cur); +void FreeParameterInfo(Cursor* cur); + +#endif diff --git a/src/pyodbc.h b/src/pyodbc.h new file mode 100644 index 00000000..7b24e17e --- /dev/null +++ b/src/pyodbc.h @@ -0,0 +1,136 @@ + +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS + * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef PYODBC_H +#define PYODBC_H + +#ifdef _MSC_VER +#include +#include +typedef __int64 INT64; +typedef unsigned __int64 UINT64; +#else +typedef unsigned char byte; +typedef unsigned int UINT; +typedef long long INT64; +typedef unsigned long long UINT64; +#define strcmpi strcasecmp +inline int max(int lhs, int rhs) { return (rhs > lhs) ? rhs : lhs; } +#endif + +#define PY_SSIZE_T_CLEAN 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Whoever wrote the datetime C module declared a static variable in the header file. A properly conforming C/C++ +// compiler will create a new copy in every source file, meaning you can't set the value globally. Criminy. We'll +// declare our own global which will be set during initialization. +// +// We could initialize PyDateTimeAPI in each module, but we don't have a function in each module that is guaranteed to +// be called first and I don't want to create an Init function just for this datetime bug. 
+ +#undef PyDate_Check +#undef PyDate_CheckExact +#undef PyDateTime_Check +#undef PyDateTime_CheckExact +#undef PyTime_Check +#undef PyTime_CheckExact + +extern _typeobject* OurDateTimeType; +extern _typeobject* OurDateType; +extern _typeobject* OurTimeType; + +#define PyDate_Check(op) PyObject_TypeCheck(op, OurDateType) +#define PyDate_CheckExact(op) ((op)->ob_type == OurDateType) +#define PyDateTime_Check(op) PyObject_TypeCheck(op, OurDateTimeType) +#define PyDateTime_CheckExact(op) ((op)->ob_type == OurDateTimeType) +#define PyTime_Check(op) PyObject_TypeCheck(op, OurTimeType) +#define PyTime_CheckExact(op) ((op)->ob_type == OurTimeType) + +#include +#include + +#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) +typedef int Py_ssize_t; +#define PY_SSIZE_T_MAX INT_MAX +#define PY_SSIZE_T_MIN INT_MIN +#define PyInt_AsSsize_t PyInt_AsLong +#define lenfunc inquiry +#define ssizeargfunc intargfunc +#define ssizeobjargproc intobjargproc +#endif + +#ifndef _countof +#define _countof(a) (sizeof(a) / sizeof(a[0])) +#endif + +inline bool IsSet(DWORD grf, DWORD flags) +{ + return (grf & flags) == flags; +} + +#ifdef UNUSED +#undef UNUSED +#endif + +inline void UNUSED(...) { } + +#include + +#ifdef __GNUC__ +#define min(X,Y) ((X) < (Y) ? (X) : (Y)) +#define max(X,Y) ((X) > (Y) ? (X) : (Y)) +#define _alloca alloca +inline void _strlwr(char* name) +{ + while (*name) { *name = tolower(*name); name++; } +} +#endif + + +// Building an actual debug version of Python is so much of a pain that it never happens. I'm providing release-build +// versions of assertions. 
+ +// REVIEW: Put these into the setup script command line (or setup.cfg) +// #define PYODBC_ASSERT 1 +// #define TRACE_ALL 1 + +#ifdef PYODBC_ASSERT + #ifdef _MSC_VER + #include + inline void FailAssert(const char* szFile, size_t line, const char* szExpr) + { + printf("assertion failed: %s(%d)\n%s\n", szFile, line, szExpr); + __debugbreak(); // _CrtDbgBreak(); + } + #define I(expr) if (!(expr)) FailAssert(__FILE__, __LINE__, #expr); + #define N(expr) if (expr) FailAssert(__FILE__, __LINE__, #expr); + #else + #define I(expr) + #define N(expr) + #endif +#else + #define I(expr) + #define N(expr) +#endif + +#endif // pyodbc_h diff --git a/src/pyodbc.rc b/src/pyodbc.rc new file mode 100644 index 00000000..35c911a8 --- /dev/null +++ b/src/pyodbc.rc @@ -0,0 +1,100 @@ +// Microsoft Visual C++ generated resource script. +// +#include "resource.h" + +#define APSTUDIO_READONLY_SYMBOLS +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 2 resource. +// +#include "afxres.h" + +///////////////////////////////////////////////////////////////////////////// +#undef APSTUDIO_READONLY_SYMBOLS + +///////////////////////////////////////////////////////////////////////////// +// English (U.S.) 
resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) +#ifdef _WIN32 +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) +#endif //_WIN32 + +#ifdef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// TEXTINCLUDE +// + +1 TEXTINCLUDE +BEGIN + "resource.h\0" +END + +2 TEXTINCLUDE +BEGIN + "#include ""afxres.h""\r\n" + "\0" +END + +3 TEXTINCLUDE +BEGIN + "\r\n" + "\0" +END + +#endif // APSTUDIO_INVOKED + + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD + PRODUCTVERSION PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x4L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "FileDescription", "ODBC DB API 2.0 Module" + VALUE "FileVersion", "PYODBC_MAJOR,PYODBC_MINOR,PYODBC_MICRO,PYODBC_BUILD" + VALUE "InternalName", "pyodbc" + VALUE "OriginalFilename", "pyodbc.pyd" + VALUE "ProductName", "ODBC DB API 2.0 Module" + VALUE "ProductVersion", "2.0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END + +#endif // English (U.S.) resources +///////////////////////////////////////////////////////////////////////////// + + + +#ifndef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 3 resource. 
+// + + +///////////////////////////////////////////////////////////////////////////// +#endif // not APSTUDIO_INVOKED + diff --git a/src/pyodbcmodule.cpp b/src/pyodbcmodule.cpp new file mode 100644 index 00000000..71fd641e --- /dev/null +++ b/src/pyodbcmodule.cpp @@ -0,0 +1,813 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +#include "pyodbc.h" +#include "pyodbcmodule.h" +#include "connection.h" +#include "cursor.h" +#include "row.h" +#include "wrapper.h" +#include "errors.h" +#include "getdata.h" + +#include +#include + +_typeobject* OurDateTimeType = 0; +_typeobject* OurDateType = 0; +_typeobject* OurTimeType = 0; + +PyObject* pModule = 0; + +static char module_doc[] = + "A DB API 2.0 module for ODBC databases.\n" + "\n" + "This module conforms to the DB API 2.0 specification while providing\n" + "non-standard convenience features. 
Only standard Python data types are used\n" + "so additional DLLs are not required.\n" + "\n" + "Static Variables:\n\n" + "version\n" + " The module version string in the format major.minor.revision\n" + "\n" + "apilevel\n" + " The string constant '2.0' indicating this module supports DB API level 2.0.\n" + "\n" + "lowercase\n" + " A Boolean that controls whether column names in result rows are lowercased.\n" + " This can be changed any time and affects queries executed after the change.\n" + " The default is False. This can be useful when database columns have\n" + " inconsistent capitalization.\n" + "pooling\n" + " A Boolean indicating whether connection pooling is enabled. This is a\n" + " global (HENV) setting, so it can only be modified before the first\n" + " connection is made. The default is True, which enables ODBC connection\n" + " pooling.\n" + "\n" + "threadsafety\n" + " The integer 1, indicating that threads may share the module but not\n" + " connections. Note that connections and cursors may be used by different\n" + " threads, just not at the same time.\n" + "\n" + "qmark\n" + " The string constant 'qmark' to indicate parameters are identified using\n" + " question marks."; + +PyObject* Error; +PyObject* Warning; +PyObject* InterfaceError; +PyObject* DatabaseError; +PyObject* InternalError; +PyObject* OperationalError; +PyObject* ProgrammingError; +PyObject* IntegrityError; +PyObject* DataError; +PyObject* NotSupportedError; + +struct ExcInfo +{ + const char* szName; + const char* szFullName; + PyObject** ppexc; + PyObject** ppexcParent; + const char* szDoc; +}; + +#define MAKEEXCINFO(name, parent, doc) { #name, "pyodbc." #name, &name, &parent, doc } + +static ExcInfo aExcInfos[] = { + MAKEEXCINFO(Error, PyExc_StandardError, + "Exception that is the base class of all other error exceptions. 
You can use\n" + "this to catch all errors with one single 'except' statement."), + MAKEEXCINFO(Warning, PyExc_StandardError, + "Exception raised for important warnings like data truncations while inserting,\n" + " etc."), + MAKEEXCINFO(InterfaceError, Error, + "Exception raised for errors that are related to the database interface rather\n" + "than the database itself."), + MAKEEXCINFO(DatabaseError, Error, "Exception raised for errors that are related to the database."), + MAKEEXCINFO(DataError, DatabaseError, + "Exception raised for errors that are due to problems with the processed data\n" + "like division by zero, numeric value out of range, etc."), + MAKEEXCINFO(OperationalError, DatabaseError, + "Exception raised for errors that are related to the database's operation and\n" + "not necessarily under the control of the programmer, e.g. an unexpected\n" + "disconnect occurs, the data source name is not found, a transaction could not\n" + "be processed, a memory allocation error occurred during processing, etc."), + MAKEEXCINFO(IntegrityError, DatabaseError, + "Exception raised when the relational integrity of the database is affected,\n" + "e.g. a foreign key check fails."), + MAKEEXCINFO(InternalError, DatabaseError, + "Exception raised when the database encounters an internal error, e.g. the\n" + "cursor is not valid anymore, the transaction is out of sync, etc."), + MAKEEXCINFO(ProgrammingError, DatabaseError, + "Exception raised for programming errors, e.g. table not found or already\n" + "exists, syntax error in the SQL statement, wrong number of parameters\n" + "specified, etc."), + MAKEEXCINFO(NotSupportedError, DatabaseError, + "Exception raised in case a method or database API was used which is not\n" + "supported by the database, e.g. 
requesting a .rollback() on a connection that\n" + "does not support transaction or has transactions turned off.") +}; + + +PyObject* decimal_type; + +HENV henv = SQL_NULL_HANDLE; + +char chDecimal = '.'; +char chGroupSeparator = ','; +char chCurrencySymbol = '$'; + +// Initialize the global decimal character and thousands separator character, used when parsing decimal +// objects. +// +static void init_locale_info() +{ + Object module = PyImport_ImportModule("locale"); + if (!module) + { + PyErr_Clear(); + return; + } + + Object ldict = PyObject_CallMethod(module, "localeconv", 0); + if (!ldict) + { + PyErr_Clear(); + return; + } + + PyObject* value = PyDict_GetItemString(ldict, "decimal"); + if (value && PyString_Check(value) && PyString_Size(value) == 1) + { + chDecimal = PyString_AsString(value)[0]; + } + + value = PyDict_GetItemString(ldict, "thousands_sep"); + if (value && PyString_Check(value) && PyString_Size(value) == 1) + { + chGroupSeparator = PyString_AsString(value)[0]; + + if (chGroupSeparator == '\0') + { + // I don't know why, but the default locale isn't setting ','. We're going to make the assumption that the + // most common values are ',' and '.', and we'll take the opposite of the decimal value. + chGroupSeparator = (chDecimal == ',') ? '.' : ','; + } + } + + value = PyDict_GetItemString(ldict, "currency_symbol"); + if (value && PyString_Check(value) && PyString_Size(value) == 1) + { + chCurrencySymbol = PyString_AsString(value)[0]; + } +} + + +static bool import_types() +{ + // In Python 2.5 final, PyDateTime_IMPORT no longer works unless the datetime module was previously + // imported (among other problems). 
+ + PyObject* pdt = PyImport_ImportModule("datetime"); + + if (!pdt) + return false; + + PyDateTime_IMPORT; + + if (!PyDateTimeAPI) + { + PyErr_SetString(PyExc_RuntimeError, "Unable to import the datetime module."); + return false; + } + + OurDateTimeType = PyDateTimeAPI->DateTimeType; + OurDateType = PyDateTimeAPI->DateType; + OurTimeType = PyDateTimeAPI->TimeType; + + Cursor_init(); + GetData_init(); + + PyObject* decimalmod = PyImport_ImportModule("decimal"); + if (!decimalmod) + { + PyErr_SetString(PyExc_RuntimeError, "Unable to import decimal"); + return false; + } + + decimal_type = PyObject_GetAttrString(decimalmod, "Decimal"); + Py_DECREF(decimalmod); + + if (decimal_type == 0) + PyErr_SetString(PyExc_RuntimeError, "Unable to import decimal.Decimal."); + + return decimal_type != 0; +} + + +static bool AllocateEnv() +{ + PyObject* pooling = PyObject_GetAttrString(pModule, "pooling"); + bool bPooling = pooling == Py_True; + Py_DECREF(pooling); + + if (bPooling) + { + if (!SQL_SUCCEEDED(SQLSetEnvAttr(SQL_NULL_HANDLE, SQL_ATTR_CONNECTION_POOLING, (SQLPOINTER)SQL_CP_ONE_PER_HENV, sizeof(int)))) + { + Py_FatalError("Unable to set SQL_ATTR_CONNECTION_POOLING attribute."); + return false; + } + } + + if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv))) + { + Py_FatalError("Can't initialize module pyodbc. SQLAllocEnv failed."); + return false; + } + + if (!SQL_SUCCEEDED(SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, sizeof(int)))) + { + Py_FatalError("Unable to set SQL_ATTR_ODBC_VERSION attribute."); + return false; + } + + return true; +} + +char* connect_kwnames[] = { "connectstring", "autocommit", "ansi", 0 }; + +static PyObject* +mod_connect(PyObject* self, PyObject* args, PyObject* kwargs) +{ + UNUSED(self); + + PyObject* pConnectString; // Can be str or unicode. 
+ int fAutoCommit = 0; + int fAnsi = 0; // force ansi + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|ii", connect_kwnames, &pConnectString, &fAutoCommit, &fAnsi)) + return 0; + + if (!PyString_Check(pConnectString) && !PyUnicode_Check(pConnectString)) + { + PyErr_SetString(PyExc_TypeError, "argument 1 must be a string or unicode object"); + return 0; + } + + if (henv == SQL_NULL_HANDLE) + { + if (!AllocateEnv()) + return 0; + } + + return (PyObject*)Connection_New(pConnectString, fAutoCommit != 0, fAnsi != 0); +} + + +static PyObject* +mod_datasources(PyObject* self) +{ + UNUSED(self); + + if (henv == SQL_NULL_HANDLE && !AllocateEnv()) + return 0; + + PyObject* result = PyDict_New(); + if (!result) + return 0; + + SQLCHAR szDSN[SQL_MAX_DSN_LENGTH]; + SWORD cbDSN; + SQLCHAR szDesc[200]; + SWORD cbDesc; + + SQLUSMALLINT nDirection = SQL_FETCH_FIRST; + + RETCODE retcode=SQL_SUCCESS; + while (SQL_SUCCEEDED(retcode=SQLDataSources(henv, SQL_FETCH_NEXT, + szDSN, _countof(szDSN), &cbDSN, + szDesc, _countof(szDesc), &cbDesc))) + { + PyDict_SetItemString(result, (const char*)szDSN, PyString_FromString((const char*)szDesc)); + nDirection = SQL_FETCH_NEXT; + } + + if (retcode != SQL_NO_DATA) + { + Py_DECREF(result); + return RaiseErrorFromHandle("SQLDataSources", SQL_NULL_HANDLE, SQL_NULL_HANDLE); + } + + return result; +} + + + +static PyObject* +mod_timefromticks(PyObject* self, PyObject* args) +{ + UNUSED(self); + + time_t t = 0; + struct tm* fields; + + // Sigh... If a float is passed but we ask for a long, we get a deprecation warning printed to the screen instead + // of a failure. Not only is this not documented, it means we can't reliably use PyArg_ParseTuple('l') anywhere! 
+ + // if (PyArg_ParseTuple(args, "l", &ticks)) + + PyObject* num; + if (!PyArg_ParseTuple(args, "O", &num)) + return 0; + + if (PyInt_Check(num)) + t = PyInt_AS_LONG(num); + else if (PyLong_Check(num)) + t = PyLong_AsLong(num); + else if (PyFloat_Check(num)) + t = (long)PyFloat_AS_DOUBLE(num); + else + { + PyErr_SetString(PyExc_TypeError, "TimeFromTicks requires a number."); + return 0; + } + + fields = localtime(&t); + + return PyTime_FromTime(fields->tm_hour, fields->tm_min, fields->tm_sec, 0); +} + +static PyObject* +mod_datefromticks(PyObject* self, PyObject* args) +{ + UNUSED(self); + return PyDate_FromTimestamp(args); +} + +static PyObject* +mod_timestampfromticks(PyObject* self, PyObject* args) +{ + UNUSED(self); + return PyDateTime_FromTimestamp(args); +} + +static char connect_doc[] = + "connect(str, autocommit=False, ansi=False) --> Connection\n" + "\n" + "Accepts an ODBC connection string and returns a new Connection object.\n" + "\n" + "The connection string will be passed to SQLDriverConnect, so a DSN connection\n" + "can be created using:\n" + "\n" + " DSN=DataSourceName;UID=user;PWD=password\n" + "\n" + "To connect without requiring a DSN, specify the driver and connection\n" + "information:\n" + "\n" + " DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=user;PWD=password\n" + "\n" + "Note the use of braces when a value contains spaces. Refer to SQLDriverConnect\n" + "documentation or the documentation of your ODBC driver for details.\n" + "\n" + "autocommit\n" + " If False or zero, the default, transactions are created automatically as defined in the \n" + " DB API 2. If True or non-zero, the connection is put into ODBC autocommit mode and statements\n" + " are committed automatically.\n" + "\n" + "ansi\n" + " By default, pyodbc first attempts to connect using the Unicode version of SQLDriverConnectW.\n" + " If the driver returns IM001 indicating it does not support the Unicode version, the ANSI\n" + " version is tried. 
Any other SQLSTATE is turned into an exception. Setting ansi to true\n" + " skips the Unicode attempt and only connects using the ANSI version. This is useful for\n" + " drivers that return the wrong SQLSTATE (or if pyodbc is out of date and should support\n" + " other SQLSTATEs)."; + +static char timefromticks_doc[] = + "TimeFromTicks(ticks) --> datetime.time\n" \ + "\n" \ + "Returns a time object initialized from the given ticks value (number of seconds\n" \ + "since the epoch; see the documentation of the standard Python time module for\n" \ + "details)."; + +static char datefromticks_doc[] = + "DateFromTicks(ticks) --> datetime.date\n" \ + "\n" \ + "Returns a date object initialized from the given ticks value (number of seconds\n" \ + "since the epoch; see the documentation of the standard Python time module for\n" \ + "details)."; + +static char timestampfromticks_doc[] = + "TimestampFromTicks(ticks) --> datetime.datetime\n" \ + "\n" \ + "Returns a datetime object initialized from the given ticks value (number of\n" \ + "seconds since the epoch; see the documentation of the standard Python time\n" \ + "module for details"; + +static char datasources_doc[] = + "dataSources() -> { DSN : Description }\n" \ + "\n" \ + "Returns a dictionary mapping available DSNs to their descriptions."; + + +static PyMethodDef pyodbc_methods[] = +{ + { "connect", (PyCFunction)mod_connect, METH_VARARGS|METH_KEYWORDS, connect_doc }, + { "TimeFromTicks", (PyCFunction)mod_timefromticks, METH_VARARGS, timefromticks_doc }, + { "DateFromTicks", (PyCFunction)mod_datefromticks, METH_VARARGS, datefromticks_doc }, + { "TimestampFromTicks", (PyCFunction)mod_timestampfromticks, METH_VARARGS, timestampfromticks_doc }, + { "dataSources", (PyCFunction)mod_datasources, METH_NOARGS, datasources_doc }, + { 0, 0, 0, 0 } +}; + + +static void ErrorInit() +{ + // Called during startup to initialize any variables that will be freed by ErrorCleanup. 
+ + Error = 0; + Warning = 0; + InterfaceError = 0; + DatabaseError = 0; + InternalError = 0; + OperationalError = 0; + ProgrammingError = 0; + IntegrityError = 0; + DataError = 0; + NotSupportedError = 0; + decimal_type = 0; +} + + +static void ErrorCleanup() +{ + // Called when an error occurs during initialization to release any objects we may have accessed. Make sure each + // item released was initialized to zero. (Static objects are -- non-statics should be initialized in ErrorInit.) + + Py_XDECREF(Error); + Py_XDECREF(Warning); + Py_XDECREF(InterfaceError); + Py_XDECREF(DatabaseError); + Py_XDECREF(InternalError); + Py_XDECREF(OperationalError); + Py_XDECREF(ProgrammingError); + Py_XDECREF(IntegrityError); + Py_XDECREF(DataError); + Py_XDECREF(NotSupportedError); + Py_XDECREF(decimal_type); +} + +struct ConstantDef +{ + const char* szName; + int value; +}; + +#define MAKECONST(v) { #v, v } + +static const ConstantDef aConstants[] = { + MAKECONST(SQL_UNKNOWN_TYPE), + MAKECONST(SQL_CHAR), + MAKECONST(SQL_VARCHAR), + MAKECONST(SQL_LONGVARCHAR), + MAKECONST(SQL_WCHAR), + MAKECONST(SQL_WVARCHAR), + MAKECONST(SQL_WLONGVARCHAR), + MAKECONST(SQL_DECIMAL), + MAKECONST(SQL_NUMERIC), + MAKECONST(SQL_SMALLINT), + MAKECONST(SQL_INTEGER), + MAKECONST(SQL_REAL), + MAKECONST(SQL_FLOAT), + MAKECONST(SQL_DOUBLE), + MAKECONST(SQL_BIT), + MAKECONST(SQL_TINYINT), + MAKECONST(SQL_BIGINT), + MAKECONST(SQL_BINARY), + MAKECONST(SQL_VARBINARY), + MAKECONST(SQL_LONGVARBINARY), + MAKECONST(SQL_TYPE_DATE), + MAKECONST(SQL_TYPE_TIME), + MAKECONST(SQL_TYPE_TIMESTAMP), + MAKECONST(SQL_INTERVAL_MONTH), + MAKECONST(SQL_INTERVAL_YEAR), + MAKECONST(SQL_INTERVAL_YEAR_TO_MONTH), + MAKECONST(SQL_INTERVAL_DAY), + MAKECONST(SQL_INTERVAL_HOUR), + MAKECONST(SQL_INTERVAL_MINUTE), + MAKECONST(SQL_INTERVAL_SECOND), + MAKECONST(SQL_INTERVAL_DAY_TO_HOUR), + MAKECONST(SQL_INTERVAL_DAY_TO_MINUTE), + MAKECONST(SQL_INTERVAL_DAY_TO_SECOND), + MAKECONST(SQL_INTERVAL_HOUR_TO_MINUTE), + 
MAKECONST(SQL_INTERVAL_HOUR_TO_SECOND), + MAKECONST(SQL_INTERVAL_MINUTE_TO_SECOND), + MAKECONST(SQL_GUID), + MAKECONST(SQL_NULLABLE), + MAKECONST(SQL_NO_NULLS), + MAKECONST(SQL_NULLABLE_UNKNOWN), + // MAKECONST(SQL_INDEX_BTREE), + // MAKECONST(SQL_INDEX_CLUSTERED), + // MAKECONST(SQL_INDEX_CONTENT), + // MAKECONST(SQL_INDEX_HASHED), + // MAKECONST(SQL_INDEX_OTHER), + MAKECONST(SQL_SCOPE_CURROW), + MAKECONST(SQL_SCOPE_TRANSACTION), + MAKECONST(SQL_SCOPE_SESSION), + MAKECONST(SQL_PC_UNKNOWN), + MAKECONST(SQL_PC_NOT_PSEUDO), + MAKECONST(SQL_PC_PSEUDO), + + // SQLGetInfo + MAKECONST(SQL_ACCESSIBLE_PROCEDURES), + MAKECONST(SQL_ACCESSIBLE_TABLES), + MAKECONST(SQL_ACTIVE_ENVIRONMENTS), + MAKECONST(SQL_AGGREGATE_FUNCTIONS), + MAKECONST(SQL_ALTER_DOMAIN), + MAKECONST(SQL_ALTER_TABLE), + MAKECONST(SQL_ASYNC_MODE), + MAKECONST(SQL_BATCH_ROW_COUNT), + MAKECONST(SQL_BATCH_SUPPORT), + MAKECONST(SQL_BOOKMARK_PERSISTENCE), + MAKECONST(SQL_CATALOG_LOCATION), + MAKECONST(SQL_CATALOG_NAME), + MAKECONST(SQL_CATALOG_NAME_SEPARATOR), + MAKECONST(SQL_CATALOG_TERM), + MAKECONST(SQL_CATALOG_USAGE), + MAKECONST(SQL_COLLATION_SEQ), + MAKECONST(SQL_COLUMN_ALIAS), + MAKECONST(SQL_CONCAT_NULL_BEHAVIOR), + MAKECONST(SQL_CONVERT_FUNCTIONS), + MAKECONST(SQL_CONVERT_VARCHAR), + MAKECONST(SQL_CORRELATION_NAME), + MAKECONST(SQL_CREATE_ASSERTION), + MAKECONST(SQL_CREATE_CHARACTER_SET), + MAKECONST(SQL_CREATE_COLLATION), + MAKECONST(SQL_CREATE_DOMAIN), + MAKECONST(SQL_CREATE_SCHEMA), + MAKECONST(SQL_CREATE_TABLE), + MAKECONST(SQL_CREATE_TRANSLATION), + MAKECONST(SQL_CREATE_VIEW), + MAKECONST(SQL_CURSOR_COMMIT_BEHAVIOR), + MAKECONST(SQL_CURSOR_ROLLBACK_BEHAVIOR), + // MAKECONST(SQL_CURSOR_ROLLBACK_SQL_CURSOR_SENSITIVITY), + MAKECONST(SQL_DATABASE_NAME), + MAKECONST(SQL_DATA_SOURCE_NAME), + MAKECONST(SQL_DATA_SOURCE_READ_ONLY), + MAKECONST(SQL_DATETIME_LITERALS), + MAKECONST(SQL_DBMS_NAME), + MAKECONST(SQL_DBMS_VER), + MAKECONST(SQL_DDL_INDEX), + MAKECONST(SQL_DEFAULT_TXN_ISOLATION), + 
MAKECONST(SQL_DESCRIBE_PARAMETER), + MAKECONST(SQL_DM_VER), + MAKECONST(SQL_DRIVER_HDESC), + MAKECONST(SQL_DRIVER_HENV), + MAKECONST(SQL_DRIVER_HLIB), + MAKECONST(SQL_DRIVER_HSTMT), + MAKECONST(SQL_DRIVER_NAME), + MAKECONST(SQL_DRIVER_ODBC_VER), + MAKECONST(SQL_DRIVER_VER), + MAKECONST(SQL_DROP_ASSERTION), + MAKECONST(SQL_DROP_CHARACTER_SET), + MAKECONST(SQL_DROP_COLLATION), + MAKECONST(SQL_DROP_DOMAIN), + MAKECONST(SQL_DROP_SCHEMA), + MAKECONST(SQL_DROP_TABLE), + MAKECONST(SQL_DROP_TRANSLATION), + MAKECONST(SQL_DROP_VIEW), + MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES1), + MAKECONST(SQL_DYNAMIC_CURSOR_ATTRIBUTES2), + MAKECONST(SQL_EXPRESSIONS_IN_ORDERBY), + MAKECONST(SQL_FILE_USAGE), + MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1), + MAKECONST(SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2), + MAKECONST(SQL_GETDATA_EXTENSIONS), + MAKECONST(SQL_GROUP_BY), + MAKECONST(SQL_IDENTIFIER_CASE), + MAKECONST(SQL_IDENTIFIER_QUOTE_CHAR), + MAKECONST(SQL_INDEX_KEYWORDS), + MAKECONST(SQL_INFO_SCHEMA_VIEWS), + MAKECONST(SQL_INSERT_STATEMENT), + MAKECONST(SQL_INTEGRITY), + MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES1), + MAKECONST(SQL_KEYSET_CURSOR_ATTRIBUTES2), + MAKECONST(SQL_KEYWORDS), + MAKECONST(SQL_LIKE_ESCAPE_CLAUSE), + MAKECONST(SQL_MAX_ASYNC_CONCURRENT_STATEMENTS), + MAKECONST(SQL_MAX_BINARY_LITERAL_LEN), + MAKECONST(SQL_MAX_CATALOG_NAME_LEN), + MAKECONST(SQL_MAX_CHAR_LITERAL_LEN), + MAKECONST(SQL_MAX_COLUMNS_IN_GROUP_BY), + MAKECONST(SQL_MAX_COLUMNS_IN_INDEX), + MAKECONST(SQL_MAX_COLUMNS_IN_ORDER_BY), + MAKECONST(SQL_MAX_COLUMNS_IN_SELECT), + MAKECONST(SQL_MAX_COLUMNS_IN_TABLE), + MAKECONST(SQL_MAX_COLUMN_NAME_LEN), + MAKECONST(SQL_MAX_CONCURRENT_ACTIVITIES), + MAKECONST(SQL_MAX_CURSOR_NAME_LEN), + MAKECONST(SQL_MAX_DRIVER_CONNECTIONS), + MAKECONST(SQL_MAX_IDENTIFIER_LEN), + MAKECONST(SQL_MAX_INDEX_SIZE), + MAKECONST(SQL_MAX_PROCEDURE_NAME_LEN), + MAKECONST(SQL_MAX_ROW_SIZE), + MAKECONST(SQL_MAX_ROW_SIZE_INCLUDES_LONG), + MAKECONST(SQL_MAX_SCHEMA_NAME_LEN), + 
MAKECONST(SQL_MAX_STATEMENT_LEN), + MAKECONST(SQL_MAX_TABLES_IN_SELECT), + MAKECONST(SQL_MAX_TABLE_NAME_LEN), + MAKECONST(SQL_MAX_USER_NAME_LEN), + MAKECONST(SQL_MULTIPLE_ACTIVE_TXN), + MAKECONST(SQL_MULT_RESULT_SETS), + MAKECONST(SQL_NEED_LONG_DATA_LEN), + MAKECONST(SQL_NON_NULLABLE_COLUMNS), + MAKECONST(SQL_NULL_COLLATION), + MAKECONST(SQL_NUMERIC_FUNCTIONS), + MAKECONST(SQL_ODBC_INTERFACE_CONFORMANCE), + MAKECONST(SQL_ODBC_VER), + MAKECONST(SQL_OJ_CAPABILITIES), + MAKECONST(SQL_ORDER_BY_COLUMNS_IN_SELECT), + MAKECONST(SQL_PARAM_ARRAY_ROW_COUNTS), + MAKECONST(SQL_PARAM_ARRAY_SELECTS), + MAKECONST(SQL_PARAM_TYPE_UNKNOWN), + MAKECONST(SQL_PARAM_INPUT), + MAKECONST(SQL_PARAM_INPUT_OUTPUT), + MAKECONST(SQL_PARAM_OUTPUT), + MAKECONST(SQL_RETURN_VALUE), + MAKECONST(SQL_RESULT_COL), + MAKECONST(SQL_PROCEDURES), + MAKECONST(SQL_PROCEDURE_TERM), + MAKECONST(SQL_QUOTED_IDENTIFIER_CASE), + MAKECONST(SQL_ROW_UPDATES), + MAKECONST(SQL_SCHEMA_TERM), + MAKECONST(SQL_SCHEMA_USAGE), + MAKECONST(SQL_SCROLL_OPTIONS), + MAKECONST(SQL_SEARCH_PATTERN_ESCAPE), + MAKECONST(SQL_SERVER_NAME), + MAKECONST(SQL_SPECIAL_CHARACTERS), + MAKECONST(SQL_SQL92_DATETIME_FUNCTIONS), + MAKECONST(SQL_SQL92_FOREIGN_KEY_DELETE_RULE), + MAKECONST(SQL_SQL92_FOREIGN_KEY_UPDATE_RULE), + MAKECONST(SQL_SQL92_GRANT), + MAKECONST(SQL_SQL92_NUMERIC_VALUE_FUNCTIONS), + MAKECONST(SQL_SQL92_PREDICATES), + MAKECONST(SQL_SQL92_RELATIONAL_JOIN_OPERATORS), + MAKECONST(SQL_SQL92_REVOKE), + MAKECONST(SQL_SQL92_ROW_VALUE_CONSTRUCTOR), + MAKECONST(SQL_SQL92_STRING_FUNCTIONS), + MAKECONST(SQL_SQL92_VALUE_EXPRESSIONS), + MAKECONST(SQL_SQL_CONFORMANCE), + MAKECONST(SQL_STANDARD_CLI_CONFORMANCE), + MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES1), + MAKECONST(SQL_STATIC_CURSOR_ATTRIBUTES2), + MAKECONST(SQL_STRING_FUNCTIONS), + MAKECONST(SQL_SUBQUERIES), + MAKECONST(SQL_SYSTEM_FUNCTIONS), + MAKECONST(SQL_TABLE_TERM), + MAKECONST(SQL_TIMEDATE_ADD_INTERVALS), + MAKECONST(SQL_TIMEDATE_DIFF_INTERVALS), + MAKECONST(SQL_TIMEDATE_FUNCTIONS), + 
MAKECONST(SQL_TXN_CAPABLE), + MAKECONST(SQL_TXN_ISOLATION_OPTION), + MAKECONST(SQL_UNION), + MAKECONST(SQL_USER_NAME), + MAKECONST(SQL_XOPEN_CLI_YEAR), +}; + + +static bool CreateExceptions() +{ + for (unsigned int i = 0; i < _countof(aExcInfos); i++) + { + ExcInfo& info = aExcInfos[i]; + + PyObject* classdict = PyDict_New(); + if (!classdict) + return false; + + PyObject* doc = PyString_FromString(info.szDoc); + if (!doc) + { + Py_DECREF(classdict); + return false; + } + + PyDict_SetItemString(classdict, "__doc__", doc); + Py_DECREF(doc); + + *info.ppexc = PyErr_NewException((char*)info.szFullName, *info.ppexcParent, classdict); + if (*info.ppexc == 0) + { + Py_DECREF(classdict); + return false; + } + + // Keep a reference for our internal (C++) use. + Py_INCREF(*info.ppexc); + + PyModule_AddObject(pModule, (char*)info.szName, *info.ppexc); + } + + return true; +} + + +PyMODINIT_FUNC +initpyodbc() +{ +#ifdef _DEBUG + #ifndef Py_REF_DEBUG + #error Py_REF_DEBUG not set! + #endif + + int grfDebugFlags = _CRTDBG_ALLOC_MEM_DF | _CRTDBG_CHECK_ALWAYS_DF; + _CrtSetDbgFlag(grfDebugFlags); +#endif + + ErrorInit(); + + if (PyType_Ready(&ConnectionType) < 0 || PyType_Ready(&CursorType) < 0 || PyType_Ready(&RowType) < 0) + return; + + pModule = Py_InitModule4("pyodbc", pyodbc_methods, module_doc, NULL, PYTHON_API_VERSION); + + if (!import_types()) + return; + + init_locale_info(); + + if (!CreateExceptions()) + return; + + PyObject* pVersion; + if (PYODBC_BUILD == 0) + pVersion = PyString_FromFormat("%d.%d.%d", PYODBC_MAJOR, PYODBC_MINOR, PYODBC_MICRO); + else + pVersion = PyString_FromFormat("%d.%d.%d-%d", PYODBC_MAJOR, PYODBC_MINOR, PYODBC_MICRO, PYODBC_BUILD); + if (!pVersion) + return; + PyModule_AddObject(pModule, "version", pVersion); + + PyModule_AddIntConstant(pModule, "threadsafety", 1); + PyModule_AddStringConstant(pModule, "apilevel", "2.0"); + PyModule_AddStringConstant(pModule, "paramstyle", "qmark"); + PyModule_AddObject(pModule, "pooling", Py_True); + 
Py_INCREF(Py_True); + PyModule_AddObject(pModule, "lowercase", Py_False); + Py_INCREF(Py_False); + + PyModule_AddObject(pModule, "Connection", (PyObject*)&ConnectionType); + Py_INCREF((PyObject*)&ConnectionType); + PyModule_AddObject(pModule, "Cursor", (PyObject*)&CursorType); + Py_INCREF((PyObject*)&CursorType); + PyModule_AddObject(pModule, "Row", (PyObject*)&RowType); + Py_INCREF((PyObject*)&RowType); + + // Add the SQL_XXX defines from ODBC. + for (unsigned int i = 0; i < _countof(aConstants); i++) + PyModule_AddIntConstant(pModule, (char*)aConstants[i].szName, aConstants[i].value); + + PyModule_AddObject(pModule, "Date", (PyObject*)PyDateTimeAPI->DateType); + Py_INCREF((PyObject*)PyDateTimeAPI->DateType); + PyModule_AddObject(pModule, "Time", (PyObject*)PyDateTimeAPI->TimeType); + Py_INCREF((PyObject*)PyDateTimeAPI->TimeType); + PyModule_AddObject(pModule, "Timestamp", (PyObject*)PyDateTimeAPI->DateTimeType); + Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); + PyModule_AddObject(pModule, "DATETIME", (PyObject*)PyDateTimeAPI->DateTimeType); + Py_INCREF((PyObject*)PyDateTimeAPI->DateTimeType); + PyModule_AddObject(pModule, "STRING", (PyObject*)&PyString_Type); + Py_INCREF((PyObject*)&PyString_Type); + PyModule_AddObject(pModule, "NUMBER", (PyObject*)&PyFloat_Type); + Py_INCREF((PyObject*)&PyFloat_Type); + PyModule_AddObject(pModule, "ROWID", (PyObject*)&PyInt_Type); + Py_INCREF((PyObject*)&PyInt_Type); + PyModule_AddObject(pModule, "BINARY", (PyObject*)&PyBuffer_Type); + Py_INCREF((PyObject*)&PyBuffer_Type); + PyModule_AddObject(pModule, "Binary", (PyObject*)&PyBuffer_Type); + Py_INCREF((PyObject*)&PyBuffer_Type); + + if (PyErr_Occurred()) + ErrorCleanup(); +} + +#ifdef WINVER +BOOL WINAPI DllMain( + HINSTANCE hMod, + DWORD fdwReason, + LPVOID lpvReserved + ) +{ + UNUSED(hMod, fdwReason, lpvReserved); + return TRUE; +} +#endif diff --git a/src/pyodbcmodule.h b/src/pyodbcmodule.h new file mode 100644 index 00000000..eb79e222 --- /dev/null +++ 
b/src/pyodbcmodule.h @@ -0,0 +1,62 @@ + +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS + * OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PYPGMODULE_H +#define _PYPGMODULE_H + +extern PyObject* Error; +extern PyObject* Warning; +extern PyObject* InterfaceError; +extern PyObject* DatabaseError; +extern PyObject* InternalError; +extern PyObject* OperationalError; +extern PyObject* ProgrammingError; +extern PyObject* IntegrityError; +extern PyObject* DataError; +extern PyObject* NotSupportedError; + +// Type objects such as 'int()'. + +extern PyObject* str_type; +extern PyObject* float_type; +extern PyObject* int_type; +extern PyObject* buffer_type; +extern PyObject* bool_type; +extern PyObject* long_type; +extern PyObject* decimal_type; + +inline bool PyDecimal_Check(PyObject* p) +{ + return p->ob_type == (_typeobject*)decimal_type; +} + +extern HENV henv; + +extern PyTypeObject RowType; +extern PyTypeObject CursorType; +extern PyTypeObject ConnectionType; + +// Thd pyodbc module. 
+extern PyObject* pModule; + +inline bool lowercase() +{ + return PyObject_GetAttrString(pModule, "lowercase") == Py_True; +} + +extern char chDecimal; +extern char chGroupSeparator; +extern char chCurrencySymbol; + + +#endif // _PYPGMODULE_H diff --git a/src/resource.h b/src/resource.h new file mode 100644 index 00000000..dc3971ab --- /dev/null +++ b/src/resource.h @@ -0,0 +1,14 @@ +//{{NO_DEPENDENCIES}} +// Microsoft Visual C++ generated include file. +// Used by pyodbc.rc + +// Next default values for new objects +// +#ifdef APSTUDIO_INVOKED +#ifndef APSTUDIO_READONLY_SYMBOLS +#define _APS_NEXT_RESOURCE_VALUE 101 +#define _APS_NEXT_COMMAND_VALUE 40001 +#define _APS_NEXT_CONTROL_VALUE 1001 +#define _APS_NEXT_SYMED_VALUE 101 +#endif +#endif diff --git a/src/row.cpp b/src/row.cpp new file mode 100644 index 00000000..222576be --- /dev/null +++ b/src/row.cpp @@ -0,0 +1,343 @@ + +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +#include "pyodbc.h" +#include "pyodbcmodule.h" +#include "row.h" +#include "wrapper.h" + +struct Row +{ + // A Row must act like a sequence (a tuple of results) to meet the DB API specification, but we also allow values + // to be accessed via lowercased column names. We also supply a `columns` attribute which returns the list of + // column names. + + PyObject_HEAD + + // cursor.description, accessed as _description + PyObject* description; + + // A Python dictionary mapping from column name to a PyInteger, used to access columns by name. + PyObject* map_name_to_index; + + // The number of values in apValues. + Py_ssize_t cValues; + + // The column values, stored as an array. + PyObject** apValues; +}; + +#define Row_Check(op) PyObject_TypeCheck(op, &RowType) +#define Row_CheckExact(op) ((op)->ob_type == &RowType) + +void +FreeRowValues(int cValues, PyObject** apValues) +{ + if (apValues) + { + for (Py_ssize_t i = 0; i < cValues; i++) + Py_XDECREF(apValues[i]); + free(apValues); + } +} + +static void +Row_dealloc(Row* self) +{ + // Note: Now that __newobj__ is available, our variables could be zero... + + Py_XDECREF(self->description); + Py_XDECREF(self->map_name_to_index); + FreeRowValues(self->cValues, self->apValues); + PyObject_Del(self); +} + +Row* +Row_New(PyObject* description, PyObject* map_name_to_index, Py_ssize_t cValues, PyObject** apValues) +{ + // Called by other modules to create rows. Takes ownership of apValues. + + Row* row = PyObject_NEW(Row, &RowType); + + if (row) + { + Py_INCREF(description); + row->description = description; + Py_INCREF(map_name_to_index); + row->map_name_to_index = map_name_to_index; + row->apValues = apValues; + row->cValues = cValues; + } + else + { + FreeRowValues(cValues, apValues); + } + + return row; +} + +static PyObject* +Row_getattro(PyObject* o, PyObject* name) +{ + // Called to handle 'row.colname'. 
+ + Row* self = (Row*)o; + + PyObject* index = PyDict_GetItem(self->map_name_to_index, name); + + if (index) + { + // REVIEW: How is this going to work on a 64-bit system? First, will the value be a PyInt or a PyLong? + Py_ssize_t i = PyInt_AsSsize_t(index); + Py_INCREF(self->apValues[i]); + return self->apValues[i]; + } + + return PyObject_GenericGetAttr(o, name); +} + +static Py_ssize_t +Row_length(Row* self) +{ + return self->cValues; +} + + +static int +Row_contains(Row *self, PyObject *el) +{ + // Implementation of contains. The documentation is not good (non-existent?), so I copied the following from the + // PySequence_Contains documentation: Return -1 if error; 1 if ob in seq; 0 if ob not in seq. + + int cmp = 0; + + for (Py_ssize_t i = 0, c = self->cValues ; cmp == 0 && i < c; ++i) + cmp = PyObject_RichCompareBool(el, self->apValues[i], Py_EQ); + + return cmp; +} + +static PyObject * +Row_item(Row* self, Py_ssize_t i) +{ + // Apparently, negative indexes are handled by magic ;) -- they never make it here. + + if (i < 0 || i >= self->cValues) + { + PyErr_SetString(PyExc_IndexError, "tuple index out of range"); + return NULL; + } + + Py_INCREF(self->apValues[i]); + return self->apValues[i]; +} + + +static int +Row_ass_item(Row* self, Py_ssize_t i, PyObject* v) +{ + // Implements row[i] = value. 
+ + if (i < 0 || i >= self->cValues) + { + PyErr_SetString(PyExc_IndexError, "Row assignment index out of range"); + return -1; + } + + Py_XDECREF(self->apValues[i]); + Py_INCREF(v); + self->apValues[i] = v; + + return 0; +} + +static int +Row_setattro(PyObject* o, PyObject *name, PyObject* v) +{ + Row* self = (Row*)o; + + PyObject* index = PyDict_GetItem(self->map_name_to_index, name); + + if (index) + return Row_ass_item(self, PyInt_AsSsize_t(index), v); + + return PyObject_GenericSetAttr(o, name, v); +} + +static PyObject * +Row_slice(PyObject* o, Py_ssize_t iFirst, Py_ssize_t iMax) +{ + // Note: Negative indexes will have already been converted to positive ones before this is called. It is possible + // for the iMax value to be too high if row[:] or row[1:] is used. + // + // I don't think iFirst can ever be below zero, but the tuple slice function checks for it, so we will too. + + Row* self = (Row*)o; + + if (iFirst < 0) + iFirst = 0; + if (iMax > self->cValues) + iMax = self->cValues; + if (iMax < iFirst) + iMax = iFirst; + + if (iFirst == 0 && iMax == self->cValues) + { + Py_INCREF(o); + return o; + } + + Py_ssize_t len = iMax - iFirst; + PyObject* result = PyTuple_New(len); + if (!result) + return 0; + + for (Py_ssize_t i = 0; i < len; i++) + { + PyObject* item = self->apValues[iFirst + i]; + PyTuple_SET_ITEM(result, i, item); + Py_INCREF(item); + } + + return result; +} + +static PyObject * +Row_repr(PyObject* o) +{ + Row* self = (Row*)o; + + if (self->cValues == 0) + return PyString_FromString("()"); + + Object pieces = PyTuple_New(self->cValues); + if (!pieces) + return 0; + + for (Py_ssize_t i = 0; i < self->cValues; i++) + { + PyObject* piece = PyObject_Repr(self->apValues[i]); + if (!piece) + return 0; + PyTuple_SET_ITEM(pieces.Get(), i, piece); + } + + Object sep = PyString_FromString(", "); + if (!sep) + return 0; + + Object s = _PyString_Join(sep, pieces); + if (!s) + return 0; + + const char* szWrapper = (self->cValues == 1) ? 
"(%s, )" : "(%s)"; + + Object result = PyString_FromFormat(szWrapper, PyString_AsString(s.Get())); + return result.Detach(); +} + + + +static PySequenceMethods row_as_sequence = +{ + (lenfunc)Row_length, // sq_length + 0, // sq_concat + 0, // sq_repeat + (ssizeargfunc)Row_item, // sq_item + Row_slice, // sq_slice + (ssizeobjargproc)Row_ass_item, // sq_ass_item + 0, // sq_ass_slice + (objobjproc)Row_contains, // sq_contains +}; + +static char description_doc[] = "The Cursor.description sequence from the Cursor that created this row."; + +static PyMemberDef Row_members[] = +{ + { "cursor_description", T_OBJECT_EX, offsetof(Row, description), READONLY, description_doc }, + { 0 } +}; + + +static char row_doc[] = + "Row objects are sequence objects that hold query results.\n" + "\n" + "They are similar to tuples in that they cannot be resized and new attributes\n" + "cannot be added, but individual elements can be replaced. This allows data to\n" + "be \"fixed up\" after being fetched. (For example, datetimes may be replaced by\n" + "those with time zones attached.)\n" + "\n" + " row[0] = row[0].replace(tzinfo=timezone)\n" + " print row[0]\n" + "\n" + "Additionally, individual values can be optionally be accessed or replaced by\n" + "name. 
Non-alphanumeric characters are replaced with an underscore.\n" + "\n" + " cursor.execute(\"select customer_id, [Name With Spaces] from tmp\")\n" + " row = cursor.fetchone()\n" + " print row.customer_id, row.Name_With_Spaces\n" + "\n" + "If using this non-standard feature, it is often convenient to specifiy the name\n" + "using the SQL 'as' keyword:\n" + "\n" + " cursor.execute(\"select count(*) as total from tmp\")\n" + " row = cursor.fetchone()\n" + " print row.total"; + +PyTypeObject RowType = +{ + PyObject_HEAD_INIT(0) + 0, // ob_size + "pyodbc.Row", // tp_name + sizeof(Row), // tp_basicsize + 0, // tp_itemsize + (destructor)Row_dealloc, // destructor tp_dealloc + 0, // tp_print + 0, // tp_getattr + 0, // tp_setattr + 0, // tp_compare + Row_repr, // tp_repr + 0, // tp_as_number + &row_as_sequence, // tp_as_sequence + 0, // tp_as_mapping + 0, // tp_hash + 0, // tp_call + 0, // tp_str + Row_getattro, // tp_getattro + Row_setattro, // tp_setattro + 0, // tp_as_buffer + Py_TPFLAGS_DEFAULT, // tp_flags + row_doc, // tp_doc + 0, // tp_traverse + 0, // tp_clear + 0, // tp_richcompare + 0, // tp_weaklistoffset + 0, // tp_iter + 0, // tp_iternext + 0, // tp_methods + Row_members, // tp_members + 0, // tp_getset + 0, // tp_base + 0, // tp_dict + 0, // tp_descr_get + 0, // tp_descr_set + 0, // tp_dictoffset + 0, // tp_init + 0, // tp_alloc + 0, // tp_new + 0, // tp_free + 0, // tp_is_gc + 0, // tp_bases + 0, // tp_mro + 0, // tp_cache + 0, // tp_subclasses + 0, // tp_weaklist +}; diff --git a/src/row.h b/src/row.h new file mode 100644 index 00000000..bd80852b --- /dev/null +++ b/src/row.h @@ -0,0 +1,34 @@ + +/* + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + * permit persons 
* Dereferences each object in apValues and frees apValues. This is the internal format used by rows.
Returns a string composed of `_TESTSTR` repeated to make a string `length` characters long.
+ + To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are + tested with 3 lengths. This function helps us generate the test data. + + We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will + be hidden and to help us manually identify where a break occurs. + """ + if length <= len(_TESTSTR): + return _TESTSTR[:length] + + c = (length + len(_TESTSTR)-1) / len(_TESTSTR) + v = _TESTSTR * c + return v[:length] + + +class AccessTestCase(unittest.TestCase): + + SMALL_FENCEPOST_SIZES = [ 0, 1, 254, 255 ] # text fields <= 255 + LARGE_FENCEPOST_SIZES = [ 256, 270, 304, 508, 510, 511, 512, 1023, 1024, 2047, 2048, 4000, 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] + + ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] + UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] + IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] + + def __init__(self, method_name): + unittest.TestCase.__init__(self, method_name) + + def setUp(self): + self.cnxn = pyodbc.connect(CNXNSTRING) + self.cursor = self.cnxn.cursor() + + for i in range(3): + try: + self.cursor.execute("drop table t%d" % i) + self.cnxn.commit() + except: + pass + + self.cnxn.rollback() + + def tearDown(self): + try: + self.cursor.close() + self.cnxn.close() + except: + # If we've already closed the cursor or connection, exceptions are thrown. 
+ pass + + def test_multiple_bindings(self): + "More than one bind and select on a cursor" + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", 1) + self.cursor.execute("insert into t1 values (?)", 2) + self.cursor.execute("insert into t1 values (?)", 3) + for i in range(3): + self.cursor.execute("select n from t1 where n < ?", 10) + self.cursor.execute("select n from t1 where n < 3") + + + def test_different_bindings(self): + self.cursor.execute("create table t1(n int)") + self.cursor.execute("create table t2(d datetime)") + self.cursor.execute("insert into t1 values (?)", 1) + self.cursor.execute("insert into t2 values (?)", datetime.now()) + + def test_datasources(self): + p = pyodbc.dataSources() + self.assert_(isinstance(p, dict)) + + def test_getinfo_string(self): + value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + self.assert_(isinstance(value, str)) + + def test_getinfo_bool(self): + value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + self.assert_(isinstance(value, bool)) + + def test_getinfo_int(self): + value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + self.assert_(isinstance(value, (int, long))) + + def test_getinfo_smallint(self): + value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + self.assert_(isinstance(value, int)) + + def _test_strtype(self, sqltype, value, colsize=None): + """ + The implementation for string, Unicode, and binary tests. 
+ """ + assert colsize is None or (value is None or colsize >= len(value)), 'colsize=%s value=%s' % (colsize, 'none' if value is None else len(value)) + + if colsize: + sql = "create table t1(n1 int not null, s1 %s(%s), s2 %s(%s))" % (sqltype, colsize, sqltype, colsize) + else: + sql = "create table t1(n1 int not null, s1 %s, s2 %s)" % (sqltype, sqltype) + + self.cursor.execute(sql) + self.cursor.execute("insert into t1 values(1, ?, ?)", (value, value)) + row = self.cursor.execute("select s1, s2 from t1").fetchone() + + # Access only uses Unicode, but strings might have been passed in to see if they can be written. When we read + # them back, they'll be unicode, so compare our results to a Unicode version of `value`. + if type(value) is str: + value = unicode(value) + + for i in range(2): + v = row[i] + + self.assertEqual(type(v), type(value)) + + if value is not None: + self.assertEqual(len(v), len(value)) + + self.assertEqual(v, value) + + # + # unicode + # + + def test_unicode_null(self): + self._test_strtype('varchar', None, 255) + + # Generate a test for each fencepost size: test_varchar_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('varchar', value, len(value)) + t.__doc__ = 'unicode %s' % len(value) + return t + for value in UNICODE_FENCEPOSTS: + locals()['test_unicode_%s' % len(value)] = _maketest(value) + + # + # ansi -> varchar + # + + # Access only stores Unicode text but it should accept ASCII text. + + # Generate a test for each fencepost size: test_varchar_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('varchar', value, len(value)) + t.__doc__ = 'ansi %s' % len(value) + return t + for value in ANSI_FENCEPOSTS: + locals()['test_ansivarchar_%s' % len(value)] = _maketest(value) + + # + # binary + # + + # Generate a test for each fencepost size: test_varchar_0, etc. 
+ def _maketest(value): + def t(self): + self._test_strtype('varbinary', buffer(value), len(value)) + t.__doc__ = 'binary %s' % len(value) + return t + for value in ANSI_FENCEPOSTS: + locals()['test_binary_%s' % len(value)] = _maketest(value) + + + # + # image + # + + def test_null_image(self): + self._test_strtype('image', None) + + # Generate a test for each fencepost size: test_varchar_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('image', buffer(value)) + t.__doc__ = 'image %s' % len(value) + return t + for value in IMAGE_FENCEPOSTS: + locals()['test_image_%s' % len(value)] = _maketest(value) + + # + # memo + # + + def test_null_memo(self): + self._test_strtype('memo', None) + + # Generate a test for each fencepost size: test_varchar_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('memo', unicode(value)) + t.__doc__ = 'Unicode to memo %s' % len(value) + return t + for value in IMAGE_FENCEPOSTS: + locals()['test_memo_%s' % len(value)] = _maketest(value) + + # ansi -> memo + def _maketest(value): + def t(self): + self._test_strtype('memo', value) + t.__doc__ = 'ANSI to memo %s' % len(value) + return t + for value in IMAGE_FENCEPOSTS: + locals()['test_ansimemo_%s' % len(value)] = _maketest(value) + + def test_subquery_params(self): + """Ensure parameter markers work in a subquery""" + self.cursor.execute("create table t1(id integer, s varchar(20))") + self.cursor.execute("insert into t1 values (?,?)", 1, 'test') + row = self.cursor.execute(""" + select x.id + from ( + select id + from t1 + where s = ? + and id between ? and ? 
+ ) x + """, 'test', 1, 10).fetchone() + self.assertNotEqual(row, None) + self.assertEqual(row[0], 1) + + def _exec(self): + self.cursor.execute(self.sql) + + def test_close_cnxn(self): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + self.cursor.execute("create table t1(id integer, s varchar(20))") + self.cursor.execute("insert into t1 values (?,?)", 1, 'test') + self.cursor.execute("select * from t1") + + self.cnxn.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) + self.sql = "select * from t1" + self.assertRaises(pyodbc.ProgrammingError, self._exec) + + + def test_unicode_query(self): + self.cursor.execute(u"select 1") + + def test_negative_row_index(self): + self.cursor.execute("create table t1(s varchar(20))") + self.cursor.execute("insert into t1 values(?)", "1") + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(row[0], "1") + self.assertEquals(row[-1], "1") + + def test_version(self): + self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. 
+ + # + # date, time, datetime + # + + def test_datetime(self): + value = datetime(2007, 1, 15, 3, 4, 5) + + self.cursor.execute("create table t1(dt datetime)") + self.cursor.execute("insert into t1 values (?)", value) + + result = self.cursor.execute("select dt from t1").fetchone()[0] + self.assertEquals(value, result) + + # + # ints and floats + # + + def test_int(self): + value = 1234 + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_negative_int(self): + value = -1 + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_smallint(self): + value = 32767 + self.cursor.execute("create table t1(n smallint)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_real(self): + value = 1234.5 + self.cursor.execute("create table t1(n real)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_negative_real(self): + value = -200.5 + self.cursor.execute("create table t1(n real)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(value, result) + + def test_float(self): + value = 1234.567 + self.cursor.execute("create table t1(n float)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_negative_float(self): + value = -200.5 + self.cursor.execute("create table t1(n float)") + self.cursor.execute("insert 
into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(value, result) + + def test_tinyint(self): + self.cursor.execute("create table t1(n tinyint)") + value = 10 + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(type(result), type(value)) + self.assertEqual(value, result) + + # + # decimal & money + # + + def test_decimal(self): + value = Decimal('12345.6789') + self.cursor.execute("create table t1(n numeric(10,4))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + def test_money(self): + self.cursor.execute("create table t1(n money)") + value = Decimal('1234.45') + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(type(result), type(value)) + self.assertEqual(value, result) + + def test_negative_decimal_scale(self): + value = Decimal('-10.0010') + self.cursor.execute("create table t1(d numeric(19,4))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + # + # bit + # + + def test_bit(self): + self.cursor.execute("create table t1(b bit)") + + value = True + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select b from t1").fetchone()[0] + self.assertEqual(type(result), bool) + self.assertEqual(value, result) + + def test_bit_null(self): + self.cursor.execute("create table t1(b bit)") + + value = None + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select b from t1").fetchone()[0] + self.assertEqual(type(result), bool) + self.assertEqual(False, result) + + def test_guid(self): + # 
REVIEW: Python doesn't (yet) have a UUID type so the value is returned as a string. Access, however, only + # really supports Unicode. For now, we'll have to live with this difference. All strings in Python 3.x will + # be Unicode -- pyodbc 3.x will have different defaults. + value = "de2ac9c6-8676-4b0b-b8a6-217a8580cbee" + self.cursor.execute("create table t1(g1 uniqueidentifier)") + self.cursor.execute("insert into t1 values (?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), type(value)) + self.assertEqual(len(v), len(value)) + + + # + # rowcount + # + + def test_rowcount_delete(self): + self.assertEquals(self.cursor.rowcount, -1) + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, count) + + def test_rowcount_nodata(self): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over + the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a + zero return value. + """ + self.cursor.execute("create table t1(i int)") + # This is a different code path internally. + self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, 0) + + def test_rowcount_select(self): + """ + Ensure Cursor.rowcount is set properly after a select statement. + + pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a + select statement, so we'll test for that behavior. This is valid behavior according to the DB API + specification, but people don't seem to like it. 
+ """ + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("select * from t1") + self.assertEquals(self.cursor.rowcount, -1) + + rows = self.cursor.fetchall() + self.assertEquals(len(rows), count) + self.assertEquals(self.cursor.rowcount, -1) + + def test_rowcount_reset(self): + "Ensure rowcount is reset to -1" + + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.assertEquals(self.cursor.rowcount, 1) + + self.cursor.execute("create table t2(i int)") + self.assertEquals(self.cursor.rowcount, -1) + + # + # Misc + # + + def test_lower_case(self): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + # Has to be set before creating the cursor, so we must recreate self.cursor. + + pyodbc.lowercase = True + self.cursor = self.cnxn.cursor() + + self.cursor.execute("create table t1(Abc int, dEf int)") + self.cursor.execute("select * from t1") + + names = [ t[0] for t in self.cursor.description ] + names.sort() + + self.assertEquals(names, [ "abc", "def" ]) + + # Put it back so other tests don't fail. + pyodbc.lowercase = False + + def test_row_description(self): + """ + Ensure Cursor.description is accessible as Row.cursor_description. 
+ """ + self.cursor = self.cnxn.cursor() + self.cursor.execute("create table t1(a int, b char(3))") + self.cnxn.commit() + self.cursor.execute("insert into t1 values(1, 'abc')") + + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(self.cursor.description, row.cursor_description) + + + def test_executemany(self): + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (i, str(i)) for i in range(1, 6) ] + + self.cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = self.cursor.execute("select count(*) from t1").fetchone()[0] + self.assertEqual(count, len(params)) + + self.cursor.execute("select a, b from t1 order by a") + rows = self.cursor.fetchall() + self.assertEqual(count, len(rows)) + + for param, row in zip(params, rows): + self.assertEqual(param[0], row[0]) + self.assertEqual(param[1], row[1]) + + + def test_executemany_failure(self): + """ + Ensure that an exception is raised if one query in an executemany fails. 
+ """ + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, 'good'), + ('error', 'not an int'), + (3, 'good') ] + + self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) + + + def test_row_slicing(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = row[:] + self.failUnless(result is row) + + result = row[:-1] + self.assertEqual(result, (1,2,3)) + + result = row[0:4] + self.failUnless(result is row) + + + def test_row_repr(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = str(row) + self.assertEqual(result, "(1, 2, 3, 4)") + + result = str(row[:-1]) + self.assertEqual(result, "(1, 2, 3)") + + result = str(row[:1]) + self.assertEqual(result, "(1,)") + + + def test_concatenation(self): + v2 = u'0123456789' * 25 + v3 = u'9876543210' * 25 + value = v2 + 'x' + v3 + + self.cursor.execute("create table t1(c2 varchar(250), c3 varchar(250))") + self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) + + row = self.cursor.execute("select c2 + 'x' + c3 from t1").fetchone() + + self.assertEqual(row[0], value) + + + def test_autocommit(self): + self.assertEqual(self.cnxn.autocommit, False) + + othercnxn = pyodbc.connect(CNXNSTRING, autocommit=True) + self.assertEqual(othercnxn.autocommit, True) + + othercnxn.autocommit = False + self.assertEqual(othercnxn.autocommit, False) + + +def main(): + from optparse import OptionParser + parser = OptionParser(usage=usage) + parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") + parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") + 
parser.add_option("-t", "--test", help="Run only the named test") + + (options, args) = parser.parse_args() + + if len(args) != 1: + parser.error('dbfile argument required') + + if args[0].endswith('.accdb'): + driver = 'Microsoft Access Driver (*.mdb, *.accdb)' + else: + driver = 'Microsoft Access Driver (*.mdb)' + + global CNXNSTRING + CNXNSTRING = 'DRIVER={%s};DBQ=%s;ExtendedAnsiSQL=1' % (driver, abspath(args[0])) + + cnxn = pyodbc.connect(CNXNSTRING) + print_library_info(cnxn) + cnxn.close() + + suite = load_tests(AccessTestCase, options.test) + + testRunner = unittest.TextTestRunner(verbosity=options.verbose) + result = testRunner.run(suite) + + +if __name__ == '__main__': + + # Add the build directory to the path so we're testing the latest build, not the installed version. + add_to_path() + import pyodbc + main() diff --git a/tests/dbapi20.py b/tests/dbapi20.py new file mode 100644 index 00000000..36d4edfb --- /dev/null +++ b/tests/dbapi20.py @@ -0,0 +1,850 @@ +#!/usr/bin/env python +''' Python DB API 2.0 driver compliance unit test suite. + + This software is Public Domain and may be used without restrictions. + + "Now we have booze and barflies entering the discussion, plus rumours of + DBAs on drugs... and I won't tell you what flashes through my mind each + time I read the subject line with 'Anal Compliance' in it. All around + this is turning out to be a thoroughly unwholesome unit test." + + -- Ian Bicking +''' + +__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $' +__version__ = '$Revision: 1.10 $'[11:-2] +__author__ = 'Stuart Bishop ' + +import unittest +import time + +# $Log: dbapi20.py,v $ +# Revision 1.10 2003/10/09 03:14:14 zenzen +# Add test for DB API 2.0 optional extension, where database exceptions +# are exposed as attributes on the Connection object. +# +# Revision 1.9 2003/08/13 01:16:36 zenzen +# Minor tweak from Stefan Fleiter +# +# Revision 1.8 2003/04/10 00:13:25 zenzen +# Changes, as per suggestions by M.-A. 
# - Test exception hierarchy correctly +# - self.populate is now self._populate(), so if a driver stub +# overrides self.ddl1 this change propagates +# - VARCHAR columns now have a width, which will hopefully make the +# DDL even more portable (this will be reversed if it causes more problems)
+ + self.drivers should subclass this test, overriding setUp, tearDown, + self.driver, connect_args and connect_kw_args. Class specification + should be as follows: + + import dbapi20 + class mytest(dbapi20.DatabaseAPI20Test): + [...] + + Don't 'import DatabaseAPI20Test from dbapi20', or you will + confuse the unit tester - just 'import dbapi20'. + ''' + + # The self.driver module. This should be the module where the 'connect' + # method is to be found + driver = None + connect_args = () # List of arguments to pass to connect + connect_kw_args = {} # Keyword arguments for connect + table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables + + ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix + ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix + xddl1 = 'drop table %sbooze' % table_prefix + xddl2 = 'drop table %sbarflys' % table_prefix + + lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase + + # Some drivers may need to override these helpers, for example adding + # a 'commit' after the execute. + def executeDDL1(self,cursor): + cursor.execute(self.ddl1) + + def executeDDL2(self,cursor): + cursor.execute(self.ddl2) + + def setUp(self): + ''' self.drivers should override this method to perform required setup + if any is necessary, such as creating the database. + ''' + pass + + def tearDown(self): + ''' self.drivers should override this method to perform required cleanup + if any is necessary, such as deleting the test database. + The default drops the tables that may be created. + ''' + con = self._connect() + try: + cur = con.cursor() + for i, ddl in enumerate((self.xddl1,self.xddl2)): + try: + cur.execute(ddl) + con.commit() + except self.driver.Error: + # Assume table didn't exist. Other tests will check if + # execute is busted. 
# Make sure required exceptions exist, and are in the + # defined hierarchy.
+ self.failUnless(issubclass(self.driver.Warning,StandardError)) + self.failUnless(issubclass(self.driver.Error,StandardError)) + self.failUnless( + issubclass(self.driver.InterfaceError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.DatabaseError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.OperationalError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.IntegrityError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.InternalError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.ProgrammingError,self.driver.Error) + ) + self.failUnless( + issubclass(self.driver.NotSupportedError,self.driver.Error) + ) + + def test_ExceptionsAsConnectionAttributes(self): + # OPTIONAL EXTENSION + # Test for the optional DB API 2.0 extension, where the exceptions + # are exposed as attributes on the Connection object + # I figure this optional extension will be implemented by any + # driver author who is using this test suite, so it is enabled + # by default. 
+ con = self._connect() + drv = self.driver + self.failUnless(con.Warning is drv.Warning) + self.failUnless(con.Error is drv.Error) + self.failUnless(con.InterfaceError is drv.InterfaceError) + self.failUnless(con.DatabaseError is drv.DatabaseError) + self.failUnless(con.OperationalError is drv.OperationalError) + self.failUnless(con.IntegrityError is drv.IntegrityError) + self.failUnless(con.InternalError is drv.InternalError) + self.failUnless(con.ProgrammingError is drv.ProgrammingError) + self.failUnless(con.NotSupportedError is drv.NotSupportedError) + + + def test_commit(self): + con = self._connect() + try: + # Commit must work, even if it doesn't do anything + con.commit() + finally: + con.close() + + def test_rollback(self): + con = self._connect() + # If rollback is defined, it should either work or throw + # the documented exception + if hasattr(con,'rollback'): + try: + con.rollback() + except self.driver.NotSupportedError: + pass + + def test_cursor(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + def test_cursor_isolation(self): + con = self._connect() + try: + # Make sure cursors created from the same connection have + # the documented transaction isolation level + cur1 = con.cursor() + cur2 = con.cursor() + self.executeDDL1(cur1) + cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + cur2.execute("select name from %sbooze" % self.table_prefix) + booze = cur2.fetchall() + self.assertEqual(len(booze),1) + self.assertEqual(len(booze[0]),1) + self.assertEqual(booze[0][0],'Victoria Bitter') + finally: + con.close() + + def test_description(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual(cur.description,None, + 'cursor.description should be none after executing a ' + 'statement that can return no rows (such as DDL)' + ) + cur.execute('select name from %sbooze' % self.table_prefix) + self.assertEqual(len(cur.description),1, + 
'cursor.description describes too many columns' + ) + self.assertEqual(len(cur.description[0]),7, + 'cursor.description[x] tuples must have 7 elements' + ) + self.assertEqual(cur.description[0][0].lower(),'name', + 'cursor.description[x][0] must return column name' + ) + self.assertEqual(cur.description[0][1],self.driver.STRING, + 'cursor.description[x][1] must return column type. Got %r' + % cur.description[0][1] + ) + + # Make sure self.description gets reset + self.executeDDL2(cur) + self.assertEqual(cur.description,None, + 'cursor.description not being set to None when executing ' + 'no-result statements (eg. DDL)' + ) + finally: + con.close() + + def test_rowcount(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + self.assertEqual(cur.rowcount,-1, + 'cursor.rowcount should be -1 after executing no-result ' + 'statements' + ) + cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + self.failUnless(cur.rowcount in (-1,1), + 'cursor.rowcount should == number or rows inserted, or ' + 'set to -1 after executing an insert statement' + ) + cur.execute("select name from %sbooze" % self.table_prefix) + self.failUnless(cur.rowcount in (-1,1), + 'cursor.rowcount should == number of rows returned, or ' + 'set to -1 after executing a select statement' + ) + self.executeDDL2(cur) + self.assertEqual(cur.rowcount,-1, + 'cursor.rowcount not being reset to -1 after executing ' + 'no-result statements' + ) + finally: + con.close() + + lower_func = 'lower' + def test_callproc(self): + con = self._connect() + try: + cur = con.cursor() + if self.lower_func and hasattr(cur,'callproc'): + r = cur.callproc(self.lower_func,('FOO',)) + self.assertEqual(len(r),1) + self.assertEqual(r[0],'FOO') + r = cur.fetchall() + self.assertEqual(len(r),1,'callproc produced no result set') + self.assertEqual(len(r[0]),1, + 'callproc produced invalid result set' + ) + self.assertEqual(r[0][0],'foo', + 'callproc produced invalid 
results' + ) + finally: + con.close() + + def test_close(self): + con = self._connect() + try: + cur = con.cursor() + finally: + con.close() + + # cursor.execute should raise an Error if called after connection + # closed + self.assertRaises(self.driver.Error,self.executeDDL1,cur) + + # connection.commit should raise an Error if called after connection' + # closed.' + self.assertRaises(self.driver.Error,con.commit) + + # connection.close should raise an Error if called more than once + self.assertRaises(self.driver.Error,con.close) + + def test_execute(self): + con = self._connect() + try: + cur = con.cursor() + self._paraminsert(cur) + finally: + con.close() + + def _paraminsert(self,cur): + self.executeDDL1(cur) + cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + self.failUnless(cur.rowcount in (-1,1)) + + if self.driver.paramstyle == 'qmark': + cur.execute( + 'insert into %sbooze values (?)' % self.table_prefix, + ("Cooper's",) + ) + elif self.driver.paramstyle == 'numeric': + cur.execute( + 'insert into %sbooze values (:1)' % self.table_prefix, + ("Cooper's",) + ) + elif self.driver.paramstyle == 'named': + cur.execute( + 'insert into %sbooze values (:beer)' % self.table_prefix, + {'beer':"Cooper's"} + ) + elif self.driver.paramstyle == 'format': + cur.execute( + 'insert into %sbooze values (%%s)' % self.table_prefix, + ("Cooper's",) + ) + elif self.driver.paramstyle == 'pyformat': + cur.execute( + 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, + {'beer':"Cooper's"} + ) + else: + self.fail('Invalid paramstyle') + self.failUnless(cur.rowcount in (-1,1)) + + cur.execute('select name from %sbooze' % self.table_prefix) + res = cur.fetchall() + self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') + beers = [res[0][0],res[1][0]] + beers.sort() + self.assertEqual(beers[0],"Cooper's", + 'cursor.fetchall retrieved incorrect data, or data inserted ' + 'incorrectly' + ) + 
self.assertEqual(beers[1],"Victoria Bitter", + 'cursor.fetchall retrieved incorrect data, or data inserted ' + 'incorrectly' + ) + + def test_executemany(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + largs = [ ("Cooper's",) , ("Boag's",) ] + margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] + if self.driver.paramstyle == 'qmark': + cur.executemany( + 'insert into %sbooze values (?)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'numeric': + cur.executemany( + 'insert into %sbooze values (:1)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'named': + cur.executemany( + 'insert into %sbooze values (:beer)' % self.table_prefix, + margs + ) + elif self.driver.paramstyle == 'format': + cur.executemany( + 'insert into %sbooze values (%%s)' % self.table_prefix, + largs + ) + elif self.driver.paramstyle == 'pyformat': + cur.executemany( + 'insert into %sbooze values (%%(beer)s)' % ( + self.table_prefix + ), + margs + ) + else: + self.fail('Unknown paramstyle') + self.failUnless(cur.rowcount in (-1,2), + 'insert using cursor.executemany set cursor.rowcount to ' + 'incorrect value %r' % cur.rowcount + ) + cur.execute('select name from %sbooze' % self.table_prefix) + res = cur.fetchall() + self.assertEqual(len(res),2, + 'cursor.fetchall retrieved incorrect number of rows' + ) + beers = [res[0][0],res[1][0]] + beers.sort() + self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') + self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') + finally: + con.close() + + def test_fetchone(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchone should raise an Error if called before + # executing a select-type query + self.assertRaises(self.driver.Error,cur.fetchone) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + self.executeDDL1(cur) + self.assertRaises(self.driver.Error,cur.fetchone) + + 
cur.execute('select name from %sbooze' % self.table_prefix) + self.assertEqual(cur.fetchone(),None, + 'cursor.fetchone should return None if a query retrieves ' + 'no rows' + ) + self.failUnless(cur.rowcount in (-1,0)) + + # cursor.fetchone should raise an Error if called after + # executing a query that cannnot return rows + cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( + self.table_prefix + )) + self.assertRaises(self.driver.Error,cur.fetchone) + + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchone() + self.assertEqual(len(r),1, + 'cursor.fetchone should have retrieved a single row' + ) + self.assertEqual(r[0],'Victoria Bitter', + 'cursor.fetchone retrieved incorrect data' + ) + self.assertEqual(cur.fetchone(),None, + 'cursor.fetchone should return None if no more rows available' + ) + self.failUnless(cur.rowcount in (-1,1)) + finally: + con.close() + + samples = [ + 'Carlton Cold', + 'Carlton Draft', + 'Mountain Goat', + 'Redback', + 'Victoria Bitter', + 'XXXX' + ] + + def _populate(self): + ''' Return a list of sql commands to setup the DB for the fetch + tests. + ''' + populate = [ + "insert into %sbooze values ('%s')" % (self.table_prefix,s) + for s in self.samples + ] + return populate + + def test_fetchmany(self): + con = self._connect() + try: + cur = con.cursor() + + # cursor.fetchmany should raise an Error if called without + #issuing a query + self.assertRaises(self.driver.Error,cur.fetchmany,4) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchmany() + self.assertEqual(len(r),1, + 'cursor.fetchmany retrieved incorrect number of rows, ' + 'default of arraysize is one.' 
+ ) + cur.arraysize=10 + r = cur.fetchmany(3) # Should get 3 rows + self.assertEqual(len(r),3, + 'cursor.fetchmany retrieved incorrect number of rows' + ) + r = cur.fetchmany(4) # Should get 2 more + self.assertEqual(len(r),2, + 'cursor.fetchmany retrieved incorrect number of rows' + ) + r = cur.fetchmany(4) # Should be an empty sequence + self.assertEqual(len(r),0, + 'cursor.fetchmany should return an empty sequence after ' + 'results are exhausted' + ) + self.failUnless(cur.rowcount in (-1,6)) + + # Same as above, using cursor.arraysize + cur.arraysize=4 + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchmany() # Should get 4 rows + self.assertEqual(len(r),4, + 'cursor.arraysize not being honoured by fetchmany' + ) + r = cur.fetchmany() # Should get 2 more + self.assertEqual(len(r),2) + r = cur.fetchmany() # Should be an empty sequence + self.assertEqual(len(r),0) + self.failUnless(cur.rowcount in (-1,6)) + + cur.arraysize=6 + cur.execute('select name from %sbooze' % self.table_prefix) + rows = cur.fetchmany() # Should get all rows + self.failUnless(cur.rowcount in (-1,6)) + self.assertEqual(len(rows),6) + self.assertEqual(len(rows),6) + rows = [r[0] for r in rows] + rows.sort() + + # Make sure we get the right data back out + for i in range(0,6): + self.assertEqual(rows[i],self.samples[i], + 'incorrect data retrieved by cursor.fetchmany' + ) + + rows = cur.fetchmany() # Should return an empty list + self.assertEqual(len(rows),0, + 'cursor.fetchmany should return an empty sequence if ' + 'called after the whole result set has been fetched' + ) + self.failUnless(cur.rowcount in (-1,6)) + + self.executeDDL2(cur) + cur.execute('select name from %sbarflys' % self.table_prefix) + r = cur.fetchmany() # Should get empty sequence + self.assertEqual(len(r),0, + 'cursor.fetchmany should return an empty sequence if ' + 'query retrieved no rows' + ) + self.failUnless(cur.rowcount in (-1,0)) + + finally: + con.close() + + def test_fetchall(self): + 
con = self._connect() + try: + cur = con.cursor() + # cursor.fetchall should raise an Error if called + # without executing a query that may return rows (such + # as a select) + self.assertRaises(self.driver.Error, cur.fetchall) + + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + # cursor.fetchall should raise an Error if called + # after executing a a statement that cannot return rows + self.assertRaises(self.driver.Error,cur.fetchall) + + cur.execute('select name from %sbooze' % self.table_prefix) + rows = cur.fetchall() + self.failUnless(cur.rowcount in (-1,len(self.samples))) + self.assertEqual(len(rows),len(self.samples), + 'cursor.fetchall did not retrieve all rows' + ) + rows = [r[0] for r in rows] + rows.sort() + for i in range(0,len(self.samples)): + self.assertEqual(rows[i],self.samples[i], + 'cursor.fetchall retrieved incorrect rows' + ) + rows = cur.fetchall() + self.assertEqual( + len(rows),0, + 'cursor.fetchall should return an empty list if called ' + 'after the whole result set has been fetched' + ) + self.failUnless(cur.rowcount in (-1,len(self.samples))) + + self.executeDDL2(cur) + cur.execute('select name from %sbarflys' % self.table_prefix) + rows = cur.fetchall() + self.failUnless(cur.rowcount in (-1,0)) + self.assertEqual(len(rows),0, + 'cursor.fetchall should return an empty list if ' + 'a select query returns no rows' + ) + + finally: + con.close() + + def test_mixedfetch(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + for sql in self._populate(): + cur.execute(sql) + + cur.execute('select name from %sbooze' % self.table_prefix) + rows1 = cur.fetchone() + rows23 = cur.fetchmany(2) + rows4 = cur.fetchone() + rows56 = cur.fetchall() + self.failUnless(cur.rowcount in (-1,6)) + self.assertEqual(len(rows23),2, + 'fetchmany returned incorrect number of rows' + ) + self.assertEqual(len(rows56),2, + 'fetchall returned incorrect number of rows' + ) + + rows = [rows1[0]] + 
rows.extend([rows23[0][0],rows23[1][0]]) + rows.append(rows4[0]) + rows.extend([rows56[0][0],rows56[1][0]]) + rows.sort() + for i in range(0,len(self.samples)): + self.assertEqual(rows[i],self.samples[i], + 'incorrect data retrieved or inserted' + ) + finally: + con.close() + + def help_nextset_setUp(self,cur): + ''' Should create a procedure called deleteme + that returns two result sets, first the + number of rows in booze then "name from booze" + ''' + raise NotImplementedError,'Helper not implemented' + #sql=""" + # create procedure deleteme as + # begin + # select count(*) from booze + # select name from booze + # end + #""" + #cur.execute(sql) + + def help_nextset_tearDown(self,cur): + 'If cleaning up is needed after nextSetTest' + raise NotImplementedError,'Helper not implemented' + #cur.execute("drop procedure deleteme") + + def test_nextset(self): + con = self._connect() + try: + cur = con.cursor() + if not hasattr(cur,'nextset'): + return + + try: + self.executeDDL1(cur) + sql=self._populate() + for sql in self._populate(): + cur.execute(sql) + + self.help_nextset_setUp(cur) + + cur.callproc('deleteme') + numberofrows=cur.fetchone() + assert numberofrows[0]== len(self.samples) + assert cur.nextset() + names=cur.fetchall() + assert len(names) == len(self.samples) + s=cur.nextset() + assert s == None,'No more return sets, should return None' + finally: + self.help_nextset_tearDown(cur) + + finally: + con.close() + + def test_nextset(self): + raise NotImplementedError,'Drivers need to override this test' + + def test_arraysize(self): + # Not much here - rest of the tests for this are in test_fetchmany + con = self._connect() + try: + cur = con.cursor() + self.failUnless(hasattr(cur,'arraysize'), + 'cursor.arraysize must be defined' + ) + finally: + con.close() + + def test_setinputsizes(self): + con = self._connect() + try: + cur = con.cursor() + cur.setinputsizes( (25,) ) + self._paraminsert(cur) # Make sure cursor still works + finally: + con.close() + + 
def test_setoutputsize_basic(self): + # Basic test is to make sure setoutputsize doesn't blow up + con = self._connect() + try: + cur = con.cursor() + cur.setoutputsize(1000) + cur.setoutputsize(2000,0) + self._paraminsert(cur) # Make sure the cursor still works + finally: + con.close() + + def test_setoutputsize(self): + # Real test for setoutputsize is driver dependant + raise NotImplementedError,'Driver need to override this test' + + def test_None(self): + con = self._connect() + try: + cur = con.cursor() + self.executeDDL1(cur) + cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) + cur.execute('select name from %sbooze' % self.table_prefix) + r = cur.fetchall() + self.assertEqual(len(r),1) + self.assertEqual(len(r[0]),1) + self.assertEqual(r[0][0],None,'NULL value not returned as None') + finally: + con.close() + + def test_Date(self): + d1 = self.driver.Date(2002,12,25) + d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(d1),str(d2)) + + def test_Time(self): + t1 = self.driver.Time(13,45,30) + t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Timestamp(self): + t1 = self.driver.Timestamp(2002,12,25,13,45,30) + t2 = self.driver.TimestampFromTicks( + time.mktime((2002,12,25,13,45,30,0,0,0)) + ) + # Can we assume this? API doesn't specify, but it seems implied + # self.assertEqual(str(t1),str(t2)) + + def test_Binary(self): + b = self.driver.Binary('Something') + b = self.driver.Binary('') + + def test_STRING(self): + self.failUnless(hasattr(self.driver,'STRING'), + 'module.STRING must be defined' + ) + + def test_BINARY(self): + self.failUnless(hasattr(self.driver,'BINARY'), + 'module.BINARY must be defined.' 
+ ) + + def test_NUMBER(self): + self.failUnless(hasattr(self.driver,'NUMBER'), + 'module.NUMBER must be defined.' + ) + + def test_DATETIME(self): + self.failUnless(hasattr(self.driver,'DATETIME'), + 'module.DATETIME must be defined.' + ) + + def test_ROWID(self): + self.failUnless(hasattr(self.driver,'ROWID'), + 'module.ROWID must be defined.' + ) + diff --git a/tests/dbapitests.py b/tests/dbapitests.py new file mode 100644 index 00000000..c69c2e35 --- /dev/null +++ b/tests/dbapitests.py @@ -0,0 +1,43 @@ + +import unittest +from testutils import * +import dbapi20 + +def main(): + add_to_path() + import pyodbc + + from optparse import OptionParser + parser = OptionParser(usage="usage: %prog [options] connection_string") + parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") + parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") + + (options, args) = parser.parse_args() + if len(args) > 1: + parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') + + if not args: + connection_string = load_setup_connection_string('dbapitests') + + if not connection_string: + parser.print_help() + raise SystemExit() + else: + connection_string = args[0] + + class test_pyodbc(dbapi20.DatabaseAPI20Test): + driver = pyodbc + connect_args = [ connection_string ] + connect_kw_args = {} + + def test_nextset(self): pass + def test_setoutputsize(self): pass + def test_ExceptionsAsConnectionAttributes(self): pass + + suite = unittest.makeSuite(test_pyodbc, 'test') + testRunner = unittest.TextTestRunner(verbosity=(options.verbose > 1) and 9 or 0) + result = testRunner.run(suite) + +if __name__ == '__main__': + main() + diff --git a/tests/empty.accdb b/tests/empty.accdb new file mode 100644 index 0000000000000000000000000000000000000000..95cc8ae5023403f25cb315a9f886fa5eee333d8a GIT binary patch literal 311296 zcmeI53v^sZndj?vw_e>+x8yjAaqOgHJ0^*2BgwL2n>a}BmTU{jvMk%dgE!WrTXtJ7 zN0ObGOpr)m4q@RW!_4wnV8dgF$1KAv1O}GCOkfiB@H)#9CJeJY!UD6fBsYeHzOY~19qOGyEXneQwDWyI) z<{#**`0C+bed5GK%WKs8-}uPG>9_pbh9}Q`@wxgh{p4F882rJ{|N4>ke}3DS_W!j1 z*;{x1`2D~8&=WWP{nVzn^=%E`+x*U7{^ZluV^_sKzU|Xnd;j^|>wo*I-@oM}&;0&- z%ddaW|NDcE5A?im;(?*#|K=aR^WcWJocP-HuZ(~9fBl!_bLq1G-2S0^YAc`n)#ick z9V%6-X-hpc;YI=^KmsH{0wh2JBtQZrKmsJN6a)g!E=Te6vL)a+i3qv?^yX8KMb1W+ zqE;~t5+DH*AOR8}0TLhq5+DH*AOR8}ftQ59^XUa`KIg|F(Pwo)siPQkkGjg)K7kJ& zsP#vCYyG`7Dx_+(X@HmqC_Vq7I|DyYhhM@gQ?3U~x0Mp-5)6Js^f2#QA#f;V%eUUYN}ov_x*#E@J=5~B)< zd1C}|q&=#UnAby6&>j&Kv8YEF>g9MK0j-s0=Rp#LRuz(YX{LaUk~}t7U2`IYl~Fff z)vc6s;7Ll9)JcU@Nsfo);g>x8nLJEVavH)arpDD6K1nsGrc||xtA2d(_3B;_0Jg#G;a6G2=!+jJn18N__2UMpzi`?aZo4N;S zPQbrfA9o>MLY+ZMNmZ#`4#0OD`IKi;394>&x7K&K+X!ttP_{A30|Iv1l_-HoaFt@qt zh(~_oQyMI zf+WpZX$+RCUVO)O!w$Ko)Bl)G`_t*yrPDX-78>N^>GZaAy4r^lNmIY>xc`~A>Qc?s z5G%}yjV8A$@QVaUfCNZ@1W14cNPq-LfCNb3QX}AVeqALxRbgb>+t@XYZ+M{TE(86& z7;nb_rZ{Ua7!5eua6lrP%_s4*jdnX+?U*tDl00L8fjiJAATdgju(!vc1bqnZ(0;B* z83O7-^lVpxu`77I;YCM#1Tw||#$#~%9Ao~vk0F0N-oNQZX9fBgrtpz|g#ipho8fL5 
zq&}?&nI|#0ozer!<(c8`{DJIXW}vzOJqmVcwy{X>gbZX)pqE1WG2CHfJNmpy_fy2u z>9KU$l>=a230(e7W3e}B?B0`xQQMqhk7614xtpkH=a6e~w}9LT}k+pKm0 z+Cx@e@HYR&9AA56Tlv;n-EIC$2Z4!UTKQrs?>7H!Qh6TIb5Jx!X#WAjh zEs(_BjL+&3Tb&ZT^oXr2cc#aBqY(7sXW}CV*x@1maPf(N*t2VaaNHJ1VEQ3MobK)z z?da(0>KPs$>NwmpEa#zSMb@jcW7u_cjAL&i)n`IoWgPkv-Q#zr{S*D(IMBV)^$2Nj1;sc*BF=4j!v6}5*KH>Z}{x6QG0ka zk%Hzr_-e1Gt5ZqVtt-(tbZ5_*uEd?=eFH{|@&ez{vqL>~{*KYnj;=fUdIm;y!Zou5 z>+b0~+Sg%pjm#3}@c2<#LPOPrX9;k$r>CboF^(eYU{!Mj8$LR4*bP=WORzf=1BZ1w zp;-b9-+^fCC+O4Rs9k98M+1hq^jO zdk!a3N8M9#ZShd%OspyHIhsgxzP_i+ts!d*0vX?<-6oOivi{y^fvoe%@t%x6rCH~~ zDqChe#|OrTd%ElVx(aG)*NcZw*TC*h?cz5s!{dE@9jUWN2YM6Qd6jWS{8V>O?@{p5 z&aYVD@0HO=fCNZ@1W14cNPq-L;H4yh^~PoDf6>hsD6fvT%wR3_*@dDJ>3GQym^PZA zwGRO;lbWkUz!|SuDFWtl)LbnBI$&yQMZmiQYOWChOEc805&;tdYF;rtV`2qFkXR)m zNUTy3BvzRSn0ZlCF2XesDnvj_s3s`FwGcugtc6f1!u1fUM7RM$ScLTuA|l)fp<0Aj zLZ}hpW(ccAsE2T^2u%>yh_C~~S`luCaGeOP5OfnximZDRq{zB=LzMSTKdfY{!2vfC zAOR8}0TLhq5+DH*AORA1c@S`%wR#S>l-BRs|Gzv+&Qg*936KB@kN^pg011!)36KB@ zkidBfVBmAfww9$x>A(UL2{@k*&Nc`-$FTp>do56ZMw^>}4B%snD=Row@L9nx0>;VX zB~~c4LYWoHMZgexyuu1WD}=02DFW(Jyv7RJ=s;Yw(SZos=m0;SxHUS6Yoh~k(MAU% zXrlud6vSU4>4>n}3fEd;jR>$%h;I-977FnjW!N7Ywrj%#5wu}~2-+|~a=%%^h;WMt z$g5qODu|#>6-3ac3dpown<|K)O%+7YrV21{h}TJU5jI(2vj{MGh__o|j}`Wc03(Qa zixpa}uv>&{;kC;Ow_D*gBEV20zS9b?w!&>9To12Xt+3q+J4ApnMZC!h+pN$m!g_c$ zSfSAhTSd4LUNI}gt=yy19Pw^fXjfKnM1XNeyUz-KD`$$HUd($Etp>>Ypm#T)DRL3!y!sr-OF|OM@S(sIPdc{B7mC z%AYU$NZISkUMPK6>9*3Zmh_iAA9ygZkqZf4`4xsGd>Im`QFx!8ydyH=sJHiT{OSvx zKWq8)qj&wad<%{CaB({cKlJ6;kOI zRU_~BbF=vUH~;F(lC=E}!lz61s&mL<9O!wg}my^h{Llu7Z znrsxZ!5aqMY`*=;Kh)XS@7c52qbAfiN_biw!nZe5$?yU9$XQ>JaB>E1eT6}J8 zGZ}1G-cPeBfz&pdU3Kx@#zPWbEsKk$YvZFd=MEp^UDCU>Smy7#ILO8_8YEl zWj+YKN096mRruMIrH%XQoJkGp6L%Vf<KrlPdh&k}V?m&Y@LYSNMKh(z3Nvt|wA2MaveoqkAvzY{FA6yQw7%! 
zb1jkU;$O)H>&Ai%XWWeEli?w)z?9ls^pnX%-d_s-xW3!cTu3JJI~>ICP<}8YxI`EG zWU{(pQCaQ5k6*6z=P3`~kGqKS>{D&}6USLR%AW@#-dntgQuU&e%S~mf$Q@bk(U~fNHzzM5o5Lu{B*2}(x7-JI3BJvgP$$t0kqaJ86J z!sA%RFGO@~RE3{<4IwSb;SMp6 zUW&Z{HRtVY5SU*>AHtL@GI&3zipoVvI?9EShSr&_LYZ`tO_GxuKW-e={R07Z`= zY231HO|ai^otf)I-KGjZJ98k+k+8iX4dCi2&(K1(2`|ac|1@S3bmr9b5E3#E;F45s zam`WY;lk&{t57)gS>Qa1XCk{HgPd)D3C?1Pt{hT9)KxgE*ypnav*}i##>&r5s>F*< zKs*9cJCu0HML?-axM&Y2<6-l0J^XndvXs={a`DPTxaw>FN<8l)S{&`0{1E0+wG|k_XqG;ivp+9YPLTg3cFH+ zQ0ianZ;68<&lUmhe#E)jjq|ol9MG5}P5^OgbK=O!kW-=0d>Whh2ILZBYKHufWryko ze&VdRte;NiCfuqMc>a-dFgLL^Zph-?g08PHG@08B`Aq^OKmsH{0wh2JBtQZrKmsH% zKLT9;e}0@8iUdf21W14cNPq-LfCNZ@1V~^x3A8LlX3?ccqJfX{iStW1dIjb~f`?}@M14lA`k zvoxGhV-*&P!|uyn+fOeIhyacXV*O}Y3s#n%aob#%+bZti20HL36KB@kN^pg011!)36KB@kN^oRHGzeW|F6a% zKX!ld@qZu3|CjniF+CC>0TLhq5+DH*AOR8}0TLhq5?BNQj{h$L9;1=~36KB@kN^pg z011!)36KB@kica^V4>sxYcT$gJy3l7Kfv+-%jEo1O%fmh5+DH*AOR8}0TLhq5+H$> z2LX=%zdX(}OGyGGKmsH{0wh2JBtQZrKmsK2@+Pp*@&D^E{*S$sGVrp3}!$ABtQZrKmsH{ z0wh2JBtQZrKmsH%2LX=%&jE*CBtQZrKmsH{0wh2JBtQZrKmsIig%I#L?+!~*`Z2dZ zJC={Z{k-z+FI53mnsvMhL9mA;jvpVT^!R_Zgy_c}5RVeXF&=*HQI2D6F<_Pw$i`6W zMWPV^x6WsF)FhPdp)o!fofMN7#}269Lh{#Uy*TXhkL&|p(Z_{_QPcq zp;u=@9YTC@>D94E)oD-YL+TuklPa7EeOMh-oq!MkkrM1Mgus74cA7p24Q?br0wh2J zBtQZrKmsH{0wh2J7oC9P?A7gmQKVEo7C9Sfh&&tqSa=}gdeK#+0wh2JBtQZrKmsH{ z0wh2JBtQZrKmv;-;B$^tNfIB%()}`=ehuUX>;>chWlBCW&@a2X1wpWvL#&WvY+l(O z>+if_dT~<^A1GIP#NT5IpgklVZ9I@2@rPR2`uu?ca|7++-{h`CkTo7K?;+Uqv~pZK zgc5|*HUc`v(=h}UtxB~?L)IgPJXX3!14$KA6Byx~fC)bsod zp_K(hgvzP{S4+^Ze9+F1ody&dG5?VO36KB@kN^pg011!)36Q{|2{?|E8R;*}8UJ6K zabI*MOn?MPfCNZ@1W14cNPq-LfCNb3av?CY6pFSiMIzCqNJP&@Uu;54k_ZOPoycM} zSwtd^^R2R*-TXJIk3?D`KMH?1JQ&^({zcWNs>Z9LRX?iy)5^OlZ>s!7=>E{IP-&?2 zn}wf0`Nn~7RDI)1U%%_?H|_h$-jDB1?%lX|v4vuM5+DH*AOR8}fs0I_L#Z=Qz0moy zmM7hBu@O9gWpBDwhiZeR+#yv17qI}_q;66hR4qO$R1hv@>TcM;qK&sXvt4PY}XQ4re^~An4CALs9KYkGaf^76&EwP<9IpIZq2mXb zZGJ*xqt~hiVdXc8k4exjH-rZ=&7w#`u;ojA|4BhnI$0*$RgpWPe3zMotbew}3NycosiFqO4u6C+seWvVrDv}^9@YEyFt<=>m7q~f3=g~=?E*~vZ 
zEoE*J=YzjniDu_X^d=-)=_zQ&G!gFSJ1P57W!v@n^%T6~532vtTfe0$m~$#8K$0&~ zyUNtbCnGu0B(RBg02OmwokH~(gb}Dz2uVch)h)R-t4#TV@BFglZNH^qQR^uj4ppGK z3yV>mLrsgT0dasCZd})XTjxS>li%L~SflP$+jA?*w?FxZIt%+PO^$u|#kHw>kgTj` zHHf4w#Uj7IGG(LMZy6qp;r(8dDBd`d+M^4628m@8Dq8<-0`^<%k9T8Lu5lC$=TsMB zBPzgk`J8M7`z?X%m6Tf)m*4)X*`ipq64(M8fRXN`{`p=|xr3f0q^j8~fY|(=$Vu)t zoQ0;@G_r-LKyoqQOQ7^(G@ON3@Ozb5 zl8yQG=G=O+C*77p0?sE3xjlCy0|_s?T=#b8B66IulFlQoo$!?(-*@sq{ozlQ{;`}d zZX`egBtQa7PT*VpqjG&t%RHotz_o8lOv~N<6~3Fd!{6%dN>_P zPE4l8$7187r~9W;10%_;jl0%u-LbW?v9Z2h0>rxZAmsM?*0!zf4Q;o2clgE+?B2Dm zp}wK9eRq4ze&D_*2IuV!t?gUuo7$UXZ{H2)s^IJQj!#ZS+wPf4jwO;4(S2h>*o6U*813vhKBn3ZLRmc`pE9K zS7&0?R|RuP$<11BYi4tB5BI6x;qm$yo<;A)lc9VXu{Yr{w;gk;j7xov6mwm(zBZJ6 zH=%`epIVRaEjU`Qc3_X8b#lyYr0vmn@Hb+{+%dR!B1wn+~d1(uQx8{ zN$r^X#MX>h;?jm(w%}o-6QA{PN#UvH44xB4R207Qkeb4yTGV~!iQ<{zG(IERRc5E9 z@LVy5P~L3?X6G3WO$gtPPYl|(q7<9eHXQB1XEXG!$G#hqgb_}LL#qy1ueQRqUhlVR z4m(^9!oqTpX3h!yTr;Y5Gz;%eBKIz>q4-Z?AA+V)uo%Ke%9BJ0?@>&qKZVC?dFGlx+sV6qsc1dfs`=O&kgXG&p^tIR zy4g_PdSbL#kFb`{9nfG5&&lKH1ra8bx(z2Y3P(X#N-XUs@t5ali9ZEt0P&^tZ75MI z>Vj!uWzV&@Bs+7?w4CKO@Q5RxoN4Lih(nTE5L358+K$@RhP*c;w)(NBCFK=8+O&S%$ivq9^VJpaInF)zW-eyGZaztKE{c5& zHBz|CQ8#>SGkmTZA#DI39>W7ZN0s8c9N$5oOlLH65as_b_YCX{=&6y$ePRC{@9A^p zC*EMDTslrU@?KcFjCCauxCb?3M%Ruqr$VLGGaKZshtt?2fF$PAN@=$)n zp;f%K``_JPtx(||=QdEmmZ)hpgmf@`O0@vSWVoG-Y`7cBFgc2<^`uJBd( zR+t}ON2RahHeaw-ITMpDTL0U9ufV;}y@io_q1Q@pxl>)7JXU zsyq+~$MBmDJ1gqzn{a1e-?*iIeLXJKJC6Zj0@vutW0U75r;^p9TN{u0lZ{(&F&Ikn0^XS{tjk-?x6Sr1ii-TvQLXzOmt0th>6at9!o+ zbnT0kAMEZ`f$sKpJVCTIG#+b>wK;0XmUw69`Yzl&C&nkoho*3C8yZRtCZi3G8j6}) z5r`%Nk42MXCvf>oM!N@2CDHvD?Mw|!^iR~Do9TwNf$w`tT~%|x|A{wL`$7Syr%HNg zi*3F@d3Cg91~bp|g<_%Ub}15=p3y(VIwb;ru62$G=mx9viGbf_onHiW@ze!GK-XSf zi3sQ-tSc1(U5s^QBA~0Xu3QBCy6Y-LKzC|gPy}=x)rCYrXKr1k2%+XX@68fYF`0^&;SMP`5#Z1_(DQZX`egBtQZr zKmsH{0wh2JBtQZSBM@+YTsd3&zXeS;EFgFOuZ3mJSj#{_TF$!3kKQ=b36R?XppP=W z0QxA?6`+qY{Q>$Y(*$q_jjgy6RAEEO4|`Q(?|Py&I}LrbQwoI^6yOdCA!D&bR%l( z0yqty9X1(3kreW(^QRJtQLUr4R&b_=Qi&naN4x3X0cSeZGn7b;n#8rif-^nbl{z}) 
zI%$gqXZo&A6G_`G_=@F)NFAeHcj%;bH$?e#hm;yIGg;ms^vWeMX!81=E~A__T?kD# z>!`Z#AvE26G^JxX5o$qDIP;N$EyOKjSpnr^9Juhnh?Y zOb5nvogZ4%`Mt5Vox}>sTE|!lxi)cg0 zJKkU!u6Jotc(?uL{eR-Dy)bO*V7I{~XfXT!xa=GC(&-n?JNWY7^LOBz86oBa3cW{AOR8}0TLhq5+DH*AORAX zF9F9{3;S!c`Q7*b*<*f_011!)36KB@kN^pg011!)36KB@h{gL7Y_OJJ{I>8!9A`9w z3DJ7 z*jfP6rJ+6Y*8<3R%zG^WHM68^0eENsUx#QW&RAOR8}0TLhq5+DH*AOR8}0TQ?%0y4_E*llL92}t~wi<^k!{MV`` zSA~#zD)Q0DxkzW^`pEwYKN)^cxI4Tq{9A1wuoPM{4H6&$5+DH*AOR8}0TLhq5+H$P zCXk&SE~eh{ez8`3OOsgUn%n8*GUL2V-Cd?9qKlDt&<%_)ZS-9 zhKzNwSd%wne)1M|i=LZqa-B9Ril@v$xAV}&b=F)Zummts6=uwzC$I#NT{d}f+?**( z0NG`enfY@DmH>85J|Q<}U&H2b9iubchoq-xg9 zespa$>t;VX#F}-pAKhrpy4jD;w`Se!M@!!{`_U2DTp=l=`>Ph3W~n)M=0?yF*Y4>7X~cR)*4xlBYkdCr8+C_|rr(~%i` zn^pb%vZ~*#ar}t03n3-WgE~%jy+s^an~#97G3Qmzr#`1yKYf^AT2n@@;-|s!9|@2E z36KB@kN^pg011!)30y%0xc=W2bTU~K5+DH*AOR8}0TLhq5+DH*Ab|n`(E^tRI8-kn zPC*pAwn2f%g*YrY5y$!C$PPCrpI-m(Lb7F85+DH*AOR8}0TLhq5+DH*AOR8}fu$p0 zmNb;b8f6np_f{?0#R6r?#vtt4Wx>XA6FB4s5G(K3%pN%4gu}M|?}o~J`HcB^=9t3e zM?hxjqbj^w&;LgSdbPItzx>KUjY)t6NPq-LfCNZ@1W14cNPq+uM}Y1B#qqn;W4r7B znf5=i;U;gTPB|3!L^j8}*3)cj4`QhrG4_|Il-_eqU>~f4?{wdw%V;8%Nl^Ti#p%vkW1o z2x!axxOzZ4;H~!lbhf)AeMm5d6eYho-)gTlSg8)6Xel*{*$ESv zDezj%6gbDZ0^T!1nFL6H1W14cNPq-LfCNZ@1W14cTmp`>He>Pc*Y^JnnPWmC0TLhq z5+DH*AOR8}0TLhq5+DH*5CY|wJ%)f24YywQrCAKQL>A#e`rDUSyc*!pVe-rp>;I)D6fg1y#^1sjj zR)3BEMc=P|Q@(!Re&6?|R=-Jz}@!rR~Uza}k z%&o_+yY89V=C6Lf@z5Lo<;PD3Pkp`p+z&tUrt-0P&skB2+B#edCFY_hn3hp zr`#(Gl`~GuuAF3> zr<_ay%)8DRr)5`8vdvS@4~L6zuaa!@G?Ar{(&>ozUS*w@`7Up>_1xt#fpR8KIlQqc z-w7qC4ln;xiODwSE^jsQ6ai0gk@SmMzL9{uB1$_gCtpvtRyo({MFr(T=BW=(C6;fY zlP97Lwai7Z+1dvy=o={1*E)IhwNA^dug%t@FMfii{$}}FCy&0?X_@u4+2-mS@%{vX z9HF$+@+%i{a$5nSNnZWMtnYlj*2%-yIxTY%Y_=Z0xESQ?Yn?p$TBl{!*JkU{7e#=% zd`?>oSH1iqSSL>rtkW_V!DgE&g5!Lmyz@wFCp^?DrM}Y_c`E#s&{w`WpU?le{LLSv z*xX2f1W14cx(+s~RdQEW*Qr*uU-hbeDyn+a87!?LORPjyml{_I9NmQW+DUwmVo$)m z9qto2iedqmaW$rFjWtCmj375DcLMzK8Qp=j63F+Yx~0G` z=H_ep8Nzawt}6(8MC)`nO0yefEcEZyC7yEq|5E=v(B8yWDXaR(pDR0%9mr_f2Yp~) 
zj}G)LY8$>Ql@sv0z7@*x>2MI75?M@0(kX@SQ-~W>%3p>xiQ09X@=WSZpu&xh(EcFg zJ9Ms2Amj;C3Hkf3d)VV1fh?M_X0v8zuQFF!&5r78_8MGI`t|iKp|1kw8ha4epAr22 zi~Ame&po)7)Z*$Wi)khGwOg)TCU&L1a*g0vuG?W9O0NGC`kJvK@3ZSSDY?r zaT+PdArCNL5?9S}gp;d$ zFTO>i6}aAxLy}TYs)La4Mk7Y9wN_(Ep%d=?`bsKg%xdrTTZMasTve0MUbM?zuWR)k zZA4$i$8hbj*WO(BY|5*0!kbhj-7%NMB<>AD`ZV;;GM0ObEMztXesT@Z`mR7}%@Swk z?sfttm2#W&){2s*a8DZ3_t_JWgwzS0QWBxe3)DZ3E1mYgmaNTvUs9q05_mv^KZy_N!%^lg?+fTt=?)bOo)ji=kasf5)2IP|^E1I~sHG4}GWn@fr z2E{srQ#FO;lTf!66-sX5a^mHbkK-hthKgf2Wh0(MSL-|D3aBaS%fpR4$|O|-mhw|U zM{U5*ObT}r?(%>U#X5MBLsGkpB31dZN-927o9u&1yRIln+=;hUqTfhwHR?e<{{L+F z2jQ=UyTiAIZw`N^>XE9qS6yA@tNMq^dn%LQ%#8#{fCNZ@1W14cNPq-LfCT0s5I}PW zb~PTm4;a*1r{x?0^<-VlIr_guslgJk$ z4@OSH{(n{Ex$s|v?+e$5|991AtA?xARsEv!!)X7nsr*&w8=;Sd-W=)=wS=w?{m5wm#YJs1xfhX$wd2s(~| z*KATIncFZ-n@Of=9?6(!^1i7|G#LjwU+%dEEHYGV61rXO%qDcRCn2wOQLdGapv%c$ z=1&-Nq6y%UL1|+Olu)4=dHP+G%|Yf>^y)ER`+BK;I6*@|QxH_F@XhIv!Nw6in^s=1 zl_@sbO$d-3^_|o~R-xgP2_lx}W~fre1H~vw#^iE>$PmA=e=_6wMS`HfGuAN4c(25` zY$g+=IAezqz0|}mLadrhl$$W5n2912iR>A(W0X=W??EjwL(9ied#7}Ll`%*eZIl6e zL7#KRZk$I#c1&9$=hWKu^N1|xa}eiKPS=Ree>TsG7+Vx#6~o~ zI~gB01{IU2iDF<;>DKrnee~92qsF+rb7ZylCd6 zhmh4aXdE>}qUKyDkYQx@qKG;hR5T~+i2XTRs{r#8UV|$~MnfQv7^_IBNhW|NTgQ1{ zL@uYYIqE+=WF-D40TLhq5+DH*AOR8}0TLhq5+DH*xJU%b)RXGWQ!jM>ti^tBfhLcZ zsg@w-0?2k8GZ%KFL+&`{${a)|ZU;IoV(6-qUKZ(Ykgk`V=uVdQe-JKZYT9ug@wET{ zob7+%$c+R@fCNZ@1W14cNPq-LfCNZ@1THRt!ae{osuJ6vtX{co|GQxlQ)9yD)_*gs z2y)v0f8uHXe~#_{EJsQtKmsH{0wh2JBtQZrKmsH{0wi#82^97MG(e%%j%o?HmH@eZ z0Ji-vQ)24#$NwMlwErJr`~TveS>{dxBtQZrKmsH{0wh2JBtQZr@bV;3*wWt&5LktO q#`2IQ6=au9R`rlM{_>tBnFnB(2axyliT(e6%m`@5dN7{x|NjpTeKR}& literal 0 HcmV?d00001 diff --git a/tests/empty.mdb b/tests/empty.mdb new file mode 100644 index 0000000000000000000000000000000000000000..dac96e8c0a4f023ebeab71b1d1151b876d9eb658 GIT binary patch literal 188416 zcmeI53vgUldB@M)T}i9m)m~}s7{{@bwXqdr6vuWRgftDZtr)CGeo1z4V2CBJWD7kl zDRxxSdJ`H7VM;QgFoA)9OQFzGHw=)WIGs!(yc!^BD1DF_5|@@oNzx$`I!!6Q{eS1& 
zvwQa;S(X%8lK*G5d%r!;@BGfaS9|XH&N<3as%^AmAk#6NNjGy?Z{o@f%}bePP3+&wlsg2mkfozy67Bzq$9(9j7~eM*FNy`XTMN8uxRUNuld4NZNC|N+pn*@bKR$&yYnN_%O3n~ z-3_}h`J>@?ci;A*|9R?ttM57V&C4!tdiupDyI%0BzP0Vd9ZM1~{Njqve{8Q*>gx3y zpa~rbkN^pg011!)36KB@kN^pgz)TRRchKGA>w+cVI2l*^`{F|ClzY@QGCxUx1W14c zNPq-LfCNZ@1W14cNPq<97J=;7y(znp^W(VGW?4k3{TOq<$gi4xCSYsYR;S{s4vz@r zF##N+I(Um)Vh#mzLh`CsVR%*}=Tt6=4v6YJ3In2&BBE^%Bw&$F5mQm611gFFQV57h zimF^zCdD%35tD!zgu=qlI68$F^x*q| z+5!K5q>QNTh#yfc>L^N=&zsa8$a5IswfcK2(q+``$f-*uw9hVt4x*fBE|-|vsg7x_ zhmmR*(u!^a$VYNtqV}LHkq1;aQnuhd04XKCAJrRY`oh@>U6a*5I8~F?`1H#yN%0D!h$4IUOjF011!) z36KB@kN^pg00}Gr0wL#fNvXz8^gE>2(dgGncSE%N$wzwoFh@YT=B9hDgV6DHr1J}B z`~SLkjcyaX3cIULxz49_e8%mM>wc_JL9)tsi7VA+bVFOwk=v#_cZbno8&*B&z)6Qy zI*hgG7>;Ge@*ulFE ziKVv8!j}JGTaZ-53Fp8HvSLnnrv>EP} zLFyqr$UK6;6|Qdy!rGZe=buZ0TLhq5+DH* zAOR8}0TLjA1xFy{JR$u_bp=fIgI!6}k5}qiSZ9kv|GxtLe=+EnH!O;kqaPf|mx_@~{>uXblfv}!#Z=z!{@bjI0_4d-*%XyU4<&M0nHq`h zK|ZS_ED8>y-`|I40{t*2kHMgP7*7bq`XGtVU|21l5&WJut+%m_PtZ1DwT`m~J^yaq z`xo=|gOD->>Bf&0%xifrFEPI#^`B3Sz`VT&Pc6=Fo*qJ5YC8~A{BB)&NrQ(s+B`xY z4V3_a1c!WCIqNSL&>yaUfRbo$`QS{!yRtS3(q zm7pU55+DH*AOR8}0TLhq5+H&3OTckn))wzlgOpzXZ=?Hs^3Ri<$rZ_esrg{dj+*M4 z#}mgB*C&1*e>mP7uP++(pT9My<|IG@BtQZrKmsH{0wgdo0Xrr(ajc0$vl9n1bm|1` z@=8)i?NwKs4{B-d^*tTq*_P=! 
zU}D9M$AQew!FTv^lE$lN_dA;V_h&kLZ`t2%;)ZNMcc!P`cqNQiZ)?x~Zr`iM_d42( z9JD5`@#$}Fu|+8HagHD8J=$y3&a}L3cdMU4G#KdXRFU`VY(CQ6+_`&iPiC;YwYh)y z-b~MaKUQ7&SVq-U`M~~6uqq#5jN z(VW7@r+4tcf##m0`#ak*+Pm6#5BBcv*}1!If9Gyl%R|-W9_yX=bC3#<011!)36KB@ zkN^pg013#?AuJN%^$<))8G`QWi$40%gXA1Ke!mh|rze_dksl;L0wh2JBtQZrKmsH{0wh2J z(CO8OddO znM9oD;^U3877qJ2mKXv|)@KGtF^ee@kO6$S)G`q;{-3%;1dO+*mRn(k6)v^HWg_6L zNTsc?(h94r@LCaY?xj{+VT~2)t#G*rFbznpwZiMHu+9pv7Xh{gsRk=tVTJWpxKad| zA*41~VWSl`S>Y}dt_Y%Y*b3VAL43410}LEe zuaO`TmRjLr5n%F=y4niYSm9a`U<8qRgB7l`!W%_^Sw!l3EBuZXZV&;65~)TjY_`G{ zJ!=3#W>x_NGy4I8nVtY)v7QqEK~I*DD(krhBItPqB4`r~5tjG{7pZD1#H_&8J`^1Z zkN^pg011!)36KB@EF1!kbDv%J9lahi{x9qQEu0#%CL}-tBtQZrKmsH{0wh2JBtQc5 zj)2`cY~Cq8cNLB}=_Ce1=}3SCNPq-LfCNZ@1W14cNPq-LfCLsW0gnGK;KpUuNPq-L zfCNZ@1W14cNPq-LVBQkI1mNO`fW`wdR$?gNZjJUW}iJx5umE zkH&6`eZD$geQWgAZ+{WnvC)wL36KB@kN^qH8i6|57gY9gbsg3IvoGEM%a)%s+G7kM zt*T9psS&k9b*Q6i7d{76m)fa1)jM%Ks7BRx)r9aPYp+%+Yt?Suvq?5Eg-1%=jCfsY zNVTXz#P3jjYKe}9ZCc@C%0V$9)voTo7BX^)d3 zx)n`1iiSM|vcswe(oCrQVI{A91+VtJ%J`j_h1PCZSyk;3zNn|D)S))xJUgfr8qsI$ zAz!&h%(N|j-DombIKyy4PLR2|4E7+aKAi09Rq3lCpA{y-PLNeAkY#XM_W62kP^GW+ z`OG2{X4ZzP3jZNgU0R?ts}0aqUq(_%Y|%S`W>UQ&-Jp}o48Qd!$ZVU=s9$T)5_2${L@tZh$wRB(HdMHedBgwJkNRqj9;RP~RQYOGu&F{JI~bzQ;hT2p}BJ>VuN zpbd@Dk5>l@8N&^20L2WVfCo`PWO4;y184tYdghJG_3gFL>Qa5{>Hs_~Y20f_30KgsA=f;Q{T6dNLaGFh`fZaePZDB6> zp>>7C%{QOBf4WT~)6HtDx(T`tt=ptZUmNo0iLg97ghH}s)e(8t%E=eXk+uH-gJhfV zFYn0Ow}ez$=p>9y6uj>mSOpvj?33lRa zmdi${Uad-BoAMb7(eX8HM@F6gy=rJ(VRg;7+>Ot1{w$1YYqwRs;Qq+{mis05g!=*a zUG8Cbue-&)()~(ubMo(N+G-kWzLR()@u@^Z;^M@g#_x;Y8Lx=H6ni%I-Pqs6w#2TC zEss^l9;iNE-B-Q8`ro2YMjwrSF1jgtSu_!SrRwgg!K$}cZLfN|^0CU#SAL{&Ri#_` ze--ys3|HJ%@$<+JBR_mxUSEsc9Jx-;YB~}ifjLYdqB|ikL&Kr93F#83D&y6TA7vvi zeXr{MZyjjg{DGB^?R#s-^MCjD@8A3WUnRe|^TT&sb5-W){cqg=kx#$;?1`0sHF)&d zUp;Z#C9hP!H1hn`yI;KJ*yRs@{4ZNxiQRl`*MAQ@^36LRIM(?OgSUS8t~G1E^U)6t zcHQ$A>pq`)tnbsKH!u3pi(Ny(Vik%#lGkJ<6f06ryv~DiQf!fO(#8a#C!O za?-^pQck?igK|=Ak#f?R@H|%yU182SH=?YUZTx`OY@UXJV(kS1FX^8Y>1(|L`dY79 
z*4Gvr&{sM^MfzH=fWFpimi4v82K2>Gg<^fJS3qCuHOueaI;$?HEwbc+I*d zI8V;U|7mHR^YHTkrPd~A|Hq~&wxGGKCV{IMOwd0s_(>{Pu~X`G1`z{4XY561^YSmc zqF8eFE0qlX;?;M$lI#(yKnZE90x!quNVM$R+#=p7rJgdc!|vfHpGiLR{nFP56B3h- z1W14cNPq-LfCNZ@1kQ(?X5 zX=R6~(-?r1;nGozYz|_qYXsIS!LizXddzdgA5={jS;I)`pfSW4x^QO9x)o#6+jFI- zq1j>Ne+<523nPQhUAj~=#x{ay&|Bc!0}V15Slp_0J*ax2^)UPgkj9jgfd-O#n;wUi z*duCr8Lj$Ijtl~3P@h5zp0v6JX)~ztVYRL#thhd=#-i<|C2{v@9gczW77U)3hPOe7 zVU%OSKc-t2PYdDj>mMBr)7KE$f$YE^U58i*EH+>?f4#Z}$AoesVL!B5IUyYnf>VJ6 zj-;(b=nr(BPNd3@DXDmF@Z1>h+=${l5hv$KHRONx<#@n9;h#%#9vwz_7U!|ClIg>F zZ?DH)`o3W;;%>zC-6I3D-o4<=1wGjR<5J8>n;oj}Y@x?rI+h!DB=wBaZw zRVzg)bhX7*Yl8>>B)1ocD6;{9SzILTI+2(XB~Z}q`hu1VE~#&6{ZK+o_Qr6ji!+(Z z1tx!TBNV%#F<9rQn4?zXRwb1Z)x^#z4dXq@xl8+qg^ZYfU?>HbXDV=e+NLYgr9w`% z7!s)e!h1i8l4nTbZ;fS|5o1{+C8&{21vfdB65eYCEN`QF= zajG|>qM&Z&J!j+)$`o6~z5{>T`#xP(zEFJi z;KVnA8xvd-cz0^!pbRkFfnf2aBLNa10TLhq5+DH*AOR8}0TLjA3yy%}tkC^`sd!kO za*w(jbKfsG7_2Y}kN^pg011!)36KB@kN^pg00|TkXe|1E{=R1IK>L(bh&UBVnY~iZ zg(A^%-t?PC1deC5vaeAhz$jv=7GQ#L@jR*oRV4uuAOR8}0TLhq5+DH*AORAXHUY=E zS+8Cp89C~0)6OrDK}avsp=yoSQ^_YP-}}@ub#eE;V1i^R%#kU>rdV-!4pJF>17HnyCCOSPE{?ziW<@ zd_wOSe*c{9{4dTx{@+`_tytV{1i$|VaUSM3cAfm0xC}i&tfq%`;->cdXFJNh3G2SK zVaYt%`F8Rpn;M|~r*-~^^{Rrh3#M7+&T^UT5x+mrT7Gd}(q}fi^%jTZze}9i=>O+S zJiBzR=UFhv^Yh=0rIG*%kN^pg011!)36KB@kN^pgz}zMvV>*8SKjhpfPnXnJ@l3$3 z|1U#m@`ymYMUe5{QH7nBtF*Tcx&+>Mi?bthF-afM&-(Mr{9^#f(d++L>XF}kK=%%o)U{FuC zV(G)Co9XH89%cAF@gA zbR<9mBtQZrKmsH{0wh2JBtQZrFfjqH|35JwhLQjYkN^pg011!)36KB@kN^o>-~{|- z?WRc~JAKmsH{0wh2J zBtQZrKmv1$faCm=p8qdOMbs(xsJqd9KIb#1RHZT`KmsH{0wh2JBtQZrKmsH{0wi#5 z1oU|3xg{xyoh|X1ibvyA5*4SJP9_oOn>8znGxWd1{=YWem{m^5j1G?zjox#ZfN%d_ zZ|(oES5y8n;K9j5;;g6{uo;Z@L@S&{$=kN^pg011!)36KB@kN^qHG=Y%w zoSy%`6umr|nlGji_N~h*-2V@;%>4gVh=#Ygpf!b+r#Sz=cxS)y7Y1nbHwi4>@~`2f zzc4`Y?tzn@|DO!Z+c!J;ZO1Nuvg=>zzJSHM{!K8ezs%fxy&vGo5HP+L7*B^9PMY2S za!*=P#Cc6}&j>;GG4lQ+KWD)q_tJP_Jp@-&}_v=$}c z9dvrlf8$Y&irXfL=ymg(l&orxM4pGg)1J51^FHf&Ad@W>_i{j%jJ zjXT0vxj|Me@LdC@^5b1J=ijbS5K%G7%+ae5$YKB{pyelbn-n4Kkq>yH(yS7Y!72|s 
zFrU94bNQpH9rO8zAaPE=rOqP>kN^pg011!)36KB@kN^pg00~Tufa7FxWBws^$~~I< zHnseiiUdf21W14cNPq-LfCNZ@1W14cNT8HJy4363eWsC#YiPSF%IjpL} zO6DM!PyA-=?lp+5(T89UJXnyG$>s)iT|S!?lgP#-Yrqy}xzd+SBH30{W)zh;1TAFW zu`a(FH>&IN8DWpJY?F@Ip*Eww2X(X&P2!N2#fEyVTAfb|O+Pp_nFob_0U6ZbrqkN~P8Ed{opC`rHmXZo50%c0}z)-X&2N2hazJ1<#o> zEhgLW+4IG4lh)}$2?J2^Fr*v@;5H$FGX0vl^6xMZ5M?>%=K14P)AUxmuc!1_W+9 Mf;$7}PN3NT2T=lZkN^Mx literal 0 HcmV?d00001 diff --git a/tests/pgtests.py b/tests/pgtests.py new file mode 100644 index 00000000..a3e35334 --- /dev/null +++ b/tests/pgtests.py @@ -0,0 +1,422 @@ +#!/usr/bin/python + +# Unit tests for PostgreSQL on Linux (Fedora) +# This is a stripped down copy of the SQL Server tests. + +import sys, os, re +import unittest +from decimal import Decimal +from testutils import * + +_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' + +def _generate_test_string(length): + """ + Returns a string of composed of `seed` to make a string `length` characters long. + + To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are + tested with 3 lengths. This function helps us generate the test data. + + We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will + be hidden and to help us manually identify where a break occurs. + """ + if length <= len(_TESTSTR): + return _TESTSTR[:length] + + c = (length + len(_TESTSTR)-1) / len(_TESTSTR) + v = _TESTSTR * c + return v[:length] + +class PGTestCase(unittest.TestCase): + + # These are from the C++ code. Keep them up to date. + + # If we are reading a binary, string, or unicode value and do not know how large it is, we'll try reading 2K into a + # buffer on the stack. We then copy into a new Python object. + SMALL_READ = 2048 + + # A read guaranteed not to fit in the MAX_STACK_STACK stack buffer, but small enough to be used for varchar (4K max). 
+ LARGE_READ = 4000 + + SMALL_STRING = _generate_test_string(SMALL_READ) + LARGE_STRING = _generate_test_string(LARGE_READ) + + def __init__(self, connection_string, method_name): + unittest.TestCase.__init__(self, method_name) + self.connection_string = connection_string + + def setUp(self): + self.cnxn = pyodbc.connect(self.connection_string) + self.cursor = self.cnxn.cursor() + + for i in range(3): + try: + self.cursor.execute("drop table t%d" % i) + self.cnxn.commit() + except: + pass + + self.cnxn.rollback() + + + def tearDown(self): + try: + self.cursor.close() + self.cnxn.close() + except: + # If we've already closed the cursor or connection, exceptions are thrown. + pass + + def test_datasources(self): + p = pyodbc.dataSources() + self.assert_(isinstance(p, dict)) + + def test_getinfo_string(self): + value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + self.assert_(isinstance(value, str)) + + def test_getinfo_bool(self): + value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + self.assert_(isinstance(value, bool)) + + def test_getinfo_int(self): + value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + self.assert_(isinstance(value, (int, long))) + + def test_getinfo_smallint(self): + value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + self.assert_(isinstance(value, int)) + + + def test_negative_float(self): + value = -200 + self.cursor.execute("create table t1(n float)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(value, result) + + + def _test_strtype(self, sqltype, value, colsize=None): + """ + The implementation for string, Unicode, and binary tests. 
+ """ + assert colsize is None or (value is None or colsize >= len(value)) + + if colsize: + sql = "create table t1(s %s(%s))" % (sqltype, colsize) + else: + sql = "create table t1(s %s)" % sqltype + + self.cursor.execute(sql) + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), type(value)) + + if value is not None: + self.assertEqual(len(v), len(value)) + + self.assertEqual(v, value) + + # + # varchar + # + + def test_empty_varchar(self): + self._test_strtype('varchar', '', self.SMALL_READ) + + def test_null_varchar(self): + self._test_strtype('varchar', None, self.SMALL_READ) + + def test_large_null_varchar(self): + # There should not be a difference, but why not find out? + self._test_strtype('varchar', None, self.LARGE_READ) + + def test_small_varchar(self): + self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) + + def test_large_varchar(self): + self._test_strtype('varchar', self.LARGE_STRING, self.LARGE_READ) + + def test_varchar_many(self): + self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") + + v1 = 'ABCDEFGHIJ' * 30 + v2 = '0123456789' * 30 + v3 = '9876543210' * 30 + + self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); + row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() + + self.assertEqual(v1, row.c1) + self.assertEqual(v2, row.c2) + self.assertEqual(v3, row.c3) + + + + def test_small_decimal(self): + # value = Decimal('1234567890987654321') + value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) + self.cursor.execute("create table t1(d numeric(19))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + + def test_small_decimal_scale(self): + # The same as small_decimal, except 
with a different scale. This value exactly matches the ODBC documentation + # example in the C Data Types appendix. + value = '1000.10' + value = Decimal(value) + self.cursor.execute("create table t1(d numeric(20,6))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + + def test_negative_decimal_scale(self): + value = Decimal('-10.0010') + self.cursor.execute("create table t1(d numeric(19,4))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + + def _exec(self): + self.cursor.execute(self.sql) + + def test_close_cnxn(self): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + self.cursor.execute("create table t1(id integer, s varchar(20))") + self.cursor.execute("insert into t1 values (?,?)", 1, 'test') + self.cursor.execute("select * from t1") + + self.cnxn.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) 
+ self.sql = "select * from t1" + self.assertRaises(pyodbc.ProgrammingError, self._exec) + + def test_empty_string(self): + self.cursor.execute("create table t1(s varchar(20))") + self.cursor.execute("insert into t1 values(?)", "") + + def test_fixed_str(self): + value = "testing" + self.cursor.execute("create table t1(s char(7))") + self.cursor.execute("insert into t1 values(?)", "testing") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), str) + self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL + self.assertEqual(v, value) + + def test_negative_row_index(self): + self.cursor.execute("create table t1(s varchar(20))") + self.cursor.execute("insert into t1 values(?)", "1") + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(row[0], "1") + self.assertEquals(row[-1], "1") + + def test_version(self): + self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. + + def test_rowcount_delete(self): + self.assertEquals(self.cursor.rowcount, -1) + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, count) + + def test_rowcount_nodata(self): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over + the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a + zero return value. + """ + self.cursor.execute("create table t1(i int)") + # This is a different code path internally. 
+ self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, 0) + + def test_rowcount_select(self): + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("select * from t1") + self.assertEquals(self.cursor.rowcount, 4) + + # PostgreSQL driver fails here? + # def test_rowcount_reset(self): + # "Ensure rowcount is reset to -1" + # + # self.cursor.execute("create table t1(i int)") + # count = 4 + # for i in range(count): + # self.cursor.execute("insert into t1 values (?)", i) + # self.assertEquals(self.cursor.rowcount, 1) + # + # self.cursor.execute("create table t2(i int)") + # self.assertEquals(self.cursor.rowcount, -1) + + def test_lower_case(self): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + # Has to be set before creating the cursor, so we must recreate self.cursor. + + pyodbc.lowercase = True + self.cursor = self.cnxn.cursor() + + self.cursor.execute("create table t1(Abc int, dEf int)") + self.cursor.execute("select * from t1") + + names = [ t[0] for t in self.cursor.description ] + names.sort() + + self.assertEquals(names, [ "abc", "def" ]) + + # Put it back so other tests don't fail. + pyodbc.lowercase = False + + def test_row_description(self): + """ + Ensure Cursor.description is accessible as Row.cursor_description. 
+ """ + self.cursor = self.cnxn.cursor() + self.cursor.execute("create table t1(a int, b char(3))") + self.cnxn.commit() + self.cursor.execute("insert into t1 values(1, 'abc')") + + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(self.cursor.description, row.cursor_description) + + + def test_executemany(self): + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (i, str(i)) for i in range(1, 6) ] + + self.cursor.executemany("insert into t1(a, b) values (?,?)", params) + + # REVIEW: Without the cast, we get the following error: + # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) + + count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] + self.assertEqual(count, len(params)) + + self.cursor.execute("select a, b from t1 order by a") + rows = self.cursor.fetchall() + self.assertEqual(count, len(rows)) + + for param, row in zip(params, rows): + self.assertEqual(param[0], row[0]) + self.assertEqual(param[1], row[1]) + + + def test_executemany_failure(self): + """ + Ensure that an exception is raised if one query in an executemany fails. 
+ """ + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, 'good'), + ('error', 'not an int'), + (3, 'good') ] + + self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) + + + def test_row_slicing(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = row[:] + self.failUnless(result is row) + + result = row[:-1] + self.assertEqual(result, (1,2,3)) + + result = row[0:4] + self.failUnless(result is row) + + + def test_row_repr(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = str(row) + self.assertEqual(result, "(1, 2, 3, 4)") + + result = str(row[:-1]) + self.assertEqual(result, "(1, 2, 3)") + + result = str(row[:1]) + self.assertEqual(result, "(1,)") + + +def main(): + from optparse import OptionParser + parser = OptionParser(usage="usage: %prog [options] connection_string") + parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") + parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") + parser.add_option("-t", "--test", help="Run only the named test") + + (options, args) = parser.parse_args() + + if len(args) > 1: + parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') + + if not args: + connection_string = load_setup_connection_string('pgtests') + + if not connection_string: + parser.print_help() + raise SystemExit() + else: + connection_string = args[0] + + if options.verbose: + cnxn = pyodbc.connect(connection_string) + + print 'library:', os.path.abspath(pyodbc.__file__) + print 'odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER) + print 'driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER)) + print 'driver supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER) + cnxn.close() + + if options.test: + # Run a single test + if not options.test.startswith('test_'): + options.test = 'test_%s' % (options.test) + + s = unittest.TestSuite([ PGTestCase(connection_string, options.test) ]) + else: + # Run all tests in the class + + methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] + methods.sort() + s = unittest.TestSuite([ PGTestCase(connection_string, m) for m in methods ]) + + testRunner = unittest.TextTestRunner(verbosity=options.verbose) + result = testRunner.run(s) + +if __name__ == '__main__': + + # Add the build directory to the path so we're testing the latest build, not the installed version. + + add_to_path() + + import pyodbc + main() diff --git a/tests/sqlservertests.py b/tests/sqlservertests.py new file mode 100644 index 00000000..437209e6 --- /dev/null +++ b/tests/sqlservertests.py @@ -0,0 +1,972 @@ +#!/usr/bin/python +# -*- coding: latin-1 -*- + +usage = """\ +usage: %prog [options] connection_string + +Unit tests for SQL Server. To use, pass a connection string as the parameter. +The tests will create and drop tables t1 and t2 as necessary. + +These run using the version from the 'build' directory, not the version +installed into the Python directories. You must run python setup.py build +before running the tests. 
+ +You can also put the connection string into a setup.cfg file in the root of the project +(the same one setup.py would use) like so: + + [sqlservertests] + connection-string=DRIVER={SQL Server};SERVER=localhost;UID=uid;PWD=pwd;DATABASE=db + +The connection string above will use the 2000/2005 driver, even if SQL Server 2008 +is installed: + + 2000: DRIVER={SQL Server} + 2005: DRIVER={SQL Server} + 2008: DRIVER={SQL Server Native Client 10.0} +""" + +import sys, os, re +import unittest +from decimal import Decimal +from datetime import datetime, date, time +from os.path import join, getsize, dirname, abspath +from testutils import * + +_TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' + +def _generate_test_string(length): + """ + Returns a string of composed of `seed` to make a string `length` characters long. + + To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are + tested with 3 lengths. This function helps us generate the test data. + + We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will + be hidden and to help us manually identify where a break occurs. 
+ """ + if length <= len(_TESTSTR): + return _TESTSTR[:length] + + c = (length + len(_TESTSTR)-1) / len(_TESTSTR) + v = _TESTSTR * c + return v[:length] + +class SqlServerTestCase(unittest.TestCase): + + SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] + LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] + + ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] + UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] + IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] + + def __init__(self, method_name, connection_string): + unittest.TestCase.__init__(self, method_name) + self.connection_string = connection_string + + def setUp(self): + self.cnxn = pyodbc.connect(self.connection_string) + self.cursor = self.cnxn.cursor() + + for i in range(3): + try: + self.cursor.execute("drop table t%d" % i) + self.cnxn.commit() + except: + pass + + self.cnxn.rollback() + + def tearDown(self): + try: + self.cursor.close() + self.cnxn.close() + except: + # If we've already closed the cursor or connection, exceptions are thrown. 
+ pass + + def test_multiple_bindings(self): + "More than one bind and select on a cursor" + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", 1) + self.cursor.execute("insert into t1 values (?)", 2) + self.cursor.execute("insert into t1 values (?)", 3) + for i in range(3): + self.cursor.execute("select n from t1 where n < ?", 10) + self.cursor.execute("select n from t1 where n < 3") + + + def test_different_bindings(self): + self.cursor.execute("create table t1(n int)") + self.cursor.execute("create table t2(d datetime)") + self.cursor.execute("insert into t1 values (?)", 1) + self.cursor.execute("insert into t2 values (?)", datetime.now()) + + def test_datasources(self): + p = pyodbc.dataSources() + self.assert_(isinstance(p, dict)) + + def test_getinfo_string(self): + value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) + self.assert_(isinstance(value, str)) + + def test_getinfo_bool(self): + value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) + self.assert_(isinstance(value, bool)) + + def test_getinfo_int(self): + value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) + self.assert_(isinstance(value, (int, long))) + + def test_getinfo_smallint(self): + value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) + self.assert_(isinstance(value, int)) + + def test_noscan(self): + self.assertEqual(self.cursor.noscan, False) + self.cursor.noscan = True + self.assertEqual(self.cursor.noscan, True) + + def test_guid(self): + self.cursor.execute("create table t1(g1 uniqueidentifier)") + self.cursor.execute("insert into t1 values (newid())") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), str) + self.assertEqual(len(v), 36) + + def test_nextset(self): + self.cursor.execute("create table t1(i int)") + for i in range(4): + self.cursor.execute("insert into t1(i) values(?)", i) + + self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 
2 order by i") + + for i, row in enumerate(self.cursor): + self.assertEqual(i, row.i) + + self.assertEqual(self.cursor.nextset(), True) + + for i, row in enumerate(self.cursor): + self.assertEqual(i + 2, row.i) + + def test_fixed_unicode(self): + value = u"t\xebsting" + self.cursor.execute("create table t1(s nchar(7))") + self.cursor.execute("insert into t1 values(?)", u"t\xebsting") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), unicode) + self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL + self.assertEqual(v, value) + + + def _test_strtype(self, sqltype, value, colsize=None): + """ + The implementation for string, Unicode, and binary tests. + """ + assert colsize is None or (value is None or colsize >= len(value)) + + if colsize: + sql = "create table t1(s %s(%s))" % (sqltype, colsize) + else: + sql = "create table t1(s %s)" % sqltype + + self.cursor.execute(sql) + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), type(value)) + + if value is not None: + self.assertEqual(len(v), len(value)) + + self.assertEqual(v, value) + + # + # varchar + # + + def test_varchar_null(self): + self._test_strtype('varchar', None, 100) + + # Generate a test for each fencepost size: test_varchar_0, etc. 
+ def _maketest(value): + def t(self): + self._test_strtype('varchar', value, len(value)) + return t + for value in ANSI_FENCEPOSTS: + locals()['test_varchar_%s' % len(value)] = _maketest(value) + + def test_varchar_many(self): + self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") + + v1 = 'ABCDEFGHIJ' * 30 + v2 = '0123456789' * 30 + v3 = '9876543210' * 30 + + self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); + row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() + + self.assertEqual(v1, row.c1) + self.assertEqual(v2, row.c2) + self.assertEqual(v3, row.c3) + + def test_varchar_upperlatin(self): + self._test_strtype('varchar', 'á') + + # + # unicode + # + + def test_unicode_null(self): + self._test_strtype('nvarchar', None, 100) + + # Generate a test for each fencepost size: test_unicode_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('nvarchar', value, len(value)) + return t + for value in UNICODE_FENCEPOSTS: + locals()['test_unicode_%s' % len(value)] = _maketest(value) + + def test_unicode_upperlatin(self): + self._test_strtype('varchar', 'á') + + # + # binary + # + + def test_null_binary(self): + self._test_strtype('varbinary', None, 100) + + def test_large_null_binary(self): + # Bug 1575064 + self._test_strtype('varbinary', None, 4000) + + # Generate a test for each fencepost size: test_unicode_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('varbinary', buffer(value), len(value)) + return t + for value in ANSI_FENCEPOSTS: + locals()['test_binary_%s' % len(value)] = _maketest(value) + + # + # image + # + + def test_image_null(self): + self._test_strtype('image', None) + + # Generate a test for each fencepost size: test_unicode_0, etc. 
+ def _maketest(value): + def t(self): + self._test_strtype('image', buffer(value)) + return t + for value in IMAGE_FENCEPOSTS: + locals()['test_image_%s' % len(value)] = _maketest(value) + + def test_image_upperlatin(self): + self._test_strtype('image', buffer('á')) + + # + # text + # + + # def test_empty_text(self): + # self._test_strtype('text', buffer('')) + + def test_null_text(self): + self._test_strtype('text', None) + + # Generate a test for each fencepost size: test_unicode_0, etc. + def _maketest(value): + def t(self): + self._test_strtype('text', value) + return t + for value in ANSI_FENCEPOSTS: + locals()['test_text_%s' % len(value)] = _maketest(value) + + def test_text_upperlatin(self): + self._test_strtype('text', 'á') + + # + # bit + # + + def test_bit(self): + value = True + self.cursor.execute("create table t1(b bit)") + self.cursor.execute("insert into t1 values (?)", value) + v = self.cursor.execute("select b from t1").fetchone()[0] + self.assertEqual(type(v), bool) + self.assertEqual(v, value) + + def test_bit_string_true(self): + self.cursor.execute("create table t1(b bit)") + self.cursor.execute("insert into t1 values (?)", "xyzzy") + v = self.cursor.execute("select b from t1").fetchone()[0] + self.assertEqual(type(v), bool) + self.assertEqual(v, True) + + def test_bit_string_false(self): + self.cursor.execute("create table t1(b bit)") + self.cursor.execute("insert into t1 values (?)", "") + v = self.cursor.execute("select b from t1").fetchone()[0] + self.assertEqual(type(v), bool) + self.assertEqual(v, False) + # + # decimal + # + + def test_small_decimal(self): + # value = Decimal('1234567890987654321') + value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) + self.cursor.execute("create table t1(d numeric(19))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + 
self.assertEqual(v, value) + + + def test_small_decimal_scale(self): + # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation + # example in the C Data Types appendix. + value = '1000.10' + value = Decimal(value) + self.cursor.execute("create table t1(d numeric(20,6))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + + def test_negative_decimal_scale(self): + value = Decimal('-10.0010') + self.cursor.execute("create table t1(d numeric(19,4))") + self.cursor.execute("insert into t1 values(?)", value) + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), Decimal) + self.assertEqual(v, value) + + def test_subquery_params(self): + """Ensure parameter markers work in a subquery""" + self.cursor.execute("create table t1(id integer, s varchar(20))") + self.cursor.execute("insert into t1 values (?,?)", 1, 'test') + row = self.cursor.execute(""" + select x.id + from ( + select id + from t1 + where s = ? + and id between ? and ? + ) x + """, 'test', 1, 10).fetchone() + self.assertNotEqual(row, None) + self.assertEqual(row[0], 1) + + def _exec(self): + self.cursor.execute(self.sql) + + def test_close_cnxn(self): + """Make sure using a Cursor after closing its connection doesn't crash.""" + + self.cursor.execute("create table t1(id integer, s varchar(20))") + self.cursor.execute("insert into t1 values (?,?)", 1, 'test') + self.cursor.execute("select * from t1") + + self.cnxn.close() + + # Now that the connection is closed, we expect an exception. (If the code attempts to use + # the HSTMT, we'll get an access violation instead.) 
+ self.sql = "select * from t1" + self.assertRaises(pyodbc.ProgrammingError, self._exec) + + def test_empty_string(self): + self.cursor.execute("create table t1(s varchar(20))") + self.cursor.execute("insert into t1 values(?)", "") + + def test_fixed_str(self): + value = "testing" + self.cursor.execute("create table t1(s char(7))") + self.cursor.execute("insert into t1 values(?)", "testing") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), str) + self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL + self.assertEqual(v, value) + + def test_empty_unicode(self): + self.cursor.execute("create table t1(s nvarchar(20))") + self.cursor.execute("insert into t1 values(?)", u"") + + def test_unicode_query(self): + self.cursor.execute(u"select 1") + + def test_negative_row_index(self): + self.cursor.execute("create table t1(s varchar(20))") + self.cursor.execute("insert into t1 values(?)", "1") + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(row[0], "1") + self.assertEquals(row[-1], "1") + + def test_version(self): + self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. + + # + # date, time, datetime + # + + def test_datetime(self): + value = datetime(2007, 1, 15, 3, 4, 5) + + self.cursor.execute("create table t1(dt datetime)") + self.cursor.execute("insert into t1 values (?)", value) + + result = self.cursor.execute("select dt from t1").fetchone()[0] + self.assertEquals(value, result) + + def test_datetime_fraction(self): + # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime + # supported is xxx000. 
+ + value = datetime(2007, 1, 15, 3, 4, 5, 123000) + + self.cursor.execute("create table t1(dt datetime)") + self.cursor.execute("insert into t1 values (?)", value) + + result = self.cursor.execute("select dt from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_datetime_fraction_rounded(self): + # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the + # database supports. + + full = datetime(2007, 1, 15, 3, 4, 5, 123456) + rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) + + self.cursor.execute("create table t1(dt datetime)") + self.cursor.execute("insert into t1 values (?)", full) + + result = self.cursor.execute("select dt from t1").fetchone()[0] + self.assertEquals(result, rounded) + + def test_date(self): + value = date(2001, 1, 1) + + self.cursor.execute("create table t1(dt date)") + self.cursor.execute("insert into t1 values (?)", value) + + result = self.cursor.execute("select dt from t1").fetchone()[0] + self.assertEquals(type(result), type(value)) + self.assertEquals(result, value) + + # + # ints and floats + # + + def test_int(self): + value = 1234 + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_negative_int(self): + value = -1 + self.cursor.execute("create table t1(n int)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_bigint(self): + input = 3000000000 + self.cursor.execute("create table t1(d bigint)") + self.cursor.execute("insert into t1 values (?)", input) + result = self.cursor.execute("select d from t1").fetchone()[0] + self.assertEqual(result, input) + + def test_float(self): + value = 1234.567 + self.cursor.execute("create table t1(n float)") + self.cursor.execute("insert into t1 
values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEquals(result, value) + + def test_negative_float(self): + value = -200 + self.cursor.execute("create table t1(n float)") + self.cursor.execute("insert into t1 values (?)", value) + result = self.cursor.execute("select n from t1").fetchone()[0] + self.assertEqual(value, result) + + + # Not supported in 2005. Get 2008 beta. + # def test_date(self): + # value = date.today() + # + # self.cursor.execute("create table t1(dt datetime)") + # self.cursor.execute("insert into t1 values (?)", value) + # + # result = self.cursor.execute("select dt from t1").fetchone()[0] + # self.assertEquals(value, result) + + # Not supported in 2005. Get 2008 beta. + # def test_time(self): + # value = datetime.now().time() + # + # self.cursor.execute("create table t1(dt datetime)") + # self.cursor.execute("insert into t1 values (?)", value) + # + # result = self.cursor.execute("select dt from t1").fetchone()[0] + # self.assertEquals(value, result) + + # + # stored procedures + # + + def test_sp_results(self): + self.cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_select]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_select] + """) + self.cursor.execute( + """ + Create procedure test_select + AS + select top 10 name, id, xtype, refdate + from sysobjects + """) + rows = self.cursor.execute("exec test_select").fetchall() + self.assertEquals(type(rows), list) + self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects + self.assertEquals(type(rows[0].refdate), datetime) + + # Note: This will fail because the last thing in the stored procedure is an implicit drop, not a select! 
+ # + def test_sp_results_from_temp(self): + self.cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_select]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_select] + """) + self.cursor.execute( + """ + Create procedure test_select + AS + select top 10 name, id, xtype, refdate + into #tmptable + from sysobjects + + select * from #tmptable + """) + # Because we're using a temporary table, two results are returned: + # + # (1) the results of the drop table, which is the number of rows in the temporary table + # (2) the result set from the select. + # + # Ignore the first result (which is just cursor.rowcount) + + self.cursor.execute("exec test_select") + self.assertEquals(self.cursor.rowcount, 10) # (1) + self.assert_(self.cursor.description is None) + + self.assert_(self.cursor.nextset()) # (2) + self.assert_(self.cursor.description is not None) + self.assert_(len(self.cursor.description) == 4) + + rows = self.cursor.fetchall() + self.assertEquals(type(rows), list) + self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects + self.assertEquals(type(rows[0].refdate), datetime) + + + def test_sp_results_from_temp2(self): + self.cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_select]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_select] + """) + self.cursor.execute( + """ + Create procedure test_select + AS + select top 10 name, id, xtype, refdate + into #tmptable + from sysobjects + + select * from #tmptable + """) + # Because we're using a temporary table, two results are returned: + # + # (1) the results of the drop table, which is the number of rows in the temporary table + # (2) the result set from the select. + # + # Ignore the first result (which is just cursor.rowcount) + + # Note: Try dynamically figuring out whether nextset() is required or not. 
+ + self.cursor.execute("exec test_select") + while self.cursor.description is None: + if not self.cursor.nextset(): + raise SystemExit('No result set!') + + rows = self.cursor.fetchall() + self.assertEquals(type(rows), list) + self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects + self.assertEquals(type(rows[0].refdate), datetime) + + + def test_sp_results_from_vartbl(self): + self.cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_select]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_select] + """) + self.cursor.execute( + """ + Create procedure test_select + AS + declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) + + insert into @tmptbl + select top 10 name, id, xtype, refdate + from sysobjects + + select * from @tmptbl + """) + # Because we're using a temporary table, two results are returned: + # + # (1) the results of the drop table, which is the number of rows in the temporary table + # (2) the result set from the select. + # + # Ignore the first result (which is just cursor.rowcount) + + self.cursor.execute("exec test_select") + self.assertEquals(self.cursor.rowcount, 10) # (1) + self.assert_(self.cursor.nextset()) # (2) + + rows = self.cursor.fetchall() + self.assertEquals(type(rows), list) + self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects + self.assertEquals(type(rows[0].refdate), datetime) + + def test_sp_with_dates(self): + # Reported in the forums that passing two datetimes to a stored procedure doesn't work. 
+ self.cursor.execute( + """ + if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) + drop procedure [dbo].[test_sp] + """) + self.cursor.execute( + """ + create procedure test_sp(@d1 datetime, @d2 datetime) + AS + declare @d as int + set @d = datediff(year, @d1, @d2) + select @d + """) + self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) + rows = self.cursor.fetchall() + self.assert_(rows is not None) + self.assert_(rows[0][0] == 0) # 0 years apart + + # + # misc + # + + def test_rowcount_delete(self): + self.assertEquals(self.cursor.rowcount, -1) + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, count) + + def test_rowcount_nodata(self): + """ + This represents a different code path than a delete that deleted something. + + The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over + the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a + zero return value. + """ + self.cursor.execute("create table t1(i int)") + # This is a different code path internally. + self.cursor.execute("delete from t1") + self.assertEquals(self.cursor.rowcount, 0) + + def test_rowcount_select(self): + """ + Ensure Cursor.rowcount is set properly after a select statement. + + pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a + select statement, so we'll test for that behavior. This is valid behavior according to the DB API + specification, but people don't seem to like it. 
+ """ + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.cursor.execute("select * from t1") + self.assertEquals(self.cursor.rowcount, -1) + + rows = self.cursor.fetchall() + self.assertEquals(len(rows), count) + self.assertEquals(self.cursor.rowcount, -1) + + def test_rowcount_reset(self): + "Ensure rowcount is reset to -1" + + self.cursor.execute("create table t1(i int)") + count = 4 + for i in range(count): + self.cursor.execute("insert into t1 values (?)", i) + self.assertEquals(self.cursor.rowcount, 1) + + self.cursor.execute("create table t2(i int)") + self.assertEquals(self.cursor.rowcount, -1) + + def test_lower_case(self): + "Ensure pyodbc.lowercase forces returned column names to lowercase." + + # Has to be set before creating the cursor, so we must recreate self.cursor. + + pyodbc.lowercase = True + self.cursor = self.cnxn.cursor() + + self.cursor.execute("create table t1(Abc int, dEf int)") + self.cursor.execute("select * from t1") + + names = [ t[0] for t in self.cursor.description ] + names.sort() + + self.assertEquals(names, [ "abc", "def" ]) + + # Put it back so other tests don't fail. + pyodbc.lowercase = False + + def test_row_description(self): + """ + Ensure Cursor.description is accessible as Row.cursor_description. + """ + self.cursor = self.cnxn.cursor() + self.cursor.execute("create table t1(a int, b char(3))") + self.cnxn.commit() + self.cursor.execute("insert into t1 values(1, 'abc')") + + row = self.cursor.execute("select * from t1").fetchone() + self.assertEquals(self.cursor.description, row.cursor_description) + + + def test_temp_select(self): + # A project was failing to create temporary tables via select into. 
+ self.cursor.execute("create table t1(s char(7))") + self.cursor.execute("insert into t1 values(?)", "testing") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), str) + self.assertEqual(v, "testing") + + self.cursor.execute("select s into t2 from t1") + v = self.cursor.execute("select * from t1").fetchone()[0] + self.assertEqual(type(v), str) + self.assertEqual(v, "testing") + + + def test_money(self): + d = Decimal('123456.78') + self.cursor.execute("create table t1(i int identity(1,1), m money)") + self.cursor.execute("insert into t1(m) values (?)", d) + v = self.cursor.execute("select m from t1").fetchone()[0] + self.assertEqual(v, d) + + + def test_executemany(self): + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (i, str(i)) for i in range(1, 6) ] + + self.cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = self.cursor.execute("select count(*) from t1").fetchone()[0] + self.assertEqual(count, len(params)) + + self.cursor.execute("select a, b from t1 order by a") + rows = self.cursor.fetchall() + self.assertEqual(count, len(rows)) + + for param, row in zip(params, rows): + self.assertEqual(param[0], row[0]) + self.assertEqual(param[1], row[1]) + + + def test_executemany_one(self): + "Pass executemany a single sequence" + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, "test") ] + + self.cursor.executemany("insert into t1(a, b) values (?,?)", params) + + count = self.cursor.execute("select count(*) from t1").fetchone()[0] + self.assertEqual(count, len(params)) + + self.cursor.execute("select a, b from t1 order by a") + rows = self.cursor.fetchall() + self.assertEqual(count, len(rows)) + + for param, row in zip(params, rows): + self.assertEqual(param[0], row[0]) + self.assertEqual(param[1], row[1]) + + + def test_executemany_failure(self): + """ + Ensure that an exception is raised if one query in an executemany fails. 
+ """ + self.cursor.execute("create table t1(a int, b varchar(10))") + + params = [ (1, 'good'), + ('error', 'not an int'), + (3, 'good') ] + + self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) + + + def test_row_slicing(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = row[:] + self.failUnless(result is row) + + result = row[:-1] + self.assertEqual(result, (1,2,3)) + + result = row[0:4] + self.failUnless(result is row) + + + def test_row_repr(self): + self.cursor.execute("create table t1(a int, b int, c int, d int)"); + self.cursor.execute("insert into t1 values(1,2,3,4)") + + row = self.cursor.execute("select * from t1").fetchone() + + result = str(row) + self.assertEqual(result, "(1, 2, 3, 4)") + + result = str(row[:-1]) + self.assertEqual(result, "(1, 2, 3)") + + result = str(row[:1]) + self.assertEqual(result, "(1,)") + + + def test_concatenation(self): + v2 = '0123456789' * 30 + v3 = '9876543210' * 30 + + self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") + self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) + + row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() + + self.assertEqual(row.both, v2 + v3) + + def test_view_select(self): + # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. + + # Create a table (t1) with 3 rows and a view (t2) into it. 
+ self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") + for i in range(3): + self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) + self.cursor.execute("create view t2 as select * from t1") + + # Select from the view + self.cursor.execute("select * from t2") + rows = self.cursor.fetchall() + self.assert_(rows is not None) + self.assert_(len(rows) == 3) + + + def test_autocommit(self): + self.assertEqual(self.cnxn.autocommit, False) + + othercnxn = pyodbc.connect(self.connection_string, autocommit=True) + self.assertEqual(othercnxn.autocommit, True) + + othercnxn.autocommit = False + self.assertEqual(othercnxn.autocommit, False) + + def test_sqlserver_callproc(self): + try: + self.cursor.execute("drop procedure pyodbctest") + self.cnxn.commit() + except: + pass + + self.cursor.execute("create table t1(s varchar(10))") + self.cursor.execute("insert into t1 values(?)", "testing") + + self.cursor.execute(""" + create procedure pyodbctest @var1 varchar(32) + as + begin + select s + from t1 + return + end + """) + self.cnxn.commit() + + # for row in self.cursor.procedureColumns('pyodbctest'): + # print row.procedure_name, row.column_name, row.column_type, row.type_name + + self.cursor.execute("exec pyodbctest 'hi'") + + # print self.cursor.description + # for row in self.cursor: + # print row.s + + +def main(): + from optparse import OptionParser + parser = OptionParser(usage=usage) + parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") + parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") + parser.add_option("-t", "--test", help="Run only the named test") + + (options, args) = parser.parse_args() + + if len(args) > 1: + parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') + + if not args: + connection_string = load_setup_connection_string('sqlservertests') + + if not connection_string: + parser.print_help() + raise SystemExit() + else: + connection_string = args[0] + + cnxn = pyodbc.connect(connection_string) + print_library_info(cnxn) + cnxn.close() + + suite = load_tests(SqlServerTestCase, options.test, connection_string) + + testRunner = unittest.TextTestRunner(verbosity=options.verbose) + result = testRunner.run(suite) + + +if __name__ == '__main__': + + # Add the build directory to the path so we're testing the latest build, not the installed version. + + add_to_path() + + import pyodbc + main() diff --git a/tests/testutils.py b/tests/testutils.py new file mode 100644 index 00000000..ea7d4fa6 --- /dev/null +++ b/tests/testutils.py @@ -0,0 +1,99 @@ + +import os, sys, platform +from os.path import join, dirname, abspath +import unittest + +def add_to_path(): + """ + Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested + without installing it. + """ + # Put the build directory into the Python path so we pick up the version we just built. + # + # To make this cross platform, we'll search the directories until we find the .pyd file. + + import imp + + library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] + library_names = [ 'pyodbc%s' % ext for ext in library_exts ] + + # Only go into directories that match our version number. + + dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1]) + + build = join(dirname(dirname(abspath(__file__))), 'build') + + for root, dirs, files in os.walk(build): + for d in dirs[:]: + if not d.endswith(dir_suffix): + dirs.remove(d) + + for name in library_names: + if name in files: + sys.path.insert(0, root) + return + + print >>sys.stderr, 'Did not find the pyodbc library in the build directory. Will use an installed version.' 
+ + +def print_library_info(cnxn): + import pyodbc + print 'python: %s' % sys.version + print 'pyodbc: %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__)) + print 'odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER) + print 'driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER)) + print ' supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER) + print 'os: %s' % platform.system() + + if platform.system() == 'Windows': + print ' %s' % ' '.join([s for s in platform.win32_ver() if s]) + + + +def load_tests(testclass, name, *args): + """ + Returns a TestSuite for tests in `testclass`. + + name + Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. + + args + Arguments for the test class constructor. These will be passed after the test method name. + """ + if name: + if not name.startswith('test_'): + name = 'test_%s' % name + names = [ name ] + + else: + names = [ method for method in dir(testclass) if method.startswith('test_') ] + + return unittest.TestSuite([ testclass(name, *args) for name in names ]) + + +def load_setup_connection_string(section): + """ + Attempts to read the default connection string from the setup.cfg file. + + If the file does not exist or if it exists but does not contain the connection string, None is returned. If the + file exists but cannot be parsed, an exception is raised. 
+ """ + from os.path import exists, join, dirname, splitext, basename + from ConfigParser import SafeConfigParser + + FILENAME = 'setup.cfg' + KEY = 'connection-string' + + path = join(dirname(dirname(abspath(__file__))), FILENAME) + + if exists(path): + try: + p = SafeConfigParser() + p.read(path) + except: + raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) + + if p.has_option(section, KEY): + return p.get(section, KEY) + + return None diff --git a/web/docs.html b/web/docs.html new file mode 100644 index 00000000..3f47bdf0 --- /dev/null +++ b/web/docs.html @@ -0,0 +1,1166 @@ + + + + +pyodbc + + + + +
+ pyodbc - A Python DB API module for ODBC +
+ + + +
+ + + +

This module implements the Python Database API Specification, +so you should first be familiar with it.

+ +

Quick Examples

+ +

Make a direct connection to a database and create a cursor:

+ +
+  cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=me;PWD=pass')
+  cursor = cnxn.cursor()
+ +

Select some values and print them:

+ +
+  cursor.execute("select user_id, user_name from users")
+  for row in cursor:
+      print row.user_id, row.user_name
+ +

Select the values, but use a more compact form. The execute function returns the cursor +object when a SELECT is executed, so execute can be moved into the for loop:

+ +
+  for row in cursor.execute("select user_id, user_name from users"):
+      print row.user_id, row.user_name
+ + +

Select a calculated value, giving it a name:

+ +
+  cursor.execute("select count(*) as user_count from users")
+  row = cursor.fetchone()
+  print '%d users' % row.user_count
+ +

Supply parameters:

+ +
+  cursor.execute("select count(*) as user_count from users where age > ?", 21)
+  row = cursor.fetchone()
+  print '%d users' % row.user_count
+ +

Delete some records and retrieve the count:

+ +
+  count = cursor.execute("delete from users where age < ?", 18)
+  print "deleted %s users" % count
+ +

Module Interface

+ +

connect(connectionstring, autocommit=False)

+ +
+
connectionstring
+
The ODBC connection string.
+ +
autocommit +
A Boolean that determines if the connection should be in autocommit mode or manual-commit + mode.
+
+ +

Returns a new Connection object.

+ +

The connection string is passed unmodified to SQLDriverConnect. +Connection strings can have driver specific components and you should refer to the +SQLDriverConnect or other ODBC documentation for details, but below are two common examples.

+ +

To connect using a DSN (a data source specified in the Data Access control panel applet), +use a string similar to the following.

+ +
+  cnxn = pyodbc.connect("DSN=dsnname")
+  cnxn = pyodbc.connect("DSN=dsnname;PWD=password")
+  cnxn = pyodbc.connect("DSN=dsnname;UID=user;PWD=password")
+ +

To connect to SQL Server directly (without a DSN), you must specify the server and database +to connect to using SQL Server-specific keywords. Note that the braces are required around the +driver name.

+ +
+  cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=server;DATABASE=database;UID=user;PWD=password')<br>
+ +

Module Description Variables

+
+
version
+
The pyodbc version as a string in the format major.minor.revision.
+ +
apilevel
+
The string constant '2.0' indicating this module supports DB API level 2.0.
+ +
lowercase
+
A Boolean that controls whether column names in result rows are lowercased. This can be + changed any time and affects queries executed after the change. The default is False. This + can be useful when database columns have inconsistent capitalization.
+ +
pooling
+
A Boolean indicating whether connection pooling is enabled. This is a global (HENV) + setting, so it can only be modified before the first connection is made. The default is + True, which enables ODBC connection pooling.
+ +
threadsafety
+
The integer 1, indicating that threads may share the module but not connections. Note + that connections and cursors may be used by different threads, just not at the same time.
+ +
qmark
+
The string constant "qmark" to indicate parameters are identified using question marks.
+
+ +

DB API Type Functions

+ +

The DB API defines a set of functions that convert from well-known types to data types +required by the database module. If your code does not need to be portable between database +modules (if you will only use pyodbc), you do not need to use these.

+ +
+ +
Date(year,month,day), DateFromTicks(ticks)
+
Both of these return + a datetime.date instance.
+ +
Time(hour,minute,second), TimeFromTicks(ticks)
+
Both of these return + a datetime.time instance.
+ +
Timestamp(year,month,day,hour,minute,second), TimestampFromTicks(ticks)
+
Both of these return + a datetime.datetime + instance.
+ +
DATETIME
+ +
Set to the datetime.datetime type. This is not + entirely accurate since dates and times actually use two different classes, datetime.date and + datetime.time, but there is no way to specify this.
+ +
STRING
+
Set to the string type.
+ +
NUMBER
+ +
Set to the float type. This is not entirely accurate since the module uses different types of numbers + for different ODBC data types. Instead of using this, simply pass int, float, double, or decimal objects.
+ +
ROWID
+ +
Set to the int type.
+ +
Binary(string)
+ +
Returns a buffer instance.
+ +
BINARY
+ +
Set to the buffer type.
+ +
+ +

Module Constants

+ +

The following ODBC constants are defined. They only used with ODBC specific functions such +as Cursor.tables.

+ +
    +
  • SQL_ACCESSIBLE_PROCEDURES
  • +
  • SQL_ACCESSIBLE_TABLES
  • +
  • SQL_ACTIVE_ENVIRONMENTS
  • +
  • SQL_AGGREGATE_FUNCTIONS
  • +
  • SQL_ALTER_DOMAIN
  • +
  • SQL_ALTER_TABLE
  • +
  • SQL_ASYNC_MODE
  • +
  • SQL_BATCH_ROW_COUNT
  • +
  • SQL_BATCH_SUPPORT
  • +
  • SQL_BIGINT
  • +
  • SQL_BINARY
  • +
  • SQL_BIT
  • +
  • SQL_BOOKMARK_PERSISTENCE
  • +
  • SQL_CATALOG_LOCATION
  • +
  • SQL_CATALOG_NAME
  • +
  • SQL_CATALOG_NAME_SEPARATOR
  • +
  • SQL_CATALOG_TERM
  • +
  • SQL_CATALOG_USAGE
  • +
  • SQL_CHAR
  • +
  • SQL_COLLATION_SEQ
  • +
  • SQL_COLUMN_ALIAS
  • +
  • SQL_CONCAT_NULL_BEHAVIOR
  • +
  • SQL_CONVERT_FUNCTIONS
  • +
  • SQL_CONVERT_VARCHAR
  • +
  • SQL_CORRELATION_NAME
  • +
  • SQL_CREATE_ASSERTION
  • +
  • SQL_CREATE_CHARACTER_SET
  • +
  • SQL_CREATE_COLLATION
  • +
  • SQL_CREATE_DOMAIN
  • +
  • SQL_CREATE_SCHEMA
  • +
  • SQL_CREATE_TABLE
  • +
  • SQL_CREATE_TRANSLATION
  • +
  • SQL_CREATE_VIEW
  • +
  • SQL_CURSOR_COMMIT_BEHAVIOR
  • +
  • SQL_CURSOR_ROLLBACK_BEHAVIOR
  • +
  • SQL_DATABASE_NAME
  • +
  • SQL_DATA_SOURCE_NAME
  • +
  • SQL_DATA_SOURCE_READ_ONLY
  • +
  • SQL_DATETIME_LITERALS
  • +
  • SQL_DBMS_NAME
  • +
  • SQL_DBMS_VER
  • +
  • SQL_DDL_INDEX
  • +
  • SQL_DECIMAL
  • +
  • SQL_DEFAULT_TXN_ISOLATION
  • +
  • SQL_DESCRIBE_PARAMETER
  • +
  • SQL_DM_VER
  • +
  • SQL_DOUBLE
  • +
  • SQL_DRIVER_NAME
  • +
  • SQL_DRIVER_ODBC_VER
  • +
  • SQL_DRIVER_VER
  • +
  • SQL_DROP_ASSERTION
  • +
  • SQL_DROP_CHARACTER_SET
  • +
  • SQL_DROP_COLLATION
  • +
  • SQL_DROP_DOMAIN
  • +
  • SQL_DROP_SCHEMA
  • +
  • SQL_DROP_TABLE
  • +
  • SQL_DROP_TRANSLATION
  • +
  • SQL_DROP_VIEW
  • +
  • SQL_DYNAMIC_CURSOR_ATTRIBUTES1
  • +
  • SQL_DYNAMIC_CURSOR_ATTRIBUTES2
  • +
  • SQL_EXPRESSIONS_IN_ORDERBY
  • +
  • SQL_FILE_USAGE
  • +
  • SQL_FLOAT
  • +
  • SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1
  • +
  • SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2
  • +
  • SQL_GETDATA_EXTENSIONS
  • +
  • SQL_GROUP_BY
  • +
  • SQL_GUID
  • +
  • SQL_IDENTIFIER_CASE
  • +
  • SQL_IDENTIFIER_QUOTE_CHAR
  • +
  • SQL_INDEX_KEYWORDS
  • +
  • SQL_INFO_SCHEMA_VIEWS
  • +
  • SQL_INSERT_STATEMENT
  • +
  • SQL_INTEGER
  • +
  • SQL_INTEGRITY
  • +
  • SQL_INTERVAL_DAY
  • +
  • SQL_INTERVAL_DAY_TO_HOUR
  • +
  • SQL_INTERVAL_DAY_TO_MINUTE
  • +
  • SQL_INTERVAL_DAY_TO_SECOND
  • +
  • SQL_INTERVAL_HOUR
  • +
  • SQL_INTERVAL_HOUR_TO_MINUTE
  • +
  • SQL_INTERVAL_HOUR_TO_SECOND
  • +
  • SQL_INTERVAL_MINUTE
  • +
  • SQL_INTERVAL_MINUTE_TO_SECOND
  • +
  • SQL_INTERVAL_MONTH
  • +
  • SQL_INTERVAL_SECOND
  • +
  • SQL_INTERVAL_YEAR
  • +
  • SQL_INTERVAL_YEAR_TO_MONTH
  • +
  • SQL_KEYSET_CURSOR_ATTRIBUTES1
  • +
  • SQL_KEYSET_CURSOR_ATTRIBUTES2
  • +
  • SQL_KEYWORDS
  • +
  • SQL_LIKE_ESCAPE_CLAUSE
  • +
  • SQL_LONGVARBINARY
  • +
  • SQL_LONGVARCHAR
  • +
  • SQL_MAX_ASYNC_CONCURRENT_STATEMENTS
  • +
  • SQL_MAX_BINARY_LITERAL_LEN
  • +
  • SQL_MAX_CATALOG_NAME_LEN
  • +
  • SQL_MAX_CHAR_LITERAL_LEN
  • +
  • SQL_MAX_COLUMNS_IN_GROUP_BY
  • +
  • SQL_MAX_COLUMNS_IN_INDEX
  • +
  • SQL_MAX_COLUMNS_IN_ORDER_BY
  • +
  • SQL_MAX_COLUMNS_IN_SELECT
  • +
  • SQL_MAX_COLUMNS_IN_TABLE
  • +
  • SQL_MAX_COLUMN_NAME_LEN
  • +
  • SQL_MAX_CONCURRENT_ACTIVITIES
  • +
  • SQL_MAX_CURSOR_NAME_LEN
  • +
  • SQL_MAX_DRIVER_CONNECTIONS
  • +
  • SQL_MAX_IDENTIFIER_LEN
  • +
  • SQL_MAX_INDEX_SIZE
  • +
  • SQL_MAX_PROCEDURE_NAME_LEN
  • +
  • SQL_MAX_ROW_SIZE
  • +
  • SQL_MAX_ROW_SIZE_INCLUDES_LONG
  • +
  • SQL_MAX_SCHEMA_NAME_LEN
  • +
  • SQL_MAX_STATEMENT_LEN
  • +
  • SQL_MAX_TABLES_IN_SELECT
  • +
  • SQL_MAX_TABLE_NAME_LEN
  • +
  • SQL_MAX_USER_NAME_LEN
  • +
  • SQL_MULTIPLE_ACTIVE_TXN
  • +
  • SQL_MULT_RESULT_SETS
  • +
  • SQL_NEED_LONG_DATA_LEN
  • +
  • SQL_NON_NULLABLE_COLUMNS
  • +
  • SQL_NO_NULLS
  • +
  • SQL_NULLABLE
  • +
  • SQL_NULLABLE_UNKNOWN
  • +
  • SQL_NULL_COLLATION
  • +
  • SQL_NUMERIC
  • +
  • SQL_NUMERIC_FUNCTIONS
  • +
  • SQL_ODBC_INTERFACE_CONFORMANCE
  • +
  • SQL_ODBC_VER
  • +
  • SQL_OJ_CAPABILITIES
  • +
  • SQL_ORDER_BY_COLUMNS_IN_SELECT
  • +
  • SQL_PARAM_ARRAY_ROW_COUNTS
  • +
  • SQL_PARAM_ARRAY_SELECTS
  • +
  • SQL_PC_NOT_PSEUDO
  • +
  • SQL_PC_PSEUDO
  • +
  • SQL_PC_UNKNOWN
  • +
  • SQL_PROCEDURES
  • +
  • SQL_PROCEDURE_TERM
  • +
  • SQL_QUOTED_IDENTIFIER_CASE
  • +
  • SQL_REAL
  • +
  • SQL_ROW_UPDATES
  • +
  • SQL_SCHEMA_TERM
  • +
  • SQL_SCHEMA_USAGE
  • +
  • SQL_SCOPE_CURROW
  • +
  • SQL_SCOPE_SESSION
  • +
  • SQL_SCOPE_TRANSACTION
  • +
  • SQL_SCROLL_OPTIONS
  • +
  • SQL_SEARCH_PATTERN_ESCAPE
  • +
  • SQL_SERVER_NAME
  • +
  • SQL_SMALLINT
  • +
  • SQL_SPECIAL_CHARACTERS
  • +
  • SQL_SQL92_DATETIME_FUNCTIONS
  • +
  • SQL_SQL92_FOREIGN_KEY_DELETE_RULE
  • +
  • SQL_SQL92_FOREIGN_KEY_UPDATE_RULE
  • +
  • SQL_SQL92_GRANT
  • +
  • SQL_SQL92_NUMERIC_VALUE_FUNCTIONS
  • +
  • SQL_SQL92_PREDICATES
  • +
  • SQL_SQL92_RELATIONAL_JOIN_OPERATORS
  • +
  • SQL_SQL92_REVOKE
  • +
  • SQL_SQL92_ROW_VALUE_CONSTRUCTOR
  • +
  • SQL_SQL92_STRING_FUNCTIONS
  • +
  • SQL_SQL92_VALUE_EXPRESSIONS
  • +
  • SQL_SQL_CONFORMANCE
  • +
  • SQL_STANDARD_CLI_CONFORMANCE
  • +
  • SQL_STATIC_CURSOR_ATTRIBUTES1
  • +
  • SQL_STATIC_CURSOR_ATTRIBUTES2
  • +
  • SQL_STRING_FUNCTIONS
  • +
  • SQL_SUBQUERIES
  • +
  • SQL_SYSTEM_FUNCTIONS
  • +
  • SQL_TABLE_TERM
  • +
  • SQL_TIMEDATE_ADD_INTERVALS
  • +
  • SQL_TIMEDATE_DIFF_INTERVALS
  • +
  • SQL_TIMEDATE_FUNCTIONS
  • +
  • SQL_TINYINT
  • +
  • SQL_TXN_CAPABLE
  • +
  • SQL_TXN_ISOLATION_OPTION
  • +
  • SQL_TYPE_DATE
  • +
  • SQL_TYPE_TIME
  • +
  • SQL_TYPE_TIMESTAMP
  • +
  • SQL_UNION
  • +
  • SQL_UNKNOWN_TYPE
  • +
  • SQL_USER_NAME
  • +
  • SQL_VARBINARY
  • +
  • SQL_VARCHAR
  • +
  • SQL_WCHAR
  • +
  • SQL_WLONGVARCHAR
  • +
  • SQL_WVARCHAR
  • +
  • SQL_XOPEN_CLI_YEAR
  • +
+ +

Connection Objects

+ +

autocommit

+ +

False if the connection is in manual-commit mode (the default), which is the mode described +by the DB API. True if the connection is in auto-commit mode. This can be set using the +autocommit keyword in the connection function or can be changed by setting this attribute.

+ +

searchesc

+ +

The search pattern escape character used to escape '%' and '_' in search patterns, as returned by +SQLGetInfo(SQL_SEARCH_PATTERN_ESCAPE). The value is driver specific.

+ +

execute(sql, [params])

+ +

This is a new method (not in the DB API) that creates a new Cursor object and returns +Cursor.execute(...). See Cursor.execute for a description of the +parameters.

+ +
+  for results in cnxn.execute("select user_id from tmp"):
+      print results.user_id
+ +

Since a new Cursor is created by each call, do not use when executing multiple statements in +a row.

+ +

close()

+ +

Close the connection now (rather than whenever __del__ is called). The connection will be +unusable from this point forward; a ProgrammingError exception will be raised if any operation +is attempted with the connection. The same applies to all cursor objects trying to use the +connection. Note that closing a connection without committing the changes first will cause an +implicit rollback to be performed.

+ +

commit()

+ +

Commit any pending transaction to the database.

+ +

Note that Connections do not (yet) support autocommit; Connection.commit() must +be called or changes will be rolled back when the connection is closed.

+ +

rollback()

+ +

Causes the the database to roll back to the start of any pending transaction.

+ +

cursor()

+ +

Return a new Cursor object using the connection.

+ +

getinfo(infotype)

+ +

Calls SQLGetInfo, passing infotype and returns the result as a Boolean, string, +integer, or long value. The return type is determined by infotype.

+ +

The infotype value should be one of the following constants, defined in the pyodbc module. +The table below shows the data type returned. See + +SQLGetInfo for the meaning of each constant.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ConstantReturn Type
SQL_ACCESSIBLE_PROCEDURESTrue or False
SQL_ACCESSIBLE_TABLESTrue or False
SQL_ACTIVE_ENVIRONMENTSint
SQL_AGGREGATE_FUNCTIONSint or long
SQL_ALTER_DOMAINint or long
SQL_ALTER_TABLEint or long
SQL_ASYNC_MODEint or long
SQL_BATCH_ROW_COUNTint or long
SQL_BATCH_SUPPORTint or long
SQL_BOOKMARK_PERSISTENCEint or long
SQL_CATALOG_LOCATIONint
SQL_CATALOG_NAMETrue or False
SQL_CATALOG_NAME_SEPARATORstring
SQL_CATALOG_TERMstring
SQL_CATALOG_USAGEint or long
SQL_COLLATION_SEQstring
SQL_COLUMN_ALIASTrue or False
SQL_CONCAT_NULL_BEHAVIORint
SQL_CONVERT_FUNCTIONSint or long
SQL_CONVERT_VARCHARint or long
SQL_CORRELATION_NAMEint
SQL_CREATE_ASSERTIONint or long
SQL_CREATE_CHARACTER_SETint or long
SQL_CREATE_COLLATIONint or long
SQL_CREATE_DOMAINint or long
SQL_CREATE_SCHEMAint or long
SQL_CREATE_TABLEint or long
SQL_CREATE_TRANSLATIONint or long
SQL_CREATE_VIEWint or long
SQL_CURSOR_COMMIT_BEHAVIORint
SQL_CURSOR_ROLLBACK_BEHAVIORint
SQL_DATABASE_NAMEstring
SQL_DATA_SOURCE_NAMEstring
SQL_DATA_SOURCE_READ_ONLYTrue or False
SQL_DATETIME_LITERALSint or long
SQL_DBMS_NAMEstring
SQL_DBMS_VERstring
SQL_DDL_INDEXint or long
SQL_DEFAULT_TXN_ISOLATIONint or long
SQL_DESCRIBE_PARAMETERTrue or False
SQL_DM_VERstring
SQL_DRIVER_HDESCint or long
SQL_DRIVER_HENVint or long
SQL_DRIVER_HLIBint or long
SQL_DRIVER_HSTMTint or long
SQL_DRIVER_NAMEstring
SQL_DRIVER_ODBC_VERstring
SQL_DRIVER_VERstring
SQL_DROP_ASSERTIONint or long
SQL_DROP_CHARACTER_SETint or long
SQL_DROP_COLLATIONint or long
SQL_DROP_DOMAINint or long
SQL_DROP_SCHEMAint or long
SQL_DROP_TABLEint or long
SQL_DROP_TRANSLATIONint or long
SQL_DROP_VIEWint or long
SQL_DYNAMIC_CURSOR_ATTRIBUTES1int or long
SQL_DYNAMIC_CURSOR_ATTRIBUTES2int or long
SQL_EXPRESSIONS_IN_ORDERBYTrue or False
SQL_FILE_USAGEint
SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1int or long
SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2int or long
SQL_GETDATA_EXTENSIONSint or long
SQL_GROUP_BYint
SQL_IDENTIFIER_CASEint
SQL_IDENTIFIER_QUOTE_CHARstring
SQL_INDEX_KEYWORDSint or long
SQL_INFO_SCHEMA_VIEWSint or long
SQL_INSERT_STATEMENTint or long
SQL_INTEGRITYTrue or False
SQL_KEYSET_CURSOR_ATTRIBUTES1int or long
SQL_KEYSET_CURSOR_ATTRIBUTES2int or long
SQL_KEYWORDSstring
SQL_LIKE_ESCAPE_CLAUSETrue or False
SQL_MAX_ASYNC_CONCURRENT_STATEMENTSint or long
SQL_MAX_BINARY_LITERAL_LENint or long
SQL_MAX_CATALOG_NAME_LENint
SQL_MAX_CHAR_LITERAL_LENint or long
SQL_MAX_COLUMNS_IN_GROUP_BYint
SQL_MAX_COLUMNS_IN_INDEXint
SQL_MAX_COLUMNS_IN_ORDER_BYint
SQL_MAX_COLUMNS_IN_SELECTint
SQL_MAX_COLUMNS_IN_TABLEint
SQL_MAX_COLUMN_NAME_LENint
SQL_MAX_CONCURRENT_ACTIVITIESint
SQL_MAX_CURSOR_NAME_LENint
SQL_MAX_DRIVER_CONNECTIONSint
SQL_MAX_IDENTIFIER_LENint
SQL_MAX_INDEX_SIZEint or long
SQL_MAX_PROCEDURE_NAME_LENint
SQL_MAX_ROW_SIZEint or long
SQL_MAX_ROW_SIZE_INCLUDES_LONGTrue or False
SQL_MAX_SCHEMA_NAME_LENint
SQL_MAX_STATEMENT_LENint or long
SQL_MAX_TABLES_IN_SELECTint
SQL_MAX_TABLE_NAME_LENint
SQL_MAX_USER_NAME_LENint
SQL_MULTIPLE_ACTIVE_TXNTrue or False
SQL_MULT_RESULT_SETSTrue or False
SQL_NEED_LONG_DATA_LENTrue or False
SQL_NON_NULLABLE_COLUMNSint
SQL_NULL_COLLATIONint
SQL_NUMERIC_FUNCTIONSint or long
SQL_ODBC_INTERFACE_CONFORMANCEint or long
SQL_ODBC_VERstring
SQL_OJ_CAPABILITIESint or long
SQL_ORDER_BY_COLUMNS_IN_SELECTTrue or False
SQL_PARAM_ARRAY_ROW_COUNTSint or long
SQL_PARAM_ARRAY_SELECTSint or long
SQL_PROCEDURESTrue or False
SQL_PROCEDURE_TERMstring
SQL_QUOTED_IDENTIFIER_CASEint
SQL_ROW_UPDATESTrue or False
SQL_SCHEMA_TERMstring
SQL_SCHEMA_USAGEint or long
SQL_SCROLL_OPTIONSint or long
SQL_SEARCH_PATTERN_ESCAPEstring
SQL_SERVER_NAMEstring
SQL_SPECIAL_CHARACTERSstring
SQL_SQL92_DATETIME_FUNCTIONSint or long
SQL_SQL92_FOREIGN_KEY_DELETE_RULEint or long
SQL_SQL92_FOREIGN_KEY_UPDATE_RULEint or long
SQL_SQL92_GRANTint or long
SQL_SQL92_NUMERIC_VALUE_FUNCTIONSint or long
SQL_SQL92_PREDICATESint or long
SQL_SQL92_RELATIONAL_JOIN_OPERATORSint or long
SQL_SQL92_REVOKEint or long
SQL_SQL92_ROW_VALUE_CONSTRUCTORint or long
SQL_SQL92_STRING_FUNCTIONSint or long
SQL_SQL92_VALUE_EXPRESSIONSint or long
SQL_SQL_CONFORMANCEint or long
SQL_STANDARD_CLI_CONFORMANCEint or long
SQL_STATIC_CURSOR_ATTRIBUTES1int or long
SQL_STATIC_CURSOR_ATTRIBUTES2int or long
SQL_STRING_FUNCTIONSint or long
SQL_SUBQUERIESint or long
SQL_SYSTEM_FUNCTIONSint or long
SQL_TABLE_TERMstring
SQL_TIMEDATE_ADD_INTERVALSint or long
SQL_TIMEDATE_DIFF_INTERVALSint or long
SQL_TIMEDATE_FUNCTIONSint or long
SQL_TXN_CAPABLEint
SQL_TXN_ISOLATION_OPTIONint or long
SQL_UNIONint or long
SQL_USER_NAMEstring
SQL_XOPEN_CLI_YEARstring
+ +

Cursor Objects

+ +

These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors +created from the same connection are not isolated, i.e., any changes done to the database by a cursor are immediately +visible by the other cursors.

+ +

description

+ +

This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing +one result column: (name, type_code, display_size, internal_size, precision, scale, null_ok). pyodbc only provides +values for name, type_code, internal_size, and null_ok. The other values are set to None. + +This attribute will be None for operations that do not return rows or if the cursor has not had an operation invoked +via the executeXXX() method yet. + +The type_code member is the class type used to create the Python objects when reading rows. For example, a varchar +column's type will be str. The complete list of types supported is listed in the Data +Types section.

+ +

rowcount

+ +

This is always -1.

+ +

callproc(procname[,parameters])

+ +

This is not yet supported.

+ +

close()

+ +

Close the cursor now (rather than whenever __del__ is called). The cursor will be unusable from this point forward; +a ProgrammingError exception will be raised if any operation is attempted with the cursor.

+ +

execute(sql [,parameters])

+ +

Prepare and execute SQL. Parameters may be passed as a sequence, as specified by the DB API, or as individual +parameters.

+ +
+  # standard
+  cursor.execute("select a from tbl where b=? and c=?", (x, y))
+
+  # pyodbc extension
+  cursor.execute("select a from tbl where b=? and c=?", x, y)
+ +

The DB API specification does not specify the return value of this method. Cursors in pyodbc return different +things based on the SQL statement executed. Select statements return the Cursor object itself to allow more compact +code such as putting the execute method into for loops or appending fetchone or fetchall:

+ +
+  for row in cursor.execute("select album_id, photo_id from photos"):
+      print row.album_id, row.photo_id
+  
+  row = cursor.execute("select count(*) from tmp").fetchone()
+  
+  rows = cursor.execute("select * from tmp").fetchall()
+ +

Update and delete statements return the number of rows affected:

+ +
+  count = cursor.execute("update photos set processed=1 where user_id=1")
+
+  count = cursor.execute("delete from photos where user_id=1")
+ +

All other statements return None.

+ +

executemany(sql, seq_of_parameters)

+ +

Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings +found in the sequence seq_of_parameters. This method returns None.

+ +

fetchone()

+ +

Fetch the next row of a query result set, returning a single Row, or None when no more +data is available.

+ +

A ProgrammingError exception is raised if the previous call to executeXXX() did not produce any result set or no +call was issued yet.

+ +
+  cursor.execute("select user_name from photos where user_id=?", userid)
+  row = cursor.fetchone()
+  if row:
+      print row.user_name
+ +

nextset, setinputsizes, setoutputsize

+ +

These are optional in the API and are not supported.

+ +

fetchmany([size=cursor.arraysize])

+ +

Fetch the next set of rows of a query result, returning a list of Rows. An empty list is returned +when no more rows are available.

+ +

The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize, +which defaults to 1, determines the number of rows to be fetched. If this is not possible due to the specified number +of rows not being available, fewer rows may be returned.

+ +

A ProgrammingError exception is raised if the previous call to executeXXX() did not produce any result set or no +call was issued yet.

+ +

fetchall()

+ +

Fetch all remaining rows of a query result, returning them as a list of Rows. Since this reads +all rows into memory, it should not be used if there are a lot of rows. Consider iterating over the rows instead.

+ +

A ProgrammingError exception is raised if the previous call to executeXXX() did not produce any result set or no +call was issued yet.

+ +
+  cursor.execute("select photo_id from photos where user_id=1")
+  rows = cursor.fetchall()
+  for row in rows:
+      print row.user_name
+ +

__iter__, next

+ +

These methods allow a cursor to be used in a for loop, returning a single Row for +each iteration. This allows all rows to be visited easily.

+ +
+  cursor.execute("select photo_id from photos where user_id=1")
+  for row in cursor:
+      print row.photo_id
+ +

tables(table=None, catalog=None, schema=None, tableType=None)

+ +

Executes SQLTables and creates a results set of tables defined in the data source. Returns the Cursor.

+ +

The table, catalog, and schema interpret the '_' and '%' characters as wildcards. The escape character is driver +specific, so use Connection.searchescape.

+ +

Each row has the following columns. See the SQLTables documentation for more information.

+ +
    +
  1. table_cat: The catalog name.
  2. +
  3. table_schem: The schema name.
  4. +
  5. table_name: The table name.
  6. +
  7. table_type: One of 'TABLE', 'VIEW', 'SYSTEM TABLE', 'GLOBAL TEMPORARY', +'LOCAL TEMPORARY', 'ALIAS', 'SYNONYM', or a data source-specific type name.<br>
  8. +
+ +
+  for row in cursor.tables():
+      print row.table_name
+ +

columns(table=None, catalog=None, schema=None, column=None)

+ +

Creates a results set of column names in specified tables by executing the ODBC SQLColumns function. +Each row fetched has the following columns:

+ +
    +
  1. table_cat
  2. +
  3. table_schem
  4. +
  5. table_name
  6. +
  7. column_name
  8. +
  9. data_type
  10. +
  11. type_name
  12. +
  13. column_size
  14. +
  15. buffer_length
  16. +
  17. decimal_digits
  18. +
  19. num_prec_radix
  20. +
  21. nullable
  22. +
  23. remarks
  24. +
  25. column_def
  26. +
  27. sql_data_type
  28. +
  29. sql_datetime_sub
  30. +
  31. char_octet_length
  32. +
  33. ordinal_position
  34. +
  35. is_nullable: One of SQL_NULLABLE, SQL_NO_NULLS, SQL_NULLABLE_UNKNOWN.<br>
  36. +
+ + +

statistics(table, catalog=None, schema=None, unique=False, quick=True)

+ +Creates a results set of statistics about a single table and the indexes +associated with the table by executing SQLStatistics. + +
+
unique
+
If True, only unique indexes are returned. Otherwise all +indexes are returned.<br>
+ +
quick
+
If True, CARDINALITY and PAGES are returned only if they are +readily available from the server
+
+ +

Each row fetched has the following columns:

+ +
    +
  1. table_cat
  2. +
  3. table_schem
  4. +
  5. table_name
  6. +
  7. non_unique
  8. +
  9. index_qualifier
  10. +
  11. index_name
  12. +
  13. type
  14. +
  15. ordinal_position
  16. +
  17. column_name
  18. +
  19. asc_or_desc
  20. +
  21. cardinality
  22. +
  23. pages
  24. +
  25. filter_condition
  26. +
+ +

rowIdColumns(table, catalog=None, schema=None, nullable=True)

+ +

Executes SQLSpecialColumns with SQL_BEST_ROWID which creates a result set of columns that uniquely identify a row. +Returns the Cursor object. Each row fetched has the following columns.

+ +
    +
  1. scope: One of SQL_SCOPE_CURROW, SQL_SCOPE_TRANSACTION, or SQL_SCOPE_SESSION
  2. +
  3. column_name
  4. +
  5. data_type: The ODBC SQL data type constant (e.g. SQL_CHAR)
  6. +
  7. type_name
  8. +
  9. column_size
  10. +
  11. buffer_length
  12. +
  13. decimal_digits
  14. +
  15. pseudo_column: One of SQL_PC_UNKNOWN, SQL_PC_NOT_PSEUDO, SQL_PC_PSEUDO
  16. +
+ +

rowVerColumns(table, catalog=None, schema=None, nullable=True)

+ +

Executes SQLSpecialColumns with SQL_ROWVER which creates a result set of +columns that are automatically updated when any value in the row is updated. +Returns the Cursor object. Each row fetched has the following columns.

+ +
    +
  1. scope: One of SQL_SCOPE_CURROW, SQL_SCOPE_TRANSACTION, or SQL_SCOPE_SESSION
  2. +
  3. column_name
  4. +
  5. data_type: The ODBC SQL data type constant (e.g. SQL_CHAR)
  6. +
  7. type_name
  8. +
  9. column_size
  10. +
  11. buffer_length
  12. +
  13. decimal_digits
  14. +
  15. pseudo_column: One of SQL_PC_UNKNOWN, SQL_PC_NOT_PSEUDO, SQL_PC_PSEUDO
  16. +
+ + +

primaryKeys(table, catalog=None, schema=None)

+ +

Creates a results set of column names that make up the primary key for a +table by executing the SQLPrimaryKeys function. Each row fetched has the +following columns:

+ +
    +
  1. table_cat
  2. +
  3. table_schem
  4. +
  5. table_name
  6. +
  7. column_name
  8. +
  9. key_seq
  10. +
  11. pk_name
  12. +
+ +

foreignKeys(table=None, catalog=None, schema=None, +foreignTable=None, foreignCatalog=None, foreignSchema=None)

+ +

Executes the SQLForeignKeys function and creates a results set of column +names that are foreign keys in the specified table (columns in the specified +table that refer to primary keys in other tables) or foreign keys in other +tables that refer to the primary key in the specified table. Each row fetched +has the following columns:

+ +
    +
  1. pktable_cat
  2. +
  3. pktable_schem
  4. +
  5. pktable_name
  6. +
  7. pkcolumn_name
  8. +
  9. fktable_cat
  10. +
  11. fktable_schem
  12. +
  13. fktable_name
  14. +
  15. fkcolumn_name
  16. +
  17. key_seq
  18. +
  19. update_rule
  20. +
  21. delete_rule
  22. +
  23. fk_name
  24. +
  25. pk_name
  26. +
  27. deferrability
  28. +
+ +

procedures(procedure=None, catalog=None, schema=None)

+ +

Executes SQLProcedures and creates a result set of information about the +procedures in the data source. Each row fetched has the following columns:

+ +
    +
  1. procedure_cat
  2. +
  3. procedure_schem
  4. +
  5. procedure_name
  6. +
  7. num_input_params
  8. +
  9. num_output_params
  10. +
  11. num_result_sets
  12. +
  13. remarks
  14. +
  15. procedure_type
  16. +
+ +

getTypeInfo(sqlType=None)

+ +

Executes SQLGetTypeInfo a creates a result set with information about the +specified data type or all data types supported by the ODBC driver if not +specified. Each row fetched has the following columns:

+ +
    +
  1. type_name
  2. +
  3. data_type
  4. +
  5. column_size
  6. +
  7. literal_prefix
  8. +
  9. literal_suffix
  10. +
  11. create_params
  12. +
  13. nullable
  14. +
  15. case_sensitive
  16. +
  17. searchable
  18. +
  19. unsigned_attribute
  20. +
  21. fixed_prec_scale
  22. +
  23. auto_unique_value
  24. +
  25. local_type_name
  26. +
  27. minimum_scale
  28. +
  29. maximum_scale
  30. +
  31. sql_data_type
  32. +
  33. sql_datetime_sub
  34. +
  35. num_prec_radix
  36. +
  37. interval_precision
  38. +
+ +

Row Objects

+ +

cursor_description

+ +

The column metadata from Cursor.description is also accessible from Row objects as +the cursor_description attribute. This is convenient when Rows are used as ad-hoc +data structures and are passed to other functions that need the metadata; now the Cursor does +not need to be passed with them.

+ +
+row = cursor.execute("select name, account_id from persons").fetchone()
+column_names = [ t[0] for t in row.cursor_description ]
+
+ +

Accessing Values

+ +

The DB API specifies that results must be tuple-like, so columns are normally accessed by indexing into the +sequence (e.g. row[0]) and pyodbc supports this. However, columns can also be accessed by name:

+ +
+cursor.execute("select album_id, photo_id from photos where user_id=1")
+row = cursor.fetchone()
+print row.album_id, row.photo_id 
+print row[0], row[1] # same as above, but less readable
+ +

This makes the code easier to maintain when modifying SQL, more readable, and allows rows to be used where a +custom class might otherwise be used. All rows from a single execute share the same dictionary of +column names, so using Row objects to hold a large result set may also use less memory than creating a object for +each row.

+ +

The SQL "as" keyword allows the name of a column in the result set to be specified. This is useful if a column +name has a spaces or if there is no name:

+ +
+cursor.execute("select count(*) as photo_count from photos where user_id=1")
+row = cursor.fetchone()
+print row.photo_count
+ +

Rows Are Mutable

+ +

Though SQL is very powerful, values sometimes need to be modified before they can be used. +Rows allow their values to be replaced, which makes Rows even more convenient ad-hoc data +structures.

+ +
+# Replace a datetime in each row with one that has a time zone.
+rows = cursor.fetchall()
+for row in rows:
+  row.start_date = row.start_date.astimezone(tz)
+ +

Note that only existing columns can be replaced; new columns cannot be added to rows. If +you want to add a value that doesn't exist in the database, add a NULL to the select statement +as a placeholder:

+ +
+row = cursor.execute("select name, NULL as account_id from persons").fetchone()
+row.account_id = 1
+
+ +

Data Types

+ +

The following table shows the ODBC data types supported and the Python type used to represent values. +None is always used for NULL values.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ODBCPython
char, varchar, longvarchar, GUIDstring
wchar, wvarchar, wlongvarcharunicode
smallint, integer, tinyintint
bigintlong
decimal, numericdecimal
real, float, doubledouble
datedatetime.date
timedatetime.time
timestampdatetime.datetime
bitbool
binary, varbinary, longvarbinarybuffer
+ +

Errors

+ +

When an error occurs, the type of exception raised is based on the SQLSTATE.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SQLSTATEException Class
0A000NotSupportedError
22xxxDataError
23xxxIntegrityError
40002IntegrityError
24xxx, 25xxx, 42xxxProgrammingError
All OthersDatabaseError
+ +

Catalog Functions

+ +

Most of the ODBC catalog functions are available as methods on Cursor objects. The results +are presented as SELECT results in rows that are fetched normally. Refer to Microsoft's ODBC +documentation for details of how to use each function.

+ +
+cnxn   = pyodbc.connect(...)
+cursor = cnxn.cursor()
+for row in cursor.tables():
+    print row.table_name
+ +

Some of the parameters, such as table in Cursor.tables (SQLTables) accept +search patterns. In these parameters, the underscore character (_) represents a +single-character wildcard and the percent character (%) represents any sequence of zero or more +characters. To include these characters as literals, precede them with the escape character +Connection.searchesc. (The escape character is driver dependent.)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ODBC FunctionMethodDescription
SQLTablesCursor.tablesReturns a list of table, catalog, or schema names, and table types.
SQLColumnsCursor.columnsReturns a list of column names in specified tables.
SQLStatisticsCursor.statisticsReturns a list of statistics about a single table and the indexes associated with the table.
SQLSpecialColumnsCursor.rowIdColumnsReturns a list of columns that uniquely identify a row.
SQLSpecialColumnsCursor.rowVerColumnsReturns a list of columns that are automatically updated when any value in the row is updated.
SQLPrimaryKeysCursor.primaryKeysReturns a list of column names that make up the primary key for a table.
SQLForeignKeysCursor.foreignKeysReturns a list of column names that are foreign keys in the specified table (columns + in the specified table that refer to primary keys in other tables) or foreign keys in + other tables that refer to the primary key in the specified table.
SQLProceduresCursor.proceduresReturns information about the procedures in the data source.
SQLGetTypeInfoCursor.getTypeInfoReturns information about the specified data type or all data types supported by the driver.
+ +
+ +
+SourceForge.net Logo + + + diff --git a/web/index.html b/web/index.html new file mode 100644 index 00000000..c29e6b0b --- /dev/null +++ b/web/index.html @@ -0,0 +1,234 @@ + + + +pyodbc + + + + +
+ pyodbc - A Python DB API module for ODBC +
+ + + +
+ +

About pyodbc

+ +

pyodbc is a Python module that allows you to access ODBC databases. It implements the + Python Database API Specification v2.0.

+ +

Some notable features include:

+ +
    +
  • The library is free for commercial and personal use.
  • +
  • It conforms to the DB API standard.
  • +
  • No 3rd party libraries are required. Only native Python datatypes are used, such as decimal and datetime.
  • +
  • It requires Python 2.4 or higher, since the decimal type was added in 2.4. (We are open to suggestions regarding versions for + earlier Python builds.)
  • +
  • Additional features have been added to simplify database programming with Python.
  • +
+ +

Additional Features

+ +

The following features are beyond the requirements of the DB API. They are intended to provide a very + Python-like, convenient programming experience, but you should not use them if your code needs to be portable between + DB API modules. (Though we hope future DB API specifications will adopt some of these features.)

+ +

Access Values By Name

+ +

The DB API specifies that results must be tuple-like, so columns are normally accessed by indexing into the + sequence (e.g. row[0]) and pyodbc supports this. However, columns can also be accessed by name:

+ +
+  cursor.execute("select album_id, photo_id from photos where user_id=1")
+  row = cursor.fetchone()
+  print row.album_id, row.photo_id 
+  print row[0], row[1] # same as above, but less readable
+ +

This makes the code easier to maintain when modifying SQL, more readable, and allows rows to be used where a + custom class might otherwise be used. All rows from a single execute share the same dictionary of + column names, so using Row objects to hold a large result set may also use less memory than creating an object for + each row.

+ +

The SQL "as" keyword allows the name of a column in the result set to be specified. This is useful if a column + name has spaces or if there is no name:

+ +
+  cursor.execute("select count(*) as photo_count from photos where user_id=1")
+  row = cursor.fetchone()
+  print row.photo_count
+ +

Row Values Can Be Replaced

+ +

Though SQL is very powerful, values sometimes need to be modified before they can be used. Rows allow their + values to be replaced, which makes them even more convenient ad-hoc data structures.

+ +
+  # Replace the 'start_date' datetime in each row with one that has a time zone.
+  rows = cursor.fetchall()
+  for row in rows:
+    row.start_date = row.start_date.astimezone(tz)
+ +

Note that columns cannot be added to rows; only values for existing columns can be modified.

+ +

Cursors are Iterable

+ +

The DB API makes this an optional feature. Each iteration returns a row object.

+ +
+  cursor.execute("select album_id, photo_id from photos where user_id=1")
+  for row in cursor:
+      print row.album_id, row.photo_id
+  
+ +

Cursor.execute Returns the "Right" Thing

+ +

The DB API specification does not specify the return value of Cursor.execute, so pyodbc returns different types + based on the SQL statement executed.

+ +

A select statement returns the cursor itself, allowing the execute results to be iterated over and used to fetch. + This makes the code very compact:

+ +
+  for row in cursor.execute("select album_id, photo_id from photos where user_id=1"):
+      print row.album_id, row.photo_id
+
+  row = cursor.execute("select count(*) from tmp").fetchone()
+  
+  rows = cursor.execute("select * from tmp").fetchall()
+ +

Update and delete statements return the number of rows affected:

+ +
+  count = cursor.execute("update photos set processed=1 where user_id=1")
+
+  count = cursor.execute("delete from photos where user_id=1")
+  
+ +

All other SQL statements return None.

+ +

Connection.execute

+ +

pyodbc Connection objects have an execute method that creates new Cursors automatically.

+ +
+    for row in cnxn.execute("select user_id from tmp"):
+        print row.user_id
+ +

Since each call creates a new cursor, do not use this when executing multiple statements in a row.

+ +

Passing Parameters

+ +

As specified in the DB API, Cursor.execute accepts an optional sequence of parameters:

+ +
+  cursor.execute("select a from tbl where b=? and c=?", (x, y))
+ +

However, pyodbc also accepts the parameters directly. Note that the parameters are not in a tuple:

+ +
+  cursor.execute("select a from tbl where b=? and c=?", x, y)
+ +

Autocommit Mode

+ +

Connections can be put into autocommit mode using the autocommit keyword of + the connect function or + the autocommit attribute of the Connection + object.

+ +

Miscellaneous ODBC Functions

+ +

The Connection.getinfo function is an interface to +SQLGetInfo.

+ + +

Most of the ODBC catalog functions are available as methods on Cursor objects. The results +are presented as SELECT results in rows that are fetched normally. Refer to Microsoft's ODBC +documentation for details of how to use each function.

+ +
+  cnxn   = pyodbc.connect(...)
+  cursor = cnxn.cursor()
+  for row in cursor.tables():
+      print row.table_name
+  
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ODBC FunctionMethodDescription
SQLTablesCursor.tablesReturns a list of table, catalog, or schema names, and table types.
SQLColumnsCursor.columnsReturns a list of column names in specified tables.
SQLStatisticsCursor.statisticsReturns a list of statistics about a single table and the indexes associated with the table.
SQLSpecialColumnsCursor.rowIdColumnsReturns a list of columns that uniquely identify a row.
SQLSpecialColumnsCursor.rowVerColumnsReturns a list of columns that are automatically updated when any value in the row is updated.
SQLPrimaryKeysCursor.primaryKeysReturns a list of column names that make up the primary key for a table.
SQLForeignKeysCursor.foreignKeysReturns a list of column names that are foreign keys in the specified table (columns + in the specified table that refer to primary keys in other tables) or foreign keys in + other tables that refer to the primary key in the specified table.
SQLProceduresCursor.proceduresReturns information about the procedures in the data source.
SQLGetTypeInfoCursor.getTypeInfoReturns information about the specified data type or all data types supported by the driver.
+ +
+ +
+SourceForge.net Logo + + + diff --git a/web/license.html b/web/license.html new file mode 100644 index 00000000..11d4d29a --- /dev/null +++ b/web/license.html @@ -0,0 +1,48 @@ + + + +pyodbc + + + + +
+ pyodbc - A Python DB API module for ODBC +
+ + + +
+ +

Copyright (c) 2004-2008 Michael Kleehammer

+ +

Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so.

+ +

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.

+ +
+ +
+SourceForge.net Logo + + + diff --git a/web/styles.css b/web/styles.css new file mode 100644 index 00000000..0c509635 --- /dev/null +++ b/web/styles.css @@ -0,0 +1,131 @@ + +body +{ + margin: 0; + background-color: #fff; + position: relative; +} + +#titlebox +{ + background-color: #577eb2; + color: white; + font-family: Corbel, Verdana, Arial, sans-serif; + font-size: larger; + font-weight: bold; + padding: 0px 0px 4px 4px; +} + +#nav +{ + padding: 0px 0px 2px 4px; + font-size: smaller; + background-color: #c1d9f2; + border: 1px solid #577eb2; + color: #3e6aaa; +} + +#nav A +{ + color: #3460A0; + text-decoration: none; +} + +#nav A:hover +{ + color: #144080; + text-decoration: underline; +} + +#contents +{ + margin: .5em; +} + +h1 +{ + font-weight: bold; + font-size: 1.4em; + color: #577eb2; +} + +h2 +{ + font-weight: bold; + font-size: 1.1em; + color: #577eb2; +} + +h3 +{ + font-weight: normal; + font-size: 1em; + color: #577eb2; +} + +a:active +{ + color: #144080; +} + +a:visited +{ + color: #144080; +} +a:hover +{ + color: #577eb2; +} + + +DT +{ + margin-top: .5em; + margin-left: .5em; + font-weight: bold; +} + +DD +{ + margin-left: 2em; +} + + + +code +{ + font-family: "Consolas", "Courier New", "Courier", monospace; +} + +PRE +{ + font-family: "Consolas", "Courier New", "Courier", monospace; + margin-bottom: 0px; + padding-bottom: 0px; +} + +TABLE +{ + border: 1px solid #a0a0a0; +} + +THEAD TR TD +{ + background-color: #f0f0f0; + border-bottom: 1px solid #a0a0a0; +} + +.treven +{ + background-color: #f0f0f0; +} + +.added +{ + color: #00129c; +} + +.missing +{ + color: #9c0000; +} diff --git a/web/tutorial.html b/web/tutorial.html new file mode 100644 index 00000000..23b6c0c2 --- /dev/null +++ b/web/tutorial.html @@ -0,0 +1,122 @@ + + + +pyodbc + + + + +
+ pyodbc - A Python DB API module for ODBC +
+ + + +
+ +

Introduction

+ +

This document is a high-level introduction to using pyodbc and does not cover all its +details. pyodbc implements the +Python Database API Specification +v2.0, so you should read this specification for more information.

+ +

If you haven't installed pyodbc, + download and + install it.

+ +

Connecting

+ +

First, you must import pyodbc. If you get errors here, make sure you have pyodbc installed.

+ +
+import pyodbc
+ +

Next, create a connection by passing an ODBC connection string to the connect method. This +step causes ODBC to load the database driver (the SQL Server driver in this example) and +connect to the database.

+ +
+cnxn = pyodbc.connect('DSN=northwind')
+ +

The ODBC connection string format is specified by ODBC in the +SQLDriverConnect +documentation. Unfortunately, this is for C programmers, but the comments section discussion +of the connection string format is useful.

+ +

ODBC itself recognizes the following keywords in a connection string:

+ +
+
DRIVER
+
The ODBC driver to use. Make sure the driver you want to use is installed.
+ +
DSN
+
The name of a DSN configured in the control panel Data Sources applet. This allows + database information to be specified in an application-independent manner and location.
+ +
UID
+
The user name when a login is required.
+ +
PWD
+
The password when a login is required. DSNs cannot contain passwords, so you may need + this even when using the DSN keyword. + +
FILEDSN
+
The name of a .dsn file, used when the DSN information is stored in a file.
+
+ +

Each database driver may support additional keywords. For example, the SQL Server driver +allows you to specify the machine SQL Server is running on using the SERVER keyword and the +database to connect to using the DATABASE keyword. These two allow you to connect to the +database without registering a DSN in the control panel. (The ODBC section of the SQL Native +Client Using +Connection String Keywords with SQL Native Client documentation may be useful when using SQL Server.)

+ +
+cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=testdb;UID=user;PWD=password')
+ +

Create an Example Table

+ +

Next, we'll create a table and populate it with some example values. First, make a cursor +and execute the necessary SQL. (The SQL may need to be modified for your database, +particularly the type names like 'int'. I'm testing this using SQL Server.) Finally, commit +the changes.

+ +
+cursor = cnxn.cursor()
+cursor.execute("create table tmp(a int, b varchar(30))")
+cnxn.commit()
+ +

First, notice that the commit is applied to the connection, not the cursor. Changes from +all cursors attached to the same connection will be committed. Also note that the commit +is required. If you do not commit, the changes will be rolled back when the connection +is closed.

+ +

Insert Some Values

+ + + +

Selecting Values

+ +

Once you have a connection, obtain a cursor from it and execute a select statement via the +cursor's execute method:

+ +
+cursor = cnxn.cursor()
+cursor.execute('select a, b from tmp')
+
+ + + +