diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100755 index 0000000..b711605 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1 @@ +module.exports = { "extends": "airbnb-base" }; \ No newline at end of file diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 index e17ea04..331ea9c --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,9 @@ a.out build node_modules deps -.idea \ No newline at end of file +.idea +sandbox +core +oldtest +.env +.vscode/ \ No newline at end of file diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 7095b30..bfc39ca --- a/README.md +++ b/README.md @@ -1,671 +1,888 @@ -node-odbc ---------- - -An asynchronous/synchronous interface for node.js to unixODBC and its supported -drivers. - -requirements ------------- - -* unixODBC binaries and development libraries for module compilation - * on Ubuntu/Debian `sudo apt-get install unixodbc unixodbc-dev` - * on RedHat/CentOS `sudo yum install unixODBC unixODBC-devel` - * on OSX - * using macports.org `sudo port unixODBC` - * using brew `brew install unixODBC` - * on IBM i `yum install unixODBC unixODBC-devel` (requires [yum](http://ibm.biz/ibmi-rpms)) -* odbc drivers for target database -* properly configured odbc.ini and odbcinst.ini. - -install ------- +# node-odbc -After insuring that all requirements are installed you may install by one of the -two following options: +`node-odbc` is an ODBC database interface for Node.js. It can connect to any database management system once the system has been correctly configured: the unixODBC and unixODBC-devel packages are installed, an ODBC driver for your desired database is installed, and your odbc.ini and odbcinst.ini files are configured. Using ODBC data sources makes remote development a breeze, and switching between DBMS systems only requires updating your queries; the rest of your code can stay the same. -### git +--- -```bash -git clone git://github.com/wankdanker/node-odbc.git -cd node-odbc -node-gyp configure build -``` -### npm +## Installation + +Instructions on how to set up your ODBC environment can be found in SETUP.md. As an overview, three main steps must be completed before `node-odbc` can interact with your database: + +* **Install unixODBC and unixODBC-devel:** Compilation of `node-odbc` on your system requires these packages to provide the correct headers. + * **Ubuntu/Debian**: `sudo apt-get install unixodbc unixodbc-dev` + * **RedHat/CentOS**: `sudo yum install unixODBC unixODBC-devel` + * **OSX**: + * **macports.org:** `sudo port install unixODBC` + * **using brew:** `brew install unixODBC` + * **IBM i:** `yum install unixODBC unixODBC-devel` (requires [yum](http://ibm.biz/ibmi-rpms)) +* **Install ODBC drivers for target database:** Most database management system providers offer ODBC drivers for their product. See the website of your DBMS for more information. +* **odbc.ini and odbcinst.ini**: These files define your DSNs (data source names) and ODBC drivers, respectively. They must be set up for ODBC functions to correctly interact with your database. A minimal example of both files is sketched below.
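+
+A minimal sketch of what these two files might contain is shown below. The driver name, library path, and connection values are placeholders only; substitute the details for your own driver and DBMS (see SETUP.md for full instructions).
+
+```ini
+; odbcinst.ini -- registers an ODBC driver with unixODBC
+[MyDriver]
+Description = ODBC driver for my DBMS
+Driver      = /usr/lib/libmydriver.so
+
+; odbc.ini -- defines a DSN that uses the driver registered above;
+; keys other than Driver and Description are specific to the driver in use
+[MYDSN]
+Driver      = MyDriver
+Description = Example data source
+Server      = my.database.example.com
+Database    = MYDB
+```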
+ +When all these steps have been completed, install `node-odbc` into your Node.js project by using: ```bash npm install odbc ``` +--- -quick example -------------- +## Important Changes in 2.0 -```javascript -var db = require('odbc')() - , cn = process.env.ODBC_CONNECTION_STRING - ; - -db.open(cn, function (err) { - if (err) return console.log(err); - - db.query('select * from user where user_id = ?', [42], function (err, data) { - if (err) console.log(err); - - console.log(data); +`node-odbc` has recently been upgraded from its initial release. The following list highlights the major improvements and potential code-breaking changes. - db.close(function () { - console.log('done'); - }); - }); -}); +* **Promise support:** All asynchronous functions can now be used with native JavaScript Promises. If a callback function is not passed, the ODBC functions will return a native Promise. If a callback _is_ passed to the ODBC functions, then the old callback behavior will be used. + +* **Performance improvements:** The underlying ODBC function calls have been reworked to greatly improve performance. For ODBC afficianados, `node-odbc` used to retrieved results using SQLGetData, which works for small amounts of data but is slow for large datasets. `node-odbc` now uses SQLBindCol for binding result sets, which for large queries is orders of magnitude faster. + +* **Rewritten with N-API:** `node-odbc` was completely rewritten using node-addon-api, a C++ wrapper for N-API, which created an engine-agnostic and ABI-stable package. This means that if you upgrade your Node.js version, there is no need to recompile the package, it just works! + +* **API Changes:** The API has been changed and simplified. See the documentation below for a list of all the changes. + +--- + +## API + +* [Connection](#Connection) + * [constructor (new Connection())](#constructor-\(new-connection\(connectionstring\)\)) + * [.query()](#.query\(sql,-parameters?,-callback?\)) + * [.callProcedure()](.callProcedure\(catalog,-schema,-name,-parameters?,-callback?\)) + * [.createStatement()](.createStatement\(callback?\)) + * [.tables()](#.tables\(catalog,-schema,-table,-type,-callback?\)) + * [.columns()](#.columns\(catalog,-schema,-table,-column,-callback?\)) + * [.beginTransaction()](#.beginTransaction\(callback?\)) + * [.commit()](#.commit\(callback?\)) + * [.rollback()](#.rollback\(callback?\)) + * [.close()](#.close\(callback?\)) +* [Pool](#Pool) + * [constructor (new Pool())](#constructor-\(new-pool\(connectionstring\)\)) + * [.init()](#.init\(callback?\)) + * [.connect()](#.connect\(callback?\)) + * [.query()](#.query\(sql,-parameters?,-callback?\)) + * [.close()](#.close\(callback?\)) +* [Statement](#Statement) + * [.prepare()](#.prepare\(sql,-callback?\)) + * [.bind()](#.bind\(parameters,-callback?\)) + * [.execute()](#.execute\(callback?\)) + * [.close()](#.close\(callback?\)) + +### **Callbacks _or_ Promises** + +Every asynchronous function in the Node.js `node-odbc` package can be called with either a callback Function or a Promise. To use Promises, simply do not pass a callback function (in the API docs below, specified with a `callback?`). This will return a Promise object than can then be used with `.then` or the more modern `async/await` workflow. To use callbacks, simply pass a callback function. For each function explained in the documents below, both Callback and Promise examples are given. + +_All examples are shown using IBM i Db2 DSNs and queries. 
Because ODBC is DBMS-agnostic, examples will work as long as the query strings are modified for your particular DBMS._ + +### **Result Array** + +All functions that return a result set do so in an array, where each row in the result set is an entry in the array. The format of data within the row can either be an array or an object, depending on the configuration option passed to the connection. + +The result array also contains several properties: +* `count`: the number of rows affected by the statement or procedure. Returns the result from ODBC function SQLRowCount. +* `columns`: a list of columns in the result set. This is returned in an array. Each column in the array has the following properties: + * `name`: The name of the column + * `dataType`: The data type of the column properties +* `statement`: The statement used to return the result set +* `parameters`: The parameters passed to the statement or procedure. For input/output and output parameters, this value will reflect the value updated from a procedure. +* `return`: The return value from some procedures. For many DBMS, this will always be undefined. + +``` +[ { CUSNUM: 938472, + LSTNAM: 'Henning ', + INIT: 'G K', + STREET: '4859 Elm Ave ', + CITY: 'Dallas', + STATE: 'TX', + ZIPCOD: 75217, + CDTLMT: 5000, + CHGCOD: 3, + BALDUE: 37, + CDTDUE: 0 }, + { CUSNUM: 839283, + LSTNAM: 'Jones ', + INIT: 'B D', + STREET: '21B NW 135 St', + CITY: 'Clay ', + STATE: 'NY', + ZIPCOD: 13041, + CDTLMT: 400, + CHGCOD: 1, + BALDUE: 100, + CDTDUE: 0 }, + statement: 'SELECT * FROM QIWS.QCUSTCDT', + parameters: [], + return: undefined, + count: -1, + columns: [ { name: 'CUSNUM', dataType: 2 }, + { name: 'LSTNAM', dataType: 1 }, + { name: 'INIT', dataType: 1 }, + { name: 'STREET', dataType: 1 }, + { name: 'CITY', dataType: 1 }, + { name: 'STATE', dataType: 1 }, + { name: 'ZIPCOD', dataType: 2 }, + { name: 'CDTLMT', dataType: 2 }, + { name: 'CHGCOD', dataType: 2 }, + { name: 'BALDUE', dataType: 2 }, + { name: 'CDTDUE', dataType: 2 } ] ] ``` -api +In this example, two rows are returned, with eleven columns each. The format of these columns is found on the `columns` property, with their names and dataType (which are integers mapped to SQL data types). + +With this result structure, users can iterate over the result set like any old array (in this case, `results.length` would return 2) while also accessing important information from the SQL call and result set. + +--- --- -### Database +## **Connection** + +Connection has the following functions: -The simple api is based on instances of the `Database` class. You may get an -instance in one of the following ways: +### `constructor (new Connection(connectionString))` + +Create a Connection object, which is opened (synchronously!) ```javascript -require("odbc").open(connectionString, function (err, db){ - //db is already open now if err is falsy -}); +const { Connection } = require('odbc'); +const connection = new Connection(connectionString); ``` -or by using the helper function: +--- -```javascript -var db = require("odbc")(); -``` +### `.query(sql, parameters?, callback?)` -or by creating an instance with the constructor function: +Run a query on the database. Can be passed an SQL string with parameter markers `?` and an array of parameters to bind to those markers. 
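+
+For example, a parameterized query can be awaited directly. The snippet below is a sketch: the table name MY_TABLE and the column ID are hypothetical, so adjust them for your own schema. One value from the array is bound, in order, to each `?` marker.
+
+```javascript
+const { Connection } = require('odbc');
+const connection = new Connection(connectionString);
+
+// can only use await keyword in an async function
+async function parameterizedQueryExample() {
+  // bind 42 to the single ? marker in the statement
+  const result = await connection.query('SELECT * FROM MY_TABLE WHERE ID = ?', [42]);
+  console.log(result);
+}
+
+parameterizedQueryExample();
+```
+
+The callback form of `.query` looks like this: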
-```javascript -var Database = require("odbc").Database - , db = new Database(); +```JavaScript +const { Connection } = require('odbc'); +const connection = new Connection(connectionString); +connection.query('SELECT * FROM QIWS.QCUSTCDT', (error, result) => { + if (error) { console.error(error) } + console.log(result); +}) ``` -#### .connected +--- + +### `.callProcedure(catalog, schema, name, parameters?, callback?)` + +Calls a database procedure, returning the results in a [result array](#result-array). -Returns a Boolean of whether the database is currently connected. +#### Parameters: +* **catalog**: The name of the catalog where the procedure exists, or null to use the default catalog +* **schema**: The name of the schema where the procedure exists, or null to use a default schema +* **name**: The name of the procedure in the database +* **{OPTIONAL} parameters**: An array of parameters to pass to the procedure. For input and input/output parameters, the JavaScript value passed in is expected to be of a type translatable to the SQL type the procedure expects. For output parameters, any JavaScript value can be passed in, and will be overwritten by the function. The number of parameters passed in must match the number of parameters expected by the procedure. +* **{OPTIONAL} callback**: The function called when `.callProcedure` has finished execution. If no callback function is given, `.callProcedure` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * result: The result object from execution + +#### Examples: + +**Promises** ```javascript -var db = require("odbc")(); +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); -console.log( "Connected: " + db.connected ); +// can only use await keywork in an async function +async function callProcedureExample() { + const statement = await connection.createStatement(); + // now have a statement where sql can be prepared, bound, and executed +} + +callProcedureExample(); ``` -#### .open(connectionString, callback) +**Callbacks** -Open a connection to a database. +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(connectionString); +connection.callProcedure(null, null, 'MY_PROC', [undefined], (error, result) => { + if (error) { console.error(error) } // handle + // result contains an array of results, and has a `parameters` property to access parameters returned by the procedure. + console.log(result); +}) +``` -* **connectionString** - The ODBC connection string for your database -* **callback** - `callback (err)` +--- -```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +### `.createStatement(callback?)` -db.open(cn, function (err) { - if (err) { - return console.log(err); - } +Returns a [Statement](#Statement) object from the connection. - //we now have an open connection to the database -}); -``` -#### .openSync(connectionString) +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.createStatement` has finished execution. If no callback function is given, `.createStatement` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * statement: The newly created Statement object -Synchronously open a connection to a database. 
+#### Examples: -* **connectionString** - The ODBC connection string for your database +**Promises** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); -try { - var result = db.openSync(cn); -} -catch (e) { - console.log(e.message); +// can only use await keywork in an async function +async function statementExample() { + const statement = await connection.createStatement(); + // now have a statement where sql can be prepared, bound, and executed } -//we now have an open connection to the database +statementExample(); ``` -#### .query(sqlQuery [, bindingParameters], callback) - -Issue an asynchronous SQL query to the database which is currently open. - -* **sqlQuery** - The SQL query to be executed. -* **bindingParameters** - _OPTIONAL_ - An array of values that will be bound to - any '?' characters in `sqlQuery`. -* **callback** - `callback (err, rows, moreResultSets)` +**Callbacks** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -db.open(cn, function (err) { - if (err) { - return console.log(err); - } - - //we now have an open connection to the database - //so lets get some data - db.query("select top 10 * from customers", function (err, rows, moreResultSets) { - if (err) { - return console.log(err); - } - - console.log(rows); - - //if moreResultSets is truthy, then this callback function will be called - //again with the next set of rows. - }); +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// returns information about all tables in schema MY_SCHEMA +connection.createStatement((error, statement) => { + if (error) { return; } // handle + // now have a statement where sql can be prepared, bound, and executed }); ``` -#### .querySync(sqlQuery [, bindingParameters]) +--- + +### `.tables(catalog, schema, table, type, callback?)` -Synchronously issue a SQL query to the database that is currently open. +Returns information about the table specified in the parameters by calling the ODBC function [SQLTables](https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=sql-server-2017). Values passed to parameters will narrow the result set, while `null` will include all results of that level. -* **sqlQuery** - The SQL query to be executed. -* **bindingParameters** - _OPTIONAL_ - An array of values that will be bound to - any '?' characters in `sqlQuery`. +#### Parameters: +* **catalog**: The name of the catalog, or null if not specified +* **schema**: The name of the schema, or null if not specified +* **table**: The name of the table, or null if not specified +* **type**: The type of table that you want information about, or null if not specified +* **{OPTIONAL} callback**: The function called when `.tables` has finished execution. If no callback function is given, `.tables` will return a native JavaScript `Promise`. 
Callback signature is: + * error: The error that occured in execution, or `null` if no error + * result: The result object from execution + +#### Examples: + +**Promises** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function getTables() { + // returns information about all tables in schema MY_SCHEMA + const result = await connection.tables(null, 'MY_SCHEMA', null, null); + console.log(result); +} -//blocks until the connection is opened. -db.openSync(cn); +getTables(); +``` + +**Callbacks** -//blocks until the query is completed and all data has been acquired -var rows = db.querySync("select top 10 * from customers"); +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); -console.log(rows); +// returns information about all tables in schema MY_SCHEMA +connection.columns(null, "MY_SCHEMA", null, null, (error, result) => { + if (error) { return; } // handle + console.log(result); +}); ``` -#### .close(callback) +--- + +### `.columns(catalog, schema, table, column, callback?)` -Close the currently opened database. +Returns information about the columns specified in the parameters by calling the ODBC function [SQLColumns](https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlcolumns-function?view=sql-server-2017). Values passed to parameters will narrow the result set, while `null` will include all results of that level. -* **callback** - `callback (err)` +#### Parameters: +* **catalog**: The name of the catalog, or null if not specified +* **schema**: The name of the schema, or null if not specified +* **table**: The name of the table, or null if not specified +* **column**: The name of the column that you want information about, or null if not specified +* **{OPTIONAL} callback**: The function called when `.columns` has finished execution. If no callback function is given, `.columns` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * result: The result object from execution + +#### Examples: + +**Promises** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -db.open(cn, function (err) { - if (err) { - return console.log(err); - } - - //we now have an open connection to the database - - db.close(function (err) { - console.log("the database connection is now closed"); - }); +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function getColumns() { + // returns information about all columns in table MY_SCEHMA.MY_TABLE + const result = await connection.columns(null, 'MY_SCHEMA', 'MY_TABLE', null); + console.log(result); +} + +getColumns(); +``` + +**Callbacks** + +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// returns information about all columns in table MY_SCEHMA.MY_TABLE +connection.columns(null, "MY_SCHEMA", "MY_TABLE", null, (error, result) => { + if (error) { return; } // handle + console.log(result); }); ``` -#### .closeSync() +--- -Synchronously close the currently opened database. 
+### `.beginTransaction(callback?)` -```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +Begins a transaction on the connection. The transaction can be committed by calling `.commit` or rolled back by calling `.rollback`. **If a connection is closed with an open transaction, it will be rolled back.** Connection isolation level will affect the data that other transactions can view mid transaction. -//Blocks until the connection is open -db.openSync(cn); +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.beginTransaction` has finished execution. If no callback function is given, `.beginTransaction` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -//Blocks until the connection is closed -db.closeSync(); -``` +#### Examples: + +**Promises** -#### .prepare(sql, callback) +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); -Prepare a statement for execution. +// can only use await keywork in an async function +async function transaction() { + await connection.beginTransaction(); + // transaction is now open +} -* **sql** - SQL string to prepare -* **callback** - `callback (err, stmt)` +transaction(); +``` -Returns a `Statement` object via the callback +**Callbacks** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -//Blocks until the connection is open -db.openSync(cn); - -db.prepare("insert into hits (col1, col2) VALUES (?, ?)", function (err, stmt) { - if (err) { - //could not prepare for some reason - console.log(err); - return db.closeSync(); - } - - //Bind and Execute the statment asynchronously - stmt.execute(['something', 42], function (err, result) { - result.closeSync(); - - //Close the connection - db.closeSync(); - }); -}) +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// returns information about all columns in table MY_SCEHMA.MY_TABLE +connection.beginTransaction((error) => { + if (error) { return; } // handle + // transaction is now open +}); ``` -#### .prepareSync(sql) +--- -Synchronously prepare a statement for execution. +### `.commit(callback?)` -* **sql** - SQL string to prepare +Commits an open transaction. If called on a connection that doesn't have an open transaction, will no-op. -Returns a `Statement` object +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.commit` has finished execution. If no callback function is given, `.commit` will return a native JavaScript `Promise`. 
Callback signature is: + * error: The error that occured in execution, or `null` if no error -```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +#### Examples: -//Blocks until the connection is open -db.openSync(cn); +**Promises** -//Blocks while preparing the statement -var stmt = db.prepareSync("insert into hits (col1, col2) VALUES (?, ?)") +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function commitTransaction() { + await connection.beginTransaction(); + const insertResult = await connection.query('INSERT INTO MY_TABLE VALUES(1, \'Name\')'); + await connection.commit(); + // INSERT query has now been committed +} -//Bind and Execute the statment asynchronously -stmt.execute(['something', 42], function (err, result) { - result.closeSync(); +commitTransaction(); +``` + +**Callbacks** - //Close the connection - db.closeSync(); +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// returns information about all columns in table MY_SCEHMA.MY_TABLE +connection.beginTransaction((error1) => { + if (error1) { return; } // handle + connection.query('INSERT INTO MY_TABLE VALUES(1, \'Name\')', (error2, result) => { + if (error2) { return; } // handle + connection.commit((error3) => { + // INSERT query has now been committed + }) + }) }); ``` -#### .beginTransaction(callback) +--- -Begin a transaction -* **callback** - `callback (err)` +### `.rollback(callback?)` -#### .beginTransactionSync() +Rolls back an open transaction. If called on a connection that doesn't have an open transaction, will no-op. -Synchronously begin a transaction +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.rollback` has finished execution. If no callback function is given, `.rollback` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -#### .commitTransaction(callback) +#### Examples: -Commit a transaction +**Promises** -* **callback** - `callback (err)` +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function rollbackTransaction() { + await connection.beginTransaction(); + const insertResult = await connection.query('INSERT INTO MY_TABLE VALUES(1, \'Name\')'); + await connection.rollback(); + // INSERT query has now been rolled back +} + +rollbackTransaction(); +``` + +**Callbacks** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -//Blocks until the connection is open -db.openSync(cn); - -db.beginTransaction(function (err) { - if (err) { - //could not begin a transaction for some reason. 
- console.log(err); - return db.closeSync(); - } - - var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); - - db.commitTransaction(function (err) { - if (err) { - //error during commit - console.log(err); - return db.closeSync(); - } - - console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); - - //Close the connection - db.closeSync(); - }); -}) +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// returns information about all columns in table MY_SCEHMA.MY_TABLE +connection.beginTransaction((error1) => { + if (error1) { return; } // handle + connection.query('INSERT INTO MY_TABLE VALUES(1, \'Name\')', (error2, result) => { + if (error2) { return; } // handle + connection.rollback((error3) => { + // INSERT query has now been rolled back + }) + }) +}); ``` -#### .commitTransactionSync() +--- -Synchronously commit a transaction +### `.close(callback?)` -```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +Closes and open connection. Any transactions on the connection that have not been committed or rolledback will be rolledback. -//Blocks until the connection is open -db.openSync(cn); +--- +--- -db.beginTransactionSync(); -var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); +### **Pool** -db.commitTransactionSync(); +### `constructor (new Pool(connectionString))` -console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); +Creates a instance of the Pool class, storing information but not opening any connections. -//Close the connection -db.closeSync(); +```JavaScript +const { Pool } = require('odbc'); +const pool = new Pool(connectionString); ``` -#### .rollbackTransaction(callback) +**PLEASE NOTE:** The pool will not have any open connections until you call pool.init(); + +### `.init(callback?)` + +Opens all the connections in the Pool asynchronously. Returns once all of the Connections have been opened. -Rollback a transaction +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.init` has finished execution. If no callback function is given, `.init` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -* **callback** - `callback (err)` +#### Examples: + +**Promises** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -//Blocks until the connection is open -db.openSync(cn); - -db.beginTransaction(function (err) { - if (err) { - //could not begin a transaction for some reason. 
- console.log(err); - return db.closeSync(); - } - - var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); - - db.rollbackTransaction(function (err) { - if (err) { - //error during rollback - console.log(err); - return db.closeSync(); - } - - console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); - - //Close the connection - db.closeSync(); - }); -}) -``` +const { Pool } = require('odbc'); -#### .rollbackTransactionSync() +// can only use await keywork in an async function +async function connectExample() { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + await pool.init(); + // all Connections in the pool are now opened +} + +connectExample(); +``` -Synchronously rollback a transaction +**Callbacks** ```javascript -var db = require("odbc")() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; +const { Pool } = require('odbc'); +const pool = new Pool(`${process.env.CONNECTION_STRING}`); +pool.init((error1) => { + if (error1) { return; } // handle + // all Connections in the pool are now opened +}); +``` + +### `.connect(callback?)` -//Blocks until the connection is open -db.openSync(cn); +Returns a [Connection](#connection) object for you to use from the Pool. Doesn't actually open a connection, because they are already open in the pool when `.init` is called. -db.beginTransactionSync(); +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.connect` has finished execution. If no callback function is given, `.connect` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * connection: The [Connection](#connection) retrieved from the Pool. -var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); +#### Examples: -db.rollbackTransactionSync(); +**Promises** -console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); +```javascript +const { Pool } = require('odbc'); + +// can only use await keywork in an async function +async function connectExample() { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + await pool.init(); + const connection = await pool.connect(); + // now have a Connection to do work with +} -//Close the connection -db.closeSync(); +connectExample(); +``` + +**Callbacks** + +```javascript +const { Pool } = require('odbc'); +const pool = new Pool(`${process.env.CONNECTION_STRING}`); +pool.init((error1) => { + if (error1) { return; } // handle + pool.connect((error2, connection) => { + if (error2) { return; } // handle + // now have a Connection to do work with + }); +}); ``` ----------- +--- + +### `.query(sql, parameters?, callback?)` -### Pool +Utility function to execute a query on any open connection in the pool. Will get a connection, fire of the query, return the results, and return the connection the the pool. -The node-odbc `Pool` is a rudimentary connection pool which will attempt to have -database connections ready and waiting for you when you call the `open` method. +#### Parameters: +* **sql**: An SQL string that will be executed. Can optionally be given parameter markers (`?`) and also given an array of values to bind to the parameters. +* **{OPTIONAL} parameters**: An array of values to bind to the parameter markers, if there are any. The number of values in this array must match the number of parameter markers in the sql statement. 
+* **{OPTIONAL} callback**: The function called when `.query` has finished execution. If no callback function is given, `.query` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * result: The [result array](#result-array) returned from the executed statement -If you use a `Pool` instance, any connection that you close will cause another -connection to be opened for that same connection string. That connection will -be used the next time you call `Pool.open()` for the same connection string. +#### Examples: -This should probably be changed. +**Promises** -#### .open(connectionString, callback) +```javascript +const { Pool } = require('odbc'); + +// can only use await keywork in an async function +async function queryExample() { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + await pool.init(); + const result = await pool.query('SELECT * FROM MY_TABLE'); + console.log(result); +} -Get a Database` instance which is already connected to `connectionString` +queryExample(); +``` -* **connectionString** - The ODBC connection string for your database -* **callback** - `callback (err, db)` +**Callbacks** ```javascript -var Pool = require("odbc").Pool - , pool = new Pool() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -pool.open(cn, function (err, db) { - if (err) { - return console.log(err); - } - - //db is now an open database connection and can be used like normal - //if we run some queries with db.query(...) and then call db.close(); - //a connection to `cn` will be re-opened silently behind the scense - //and will be ready the next time we do `pool.open(cn)` +const { Pool } = require('odbc'); +const pool = new Pool(`${process.env.CONNECTION_STRING}`); +pool.init((error1) => { + if (error1) { return; } // handle + pool.query('SELECT * FROM MY_TABLE', (error2, result) => { + if (error2) { return; } // handle + console.log(result); + }); }); ``` -#### .close(callback) +--- + +### `.close(callback?)` + +Closes the entire pool of currently unused connections. Will not close connections that are checked-out, but will discard the connections when they are closed with Connection's `.close` function. After calling close, must create a new Pool sprin up new Connections. -Close all connections in the `Pool` instance +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.close` has finished execution. If no callback function is given, `.close` will return a native JavaScript `Promise`. 
Callback signature is: + * error: The error that occured in execution, or `null` if no error -* **callback** - `callback (err)` +#### Examples: + +**Promises** ```javascript -var Pool = require("odbc").Pool - , pool = new Pool() - , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" - ; - -pool.open(cn, function (err, db) { - if (err) { - return console.log(err); - } - - //db is now an open database connection and can be used like normal - //but all we will do now is close the whole pool - - pool.close(function () { - console.log("all connections in the pool are closed"); - }); -}); +const { Pool } = require('odbc'); + +// can only use await keywork in an async function +async function closeExample() { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + await pool.init(); + await pool.close(); + // pool is now closed +} + +closeExample(); ``` -example -------- +**Callbacks** ```javascript -var odbc = require("odbc") - , util = require('util') - , db = new odbc.Database() - ; - -var connectionString = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname"; - -db.open(connectionString, function(err) { - db.query("select * from table", function(err, rows, moreResultSets) { - console.log(util.inspect(rows, null, 10)); - - db.close(function() { - console.log("Database connection closed"); - }); - }); +const { Pool } = require('odbc'); +const pool = new Pool(`${process.env.CONNECTION_STRING}`); +pool.init((error1) => { + if (error1) { return; } // handle + pool.close((error2) => { + if (error2) { return; } // handle + // pool is now closed + }); }); ``` -testing -------- +--- +--- + +## **Statement** + +A statement object is created from a Connection, and cannot be created _ad hoc_ with a constructor. + +Statements allow you to prepare a commonly used statement, then bind parameters to it multiple times, executing in between. + +--- -Tests can be run by executing `npm test` from within the root of the node-odbc -directory. You can also run the tests by executing `node run-tests.js` from -within the `/test` directory. +### `.prepare(sql, callback?)` -By default, the tests are setup to run against a sqlite3 database which is -created at test time. This will require proper installation of the sqlite odbc -driver. On Ubuntu: `sudo apt-get install libsqliteodbc` +Prepares an SQL statement, with or without parameters (?) to bind to. -build options -------------- +#### Parameters: +* **sql**: An SQL string that is prepared and can be executed with the .`execute` function. +* **{OPTIONAL} callback**: The function called when `.prepare` has finished execution. If no callback function is given, `.prepare` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -### Debug +#### Examples: -If you would like to enable debugging messages to be displayed you can add the -flag `DEBUG` to the defines section of the `binding.gyp` file and then execute -`node-gyp rebuild`. 
+**Promises** ```javascript - -'defines' : [ - "DEBUG" -], - -``` +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function prepareExample() { + const statement = await connection.createStatement(); + await statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)'); + // statement has been prepared, can bind and execute +} -### Dynodbc +prepareExample(); +``` -You may also enable the ability to load a specific ODBC driver and bypass the -ODBC driver management layer. A performance increase of ~5Kqps was seen using -this method with the libsqlite3odbc driver. To do this, specify the `dynodbc` -flag in the defines section of the `binding.gyp` file. You will also need to -remove any library references in `binding.gyp`. Then execute `node-gyp -rebuild`. +**Callbacks** ```javascript - -'defines' : [ - "dynodbc" -], -'conditions' : [ - [ 'OS == "linux"', { - 'libraries' : [ - //remove this: '-lodbc' - ], - +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +connection.createStatement((error1, statement) => { + if (error1) { return; } // handle + statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)' (error2) => { + if (error2) { return; } // handle + // statement has been prepared, can bind and execute + }); +}); ``` -### Unicode +--- + +### `.bind(parameters, callback?)` + +Binds an array of values to the parameters on the prepared SQL statement. Cannot be called before `.prepare`. + +#### Parameters: +* **sql**: An array of values to bind to the sql statement previously prepared. All parameters will be input parameters. The number of values passed in the array must match the number of parameters to bind to in the prepared statement. +* **{OPTIONAL} callback**: The function called when `.bind` has finished execution. If no callback function is given, `.bind` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -By default, UNICODE suppport is enabled. This should provide the most accurate -way to get Unicode strings submitted to your database. For best results, you -may want to put your Unicode string into bound parameters. +#### Examples: -However, if you experience issues or you think that submitting UTF8 strings will -work better or faster, you can remove the `UNICODE` define in `binding.gyp` +**Promises** ```javascript - -'defines' : [ - "UNICODE" -], - -``` +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function bindExample() { + const statement = await connection.createStatement(); + await statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)'); + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + await statement.bind([1, 'Name']); + // statement has been prepared and values bound, can now execute +} -### timegm vs timelocal +bindExample(); +``` -When converting a database time to a C time one may use `timegm` or `timelocal`. See -`man timegm` for the details of these two functions. By default the node-odbc bindings -use `timelocal`. 
If you would prefer for it to use `timegm` then specify the `TIMEGM` -define in `binding.gyp` +**Callbacks** ```javascript - -'defines' : [ - "TIMEGM" -], - +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +connection.createStatement((error1, statement) => { + if (error1) { return; } // handle + statement.prepare('INSERT INTO MY_TABLE VALUES(?, ?)' (error2) => { + if (error2) { return; } // handle + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + statement.bind([1, 'Name'], (error3) => { + if (error3) { return; } // handle + // statement has been prepared and values bound, can now execute + }); + }); +}); ``` -### Strict Column Naming +--- + +### `.execute(callback?)` + +Executes the prepared and optionally bound SQL statement. + +#### Parameters: +* **{OPTIONAL} callback**: The function called when `.execute` has finished execution. If no callback function is given, `.execute` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error + * result: The [result array](#result-array) returned from the executed statement + +#### Examples: + +**Promises** + +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function executeExample() { + const statement = await connection.createStatement(); + await statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)'); + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + await statement.bind([1, 'Name']); + const result = await statement.execute(); + console.log(result); + +} -When column names are retrieved from ODBC, you can request by SQL_DESC_NAME or -SQL_DESC_LABEL. SQL_DESC_NAME is the exact column name or none if there is none -defined. SQL_DESC_LABEL is the heading or column name or calculation. -SQL_DESC_LABEL is used by default and seems to work well in most cases. +executeExample(); +``` -If you want to use the exact column name via SQL_DESC_NAME, enable the `STRICT_COLUMN_NAMES` -define in `binding.gyp` +**Callbacks** ```javascript - -'defines' : [ - "STRICT_COLUMN_NAMES" -], - +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +connection.createStatement((error1, statement) => { + if (error1) { return; } // handle + statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)' (error2) => { + if (error2) { return; } // handle + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + statement.bind([1, 'Name'], (error3) => { + if (error3) { return; } // handle + statement.execute((error4, result) => { + if (error4) { return; } // handle + console.log(result); + }) + }); + }); +}); ``` -tips ----- -### Using node < v0.10 on Linux +--- -Be aware that through node v0.9 the uv_queue_work function, which is used to -execute the ODBC functions on a separate thread, uses libeio for its thread -pool. This thread pool by default is limited to 4 threads. +### `.close(callback?)` -This means that if you have long running queries spread across multiple -instances of odbc.Database() or using odbc.Pool(), you will only be able to -have 4 concurrent queries. +Closes the Statement, freeing the statement handle. Running functions on the statement after closing will result in an error. -You can increase the thread pool size by using @developmentseed's [node-eio] -(https://github.com/developmentseed/node-eio). 
+#### Parameters: +* **{OPTIONAL} callback**: The function called when `.close` has finished execution. If no callback function is given, `.close` will return a native JavaScript `Promise`. Callback signature is: + * error: The error that occured in execution, or `null` if no error -#### install: -```bash -npm install eio +#### Examples: + +**Promises** + +```javascript +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +// can only use await keywork in an async function +async function executeExample() { + const statement = await connection.createStatement(); + await statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)'); + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + await statement.bind([1, 'Name']); + const result = await statement.execute(); + console.log(result); + await statement.close(); +} + +executeExample(); ``` -#### usage: +**Callbacks** + ```javascript -var eio = require('eio'); -eio.setMinParallel(threadCount); +const { Connection } = require('odbc'); +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +connection.createStatement((error1, statement) => { + if (error1) { return; } // handle + statement.prepare('INSTERT INTO MY_TABLE VALUES(?, ?)' (error2) => { + if (error2) { return; } // handle + // Assuming MY_TABLE has INTEGER and VARCHAR fields. + statement.bind([1, 'Name'], (error3) => { + if (error3) { return; } // handle + statement.execute((error4, result) => { + if (error4) { return; } // handle + console.log(result); + statement.close((error5) => { + if (error5) { return; } // handle + // statement closed successfully + }) + }) + }); + }); +}); ``` -### Using the FreeTDS ODBC driver +--- +--- -* If you have column names longer than 30 characters, you should add - "TDS_Version=7.0" to your connection string to retrive the full column name. - * Example : "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname;TDS_Version=7.0" -* If you got error "[unixODBC][FreeTDS][SQL Server]Unable to connect to data source" - Try use SERVERNAME instead of SERVER - * Example : "DRIVER={FreeTDS};SERVERNAME=host;UID=user;PWD=password;DATABASE=dbname" -* Be sure that your odbcinst.ini has the proper threading configuration for your - FreeTDS driver. If you choose the incorrect threading model it may cause - the thread pool to be blocked by long running queries. This is what - @wankdanker currently uses on Ubuntu 12.04: +## Future improvements -``` -[FreeTDS] -Description = TDS driver (Sybase/MS SQL) -Driver = libtdsodbc.so -Setup = libtdsS.so -CPTimeout = 120 -CPReuse = -Threading = 0 -``` +Development of `node-odbc` is an ongoing endeavor, and there are many planned improvements for the package. If you would like to see something, simply add it to the Issues and we will respond! 
-contributors ------- +## contributors + +* Mark Irish (mirish@ibm.com) * Dan VerWeire (dverweire@gmail.com) * Lee Smith (notwink@gmail.com) * Bruno Bigras @@ -678,23 +895,12 @@ contributors license ------- -Copyright (c) 2013 Dan VerWeire +Copyright (c) 2013 Dan VerWeire Copyright (c) 2010 Lee Smith -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ofthe Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/binding.gyp b/binding.gyp old mode 100644 new mode 100755 index 52f55f6..a2933d4 --- a/binding.gyp +++ b/binding.gyp @@ -1,20 +1,18 @@ { 'targets' : [ { - 'target_name' : 'odbc_bindings', + 'target_name' : 'odbc', 'sources' : [ 'src/odbc.cpp', 'src/odbc_connection.cpp', 'src/odbc_statement.cpp', - 'src/odbc_result.cpp', 'src/dynodbc.cpp' ], - 'cflags' : ['-Wall', '-Wextra', '-Wno-unused-parameter'], + 'cflags' : ['-Wall', '-Wextra', '-Wno-unused-parameter', '-DNAPI_DISABLE_CPP_EXCEPTIONS'], 'include_dirs': [ - " { + this.odbcConnection.query(sql, parameters, (error, result) => { + if (error) { + reject(error); + } else { + resolve(result); + } + }); + }); + } + + return this.odbcConnection.query(sql, parameters, callback); + } + + /** + * + * @param {string} name + * @param {Array} parameters + * @param {function} [cb] + */ + callProcedure(catalog, schema, name, params = undefined, cb = undefined) { + // name + // name, params + // name, cb + // name, params, cb + + let callback = cb; + let parameters = params; + + if (typeof callback === 'undefined') { + if (typeof parameters === 'function') { + callback = parameters; + parameters = null; + } else if (typeof parameters === 'undefined') { + parameters = null; + } + } + + if (typeof name !== 'string' + || (parameters !== null && !Array.isArray(parameters)) + || (typeof callback !== 'function' && typeof callback !== 'undefined')) { + throw new TypeError('[node-odbc]: Incorrect function signature for call to connection.query({string}, {array}[optional], {function}[optional]).'); + } + + // promise... + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.callProcedure(catalog, schema, name, parameters, (error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + return this.odbcConnection.callProcedure(catalog, schema, name, parameters, callback); + } + + // TODO: Write the documentation + /** + * + * @param {*} callback + */ + createStatement(callback = undefined) { + // type-checking + if (typeof callback !== 'function' && typeof callback !== 'undefined') { + throw new TypeError('[node-odbc]: Incorrect function signature for call to connection.createStatement({function}[optional]).'); + } + + // promise... + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.createStatement((error, odbcStatement) => { + if (error) { + reject(new Error(error)); + } else { + const statement = new Statement(odbcStatement); + resolve(statement); + } + }); + }); + } + + + // ...or callback + return this.odbcConnection.createStatement((error, odbcStatement) => { + if (error) { return callback(error, null); } + + const statement = new Statement(odbcStatement); + return callback(null, statement); + }); + } + + /** TODO: + * Get the value of the passed attribute from the connection. Asynchronous, can be used either + * with a callback function or a Promise. + * @param {string} attribute - The title of the book. + * @param {function} [callback] - Callback function. If not passed, a Promise will be returned. + */ + close(callback = undefined) { + // type-checking + if (typeof callback !== 'function' && typeof callback !== 'undefined') { + throw new TypeError('[node-odbc]: Incorrect function signature for call to connection.close({function}[optional]).'); + } + + // promise... 
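+    // With no callback supplied, wrap the native close() call in a Promise so the
+    // caller can await it; otherwise the native binding invokes the callback directly.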
+ if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.close((error) => { + if (error) { + reject(new Error(error)); + } else { + resolve(); + } + }); + }); + } + + // ...or callback + return this.odbcConnection.close(callback); + } + + // TODO: Documentation + columns(catalog, schema, table, type, callback = undefined) { + // promise... + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.columns(catalog, schema, table, type, (error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + this.odbcConnection.columns(catalog, schema, table, type, callback); + return undefined; + } + + // TODO: Documentation + tables(catalog, schema, table, type, callback = undefined) { + // promise... + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.tables(catalog, schema, table, type, (error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + this.odbcConnection.tables(catalog, schema, table, type, callback); + return undefined; + } + + /** + * Begins a transaction, turning off auto-commit. Transaction is ended with commit() or + * rollback(). + * @param {function} [callback] - Callback function. If not passed, a Promise will be returned. + */ + beginTransaction(callback = undefined) { + // promise... + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.beginTransaction((error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + return this.odbcConnection.beginTransaction(callback); + } + + /** + * Asynchronously ends the transaction with a commit. + * @param {function} [callback] - Callback function. If not passed, a Promise will be returned. + */ + commit(callback = undefined) { + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.commit((error) => { + if (error) { + reject(new Error(error)); + } else { + resolve(); + } + }); + }); + } + + return this.odbcConnection.commit(callback); + } + + /** + * Asynchronously ends the transaction with a rollback. + * @param {function} [callback] - Callback function. If not passed, a Promise will be returned. + */ + rollback(callback = undefined) { + if (callback === undefined) { + return new Promise((resolve, reject) => { + this.odbcConnection.rollback((error) => { + if (error) { + reject(new Error(error)); + } else { + resolve(); + } + }); + }); + } + + return this.odbcConnection.rollback(callback); + } +} + +module.exports.Connection = Connection; diff --git a/lib/Pool.js b/lib/Pool.js new file mode 100755 index 0000000..26c13c9 --- /dev/null +++ b/lib/Pool.js @@ -0,0 +1,229 @@ +const odbc = require('../build/Release/odbc.node'); +const { Connection } = require('./Connection'); + +// TODO: Have to add options: +// increase Pool size or no +// max pool size? 
+// starting pool size +// increment size +class Pool { + constructor(connectionString, initialSize = 10) { + this.isInitialized = false; + this.freeConnections = []; + + if (typeof connectionString === 'object') { + const configObject = connectionString; + this.connectionString = configObject.connectionString; + if (Object.prototype.hasOwnProperty.call(configObject, 'connectionString')) { + this.connectionString = configObject.connectionString; + } else { + throw new Error('Pool configuration object must contain "connectionString" key'); + } + + this.initialSize = configObject.initialSize || 10; + this.incrementSize = configObject.incrementSize || configObject.initialSize || 10; + this.connectionTimeout = configObject.connectionTimeout || 1000; + this.idleTimeout = configObject.idleTimeout || 1000; + this.maxSize = configObject.maxSize || 64; + this.shrink = configObject.shrink || true; + } else { + this.connectionString = connectionString; + this.initialSize = initialSize; + this.incrementSize = initialSize; + this.maxSize = 100; + this.shrink = true; + } + } + + async init(callback = undefined) { + if (!this.isInitialized) { + // promise... + if (typeof callback === 'undefined') { + return new Promise(async (resolve, reject) => { + try { + await this.increasePoolSize(this.initialSize); + this.isInitialized = true; + resolve(null); + } catch (error) { + reject(new Error(error)); + } + }); + } + + // ...or callback + try { + await this.increasePoolSize(this.initialSize); + this.isInitialized = true; + } catch (error) { + return callback(error); + } + return callback(null); + } + + console.log('.init() was called, but the Pool was already initialized.'); + return undefined; + } + + // TODO: Documentation + // TODO: Does this need to be async? + // returns a open connection, ready to use. + // should overwrite the 'close' function of the connection, and rename it is 'nativeClose', so + // that that close can still be called. + async connect(callback = undefined) { + if (this.freeConnections.length < this.poolSize) { + await this.increasePoolSize(this.incrementSize); + } + + const connection = this.freeConnections.pop(); + + connection.nativeClose = connection.close; + + connection.close = async (closeCallback = undefined) => { + if (typeof closeCallback === 'undefined') { + return new Promise((resolve, reject) => { + connection.close((error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + + if (this.shrink === false || this.freeConnections.length < this.initialSize) { + this.increasePoolSize(1); + } + }); + } + + connection.nativeClose(closeCallback); + + if (this.shrink === false || this.freeConnections.length < this.initialSize) { + try { + this.increasePoolSize(1); + } catch (error1) { + console.error(error1); + } + } + + return undefined; + }; + + // promise... 
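+    // If no callback was given, resolve with the checked-out connection,
+    // or reject if no connection was available.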
+    if (typeof callback === 'undefined') {
+      return new Promise((resolve, reject) => {
+        if (connection == null) {
+          reject(new Error('Pool.connect() could not retrieve a connection from the pool'));
+        } else {
+          resolve(connection);
+        }
+      });
+    }
+
+    // ...or callback
+    return callback(null, connection);
+  }
+
+  query(sql, params, cb) {
+    // determine the parameters passed
+    let callback = cb;
+    let parameters = params;
+
+    if (typeof callback === 'undefined') {
+      if (typeof parameters === 'function') {
+        callback = parameters;
+        parameters = null;
+      } else if (typeof parameters === 'undefined') {
+        parameters = null;
+      } // else parameters = params, check type in this.ODBCconnection.query
+    }
+
+    const connection = this.freeConnections.pop();
+
+    // promise...
+    if (typeof callback !== 'function') {
+      return new Promise((resolve, reject) => {
+        connection.query(sql, parameters, (error, result) => {
+          // after running, close the connection whether there was an error or not
+          connection.close((closeError) => {
+            // replace the used connection with a fresh one
+            this.increasePoolSize(1);
+            if (closeError) {
+              // TODO: throw an error
+            }
+          });
+
+          if (error) {
+            reject(new Error(error));
+          } else {
+            resolve(result);
+          }
+        });
+      });
+    }
+
+    // ...or callback
+    return connection.query(sql, parameters, (error, result) => {
+      // after running, close the connection whether there was an error or not
+      connection.close((closeError) => {
+        // replace the used connection with a fresh one
+        this.increasePoolSize(1);
+        if (closeError) {
+          // TODO: throw an error
+        }
+      });
+      callback(error, result);
+    });
+  }
+
+  // These are PRIVATE (they can't be made private in ES6, so just don't use them)!
+
+  // odbc.connect runs on an AsyncWorker, so this is truly non-blocking
+  async increasePoolSize(count) {
+    return new Promise((resolve, reject) => {
+      Pool.generateConnections(this.connectionString, count, (error, connections) => {
+        if (error) {
+          reject(new Error(error));
+        } else {
+          this.freeConnections = [...this.freeConnections, ...connections];
+          resolve();
+        }
+      });
+    });
+  }
+
+  static async generateConnections(connectionString, count, callback) {
+    // promise...
+    if (typeof callback === 'undefined') {
+      return new Promise((resolve, reject) => {
+        odbc.connect(connectionString, count, (error, odbcConnections) => {
+          if (error) {
+            reject(new Error(error));
+          } else {
+            const connections = Pool.wrapConnections(odbcConnections);
+            resolve(connections);
+          }
+        });
+      });
+    }
+
+    // ...or callback
+    return odbc.connect(connectionString, count, (error, odbcConnections) => {
+      if (error) {
+        callback(error, null);
+        return undefined;
+      }
+
+      const connections = Pool.wrapConnections(odbcConnections);
+      callback(null, connections);
+      return undefined;
+    });
+  }
+
+  static wrapConnections(odbcConnections) {
+    const connectionsArray = [];
+    odbcConnections.forEach((odbcConnection) => {
+      connectionsArray.push(new Connection(odbcConnection));
+    });
+    return connectionsArray;
+  }
+}
+
+module.exports.Pool = Pool;
diff --git a/lib/Statement.js b/lib/Statement.js
new file mode 100755
index 0000000..6278fa9
--- /dev/null
+++ b/lib/Statement.js
@@ -0,0 +1,116 @@
+class Statement {
+  constructor(odbcStatement) {
+    this.odbcStatement = odbcStatement;
+  }
+
+  /**
+   * Prepares an SQL statement template to which parameters can be bound and which can then be
+   * executed.
+   * @param {string} sql - The SQL statement template to prepare, which may contain ? parameter
+   * markers to be bound later.
+   * @param {function} [callback] - The callback function that returns the result. If omitted, uses a Promise.
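+   * @example
+   * // A minimal sketch; MYSCHEMA.MYTABLE is a hypothetical table, and `statement` is assumed
+   * // to have been obtained from connection.createStatement():
+   * await statement.prepare('INSERT INTO MYSCHEMA.MYTABLE(ID, NAME) VALUES(?, ?)');
+   * await statement.bind([1, 'Alice']);
+   * const result = await statement.execute();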
+ * @returns {undefined|Promise} + */ + prepare(sql, callback = undefined) { + if (typeof sql !== 'string' + || (typeof callback !== 'function' && typeof callback !== 'undefined')) { + throw new TypeError('[node-odbc]: Incorrect function signature for call to statement.prepare({string}, {function}[optional]).'); + } + + // promise... + if (typeof callback === 'undefined') { + return new Promise((resolve, reject) => { + this.odbcStatement.prepare(sql, (error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + return this.odbcStatement.prepare(sql, callback); + } + + /** + * Bind parameters on the previously prepared SQL statement template. + * @param {*[]} parameters - The parameters to bind to the previously prepared SQL statement. + * @param {function} [callback] - The callback function that returns the result. If omitted, uses a Promise. + * @return {undefined|Promise} + */ + bind(parameters, callback = undefined) { + if (!Array.isArray(parameters) + || (typeof callback !== 'function' && typeof callback !== 'undefined')) { + throw new TypeError('[node-odbc]: Incorrect function signature for call to statement.bind({array}, {function}[optional]).'); + } + + // promise... + if (typeof callback === 'undefined') { + return new Promise((resolve, reject) => { + this.odbcStatement.bind(parameters, (error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + return this.odbcStatement.bind(parameters, callback); + } + + /** + * Executes the prepared SQL statement template with the bound parameters, returning the result. + * @param {function} [callback] - The callback function that returns the result. If omitted, uses a Promise. + */ + execute(callback = undefined) { + if (typeof callback !== 'function' && typeof callback !== 'undefined') { + throw new TypeError('[node-odbc]: Incorrect function signature for call to statement.execute({function}[optional]).'); + } + + if (typeof callback === 'undefined') { + return new Promise((resolve, reject) => { + this.odbcStatement.execute((error, result) => { + if (error) { + reject(new Error(error)); + } else { + resolve(result); + } + }); + }); + } + + // ...or callback + return this.odbcStatement.execute(callback); + } + + /** + * Closes the statement, deleting the prepared statement and freeing the handle, making further + * calls on the object invalid. + * @param {function} [callback] - The callback function that returns the result. If omitted, uses a Promise. 
+ */ + close(callback = undefined) { + if (typeof callback !== 'function' && typeof callback !== 'undefined') { + throw new TypeError('[node-odbc]: Incorrect function signature for call to statement.close({function}[optional]).'); + } + + if (typeof callback === 'undefined') { + return new Promise((resolve, reject) => { + this.odbcStatement.close((error) => { + if (error) { + reject(new Error(error)); + } else { + resolve(); + } + }); + }); + } + + // ...or callback + return this.odbcStatement.close(callback); + } +} + +module.exports.Statement = Statement; diff --git a/lib/odbc.d.ts b/lib/odbc.d.ts deleted file mode 100644 index a3c3a28..0000000 --- a/lib/odbc.d.ts +++ /dev/null @@ -1,168 +0,0 @@ -declare function odbc(options?: odbc.DatabaseOptions): odbc.Database; - -declare namespace odbc { - export const SQL_CLOSE: number; - export const SQL_DROP: number; - export const SQL_UNBIND: number; - export const SQL_RESET_PARAMS: number; - export const SQL_DESTROY: number; - export const FETCH_ARRAY: number; - export const FETCH_OBJECT: number; - - export let debug: boolean; - - export interface ConnctionInfo { - [key: string]: string; - } - - export interface DatabaseOptions { - connectTimeout?: number; - loginTimeout?: number; - } - - export interface DescribeOptions { - database: string; - schema?: string; - table?: string; - type?: string; - column?: string; - } - - export interface SimpleQueue { - push(fn: Function): void; - next(): any; - maybeNext(): any; - } - - export interface ODBCTable { - TABLE_CAT: string; - TABLE_SCHEM: string; - TABLE_NAME: string; - TABLE_TYPE: string; - REMARKS: string; - } - - export interface ODBCColumn { - TABLE_CAT: string; - TABLE_SCHEM: string; - TABLE_NAME: string; - COLUMN_NAME: string; - DATA_TYPE: number; - TYPE_NAME: string; - COLUMN_SIZE: number; - BUFFER_LENGTH: number; - DECIMAL_DIGITS: number; - NUM_PREC_RADIX: number; - NULLABLE: number; - REMARKS: string; - COLUMN_DEF: string; - SQL_DATA_TYPE: number; - SQL_DATETIME_SUB: number; - CHAR_OCTET_LENGTH: number; - ORDINAL_POSITION: number; - IS_NULLABLE: string; - } - - export interface ODBCConnection { - connected: boolean; - connectTimeout: number; - loginTimeout: number; - open(connctionString: string | ConnctionInfo, cb: (err: any, result: any) => void): void; - openSync(connctionString: string | ConnctionInfo): void; - close(cb: (err: any) => void): void; - closeSync(): void; - createStatement(cb: (err: any, stmt: ODBCStatement) => void): void; - createStatementSync(): ODBCStatement; - query(sql: string, cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; - query(sql: string, bindingParameters: any[], cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; - querySync(sql: string, bindingParameters?: any[]): ResultRow[]; - beginTransaction(cb: (err: any) => void): void; - beginTransactionSync(): void; - endTransaction(rollback: boolean, cb: (err: any) => void): void; - endTransactionSync(rollback: boolean): void; - tables(catalog: string | null, schema: string | null, table: string | null, type: string | null, cb: (err: any, result: ODBCResult) => void): void; - columns(catalog: string | null, schema: string | null, table: string | null, column: string | null, cb: (err: any, result: ODBCResult) => void): void; - } - - export interface ResultRow { - [key: string]: any; - } - - export interface ODBCResult { - fetchMode: number; - fetchAll(cb: (err: any, data: ResultRow[]) => void): void; - fetchAllSync(): ResultRow[]; - fetch(cb: (err: any, data: ResultRow) => 
void): void; - fetchSync(): ResultRow; - closeSync(): void; - moreResultsSync(): any; - getColumnNamesSync(): string[]; - } - - export interface ODBCStatement { - queue: SimpleQueue; - execute(cb: (err: any, result: ODBCResult) => void): void; - execute(bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; - executeSync(bindingParameters?: any[]): ODBCResult; - executeDirect(sql: string, cb: (err: any, result: ODBCResult) => void): void; - executeDirect(sql: string, bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; - executeDirectSync(sql: string, bindingParameters?: any[]): ODBCResult; - executeNonQuery(cb: (err: any, result: number) => void): void; - executeNonQuery(bindingParameters: any[], cb: (err: any, result: number) => void): void; - executeNonQuerySync(bindingParameters?: any[]): number; - prepare(sql: string, cb: (err: any) => void): void; - prepareSync(sql: string): void; - bind(bindingParameters: any[], cb: (err: any) => void): void; - bindSync(bindingParameters: any[]): void; - closeSync(): void; - } - - export class Database { - constructor(options?: DatabaseOptions); - conn: ODBCConnection; - queue: SimpleQueue; - connected: boolean; - connectTimeout: number; - loginTimeout: number; - SQL_CLOSE: number; - SQL_DROP: number; - SQL_UNBIND: number; - SQL_RESET_PARAMS: number; - SQL_DESTROY: number; - FETCH_ARRAY: number; - FETCH_OBJECT: number; - open(connctionString: string | ConnctionInfo, cb: (err: any, result: any) => void): void; - openSync(connctionString: string | ConnctionInfo): void; - close(cb: (err: any) => void): void; - closeSync(): void; - query(sql: string, cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; - query(sql: string, bindingParameters: any[], cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; - querySync(sql: string, bindingParameters?: any[]): ResultRow[]; - queryResult(sql: string, cb: (err: any, result: ODBCResult) => void): void; - queryResult(sql: string, bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; - queryResultSync(sql: string, bindingParameters?: any[]): ODBCResult; - prepare(sql: string, cb: (err: any, statement: ODBCStatement) => void): void; - prepareSync(sql: string): ODBCStatement; - beginTransaction(cb: (err: any) => void): void; - beginTransactionSync(): void; - endTransaction(rollback: boolean, cb: (err: any) => void): void; - endTransactionSync(rollback: boolean): void; - commitTransaction(cb: (err: any) => void): void; - commitTransactionSync(): void; - rollbackTransaction(cb: (err: any) => void): void; - rollbackTransactionSync(): void; - tables(catalog: string | null, schema: string | null, table: string | null, type: string | null, cb: (err: any, result: ODBCTable[]) => void): void; - columns(catalog: string | null, schema: string | null, table: string | null, column: string | null, cb: (err: any, result: ODBCColumn[]) => void): void; - describe(options: DescribeOptions, cb: (err: any, result: (ODBCTable & ODBCColumn)[]) => void): void; - } - - export class Pool { - constructor(options?: DatabaseOptions); - open(connctionString: string, cb: (err: any, db: Database) => void): void; - close(cb: (err: any) => void): void; - } - - export function open(connctionString: string | ConnctionInfo, cb: (err: any, result: any) => void): void; -} - -export = odbc; diff --git a/lib/odbc.js b/lib/odbc.js old mode 100644 new mode 100755 index 23a2de4..6caa8c0 --- a/lib/odbc.js +++ b/lib/odbc.js @@ -1,820 +1,9 @@ -/* - Copyright 
(c) 2013, Dan VerWeire - Copyright (c) 2010, Lee Smith - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -var odbc = require("bindings")("odbc_bindings") - , SimpleQueue = require("./simple-queue") - , util = require("util") - ; - -module.exports = function (options) { - return new Database(options); -} - -module.exports.debug = false; - -module.exports.Database = Database; -module.exports.ODBC = odbc.ODBC; -module.exports.ODBCConnection = odbc.ODBCConnection; -module.exports.ODBCStatement = odbc.ODBCStatement; -module.exports.ODBCResult = odbc.ODBCResult; -module.exports.loadODBCLibrary = odbc.loadODBCLibrary; - -module.exports.open = function (connectionString, options, cb) { - var db; - - if (typeof options === 'function') { - cb = options; - options = null; - } - - db = new Database(options); - - db.open(connectionString, function (err) { - cb(err, db); - }); -} - -function Database(options) { - var self = this; - - options = options || {}; - - if (odbc.loadODBCLibrary) { - if (!options.library && !module.exports.library) { - throw new Error("You must specify a library when complied with dynodbc, " - + "otherwise this jams will segfault."); - } - - if (!odbc.loadODBCLibrary(options.library || module.exports.library)) { - throw new Error("Could not load library. You may need to specify full " - + "path."); - } - } - - self.odbc = (options.odbc) ? options.odbc : new odbc.ODBC(); - self.odbc.domain = process.domain; - self.queue = new SimpleQueue(); - self.fetchMode = options.fetchMode || null; - self.connected = false; - self.connectTimeout = (options.hasOwnProperty('connectTimeout')) - ? options.connectTimeout - : null - ; - self.loginTimeout = (options.hasOwnProperty('loginTimeout')) - ? 
options.loginTimeout - : null - ; -} - -//Expose constants -Object.keys(odbc.ODBC).forEach(function (key) { - if (typeof odbc.ODBC[key] !== "function") { - //On the database prototype - Database.prototype[key] = odbc.ODBC[key]; - - //On the exports - module.exports[key] = odbc.ODBC[key]; - } -}); - -Database.prototype.open = function (connectionString, cb) { - var self = this; - - if (typeof(connectionString) == "object") { - var obj = connectionString; - connectionString = ""; - - Object.keys(obj).forEach(function (key) { - connectionString += key + "=" + obj[key] + ";"; - }); - } - - self.odbc.createConnection(function (err, conn) { - if (err) return cb(err); - - self.conn = conn; - self.conn.domain = process.domain; - - if (self.connectTimeout || self.connectTimeout === 0) { - self.conn.connectTimeout = self.connectTimeout; - } - - if (self.loginTimeout || self.loginTimeout === 0) { - self.conn.loginTimeout = self.loginTimeout; - } - - self.conn.open(connectionString, function (err, result) { - if (err) return cb(err); - - self.connected = true; - - return cb(err, result); - }); - }); -}; - -Database.prototype.openSync = function (connectionString) { - var self = this; - - self.conn = self.odbc.createConnectionSync(); - - if (self.connectTimeout || self.connectTimeout === 0) { - self.conn.connectTimeout = self.connectTimeout; - } - - if (self.loginTimeout || self.loginTimeout === 0) { - self.conn.loginTimeout = self.loginTimeout; - } - - if (typeof(connectionString) == "object") { - var obj = connectionString; - connectionString = ""; - - Object.keys(obj).forEach(function (key) { - connectionString += key + "=" + obj[key] + ";"; - }); - } - - var result = self.conn.openSync(connectionString); - - if (result) { - self.connected = true; - } - - return result; -} - -Database.prototype.close = function (cb) { - var self = this; - - self.queue.push(function (next) { - //check to see if conn still exists (it's deleted when closed) - if (!self.conn) { - if (cb) cb(null); - return next(); - } - - self.conn.close(function (err) { - self.connected = false; - delete self.conn; - - if (cb) cb(err); - return next(); - }); - }); -}; - -Database.prototype.closeSync = function () { - var self = this; - - var result = self.conn.closeSync(); - - self.connected = false; - delete self.conn; - - return result -} - -Database.prototype.query = function (sql, params, cb) { - var self = this; - - if (typeof(params) == 'function') { - cb = params; - params = null; - } - - if (!self.connected) { - return cb({ message : "Connection not open."}, [], false); - } - - self.queue.push(function (next) { - function cbQuery (initialErr, result) { - fetchMore(); - - function fetchMore() { - if (self.fetchMode) { - result.fetchMode = self.fetchMode; - } - - result.fetchAll(function (err, data) { - var moreResults, moreResultsError = null; - - try { - moreResults = result.moreResultsSync(); - } - catch (e) { - moreResultsError = e; - //force to check for more results - moreResults = true; - } - - //close the result before calling back - //if there are not more result sets - if (!moreResults) { - result.closeSync(); - } - - cb(err || initialErr, data, moreResults); - initialErr = null; - - while (moreResultsError) { - try { - moreResults = result.moreResultsSync(); - cb(moreResultsError, [], moreResults); // No errors left - still need to report the - // last one, though - moreResultsError = null; - } catch (e) { - cb(moreResultsError, [], moreResults); - moreResultsError = e; - } - } - - if (moreResults) { - return 
fetchMore(); - } - else { - return next(); - } - }); - } - } - - if (params) { - self.conn.query(sql, params, cbQuery); - } - else { - self.conn.query(sql, cbQuery); - } - }); -}; - -Database.prototype.queryResult = function (sql, params, cb) { - var self = this; - - if (typeof(params) == 'function') { - cb = params; - params = null; - } - - if (!self.connected) { - return cb({ message : "Connection not open."}, null); - } - - self.queue.push(function (next) { - //ODBCConnection.query() is the fastest-path querying mechanism. - if (params) { - self.conn.query(sql, params, cbQuery); - } - else { - self.conn.query(sql, cbQuery); - } - - function cbQuery (err, result) { - if (err) { - cb(err, null); - - return next(); - } - - if (self.fetchMode) { - result.fetchMode = self.fetchMode; - } - - cb(err, result); - - return next(); - } - }); -}; - -Database.prototype.queryResultSync = function (sql, params) { - var self = this, result; - - if (!self.connected) { - throw ({ message : "Connection not open."}); - } - - if (params) { - result = self.conn.querySync(sql, params); - } - else { - result = self.conn.querySync(sql); - } - - if (self.fetchMode) { - result.fetchMode = self.fetchMode; - } - - return result; -}; - -Database.prototype.querySync = function (sql, params) { - var self = this, result; - - if (!self.connected) { - throw ({ message : "Connection not open."}); - } - - if (params) { - result = self.conn.querySync(sql, params); - } - else { - result = self.conn.querySync(sql); - } - - if (self.fetchMode) { - result.fetchMode = self.fetchMode; - } - - var data = result.fetchAllSync(); - - result.closeSync(); - - return data; -}; - -Database.prototype.beginTransaction = function (cb) { - var self = this; - - self.conn.beginTransaction(cb); - - return self; -}; - -Database.prototype.endTransaction = function (rollback, cb) { - var self = this; - - self.conn.endTransaction(rollback, cb); - - return self; -}; - -Database.prototype.commitTransaction = function (cb) { - var self = this; - - self.conn.endTransaction(false, cb); //don't rollback - - return self; -}; - -Database.prototype.rollbackTransaction = function (cb) { - var self = this; - - self.conn.endTransaction(true, cb); //rollback - - return self; -}; - -Database.prototype.beginTransactionSync = function () { - var self = this; - - self.conn.beginTransactionSync(); - - return self; -}; - -Database.prototype.endTransactionSync = function (rollback) { - var self = this; - - self.conn.endTransactionSync(rollback); - - return self; -}; - -Database.prototype.commitTransactionSync = function () { - var self = this; - - self.conn.endTransactionSync(false); //don't rollback - - return self; -}; - -Database.prototype.rollbackTransactionSync = function () { - var self = this; - - self.conn.endTransactionSync(true); //rollback - - return self; -}; - -Database.prototype.columns = function(catalog, schema, table, column, callback) { - var self = this; - if (!self.queue) self.queue = []; - - callback = callback || arguments[arguments.length - 1]; - - self.queue.push(function (next) { - self.conn.columns(catalog, schema, table, column, function (err, result) { - if (err) return callback(err, [], false); - - result.fetchAll(function (err, data) { - result.closeSync(); - - callback(err, data); - - return next(); - }); - }); - }); -}; - -Database.prototype.tables = function(catalog, schema, table, type, callback) { - var self = this; - if (!self.queue) self.queue = []; - - callback = callback || arguments[arguments.length - 1]; - - 
self.queue.push(function (next) { - self.conn.tables(catalog, schema, table, type, function (err, result) { - if (err) return callback(err, [], false); - - result.fetchAll(function (err, data) { - result.closeSync(); - - callback(err, data); - - return next(); - }); - }); - }); -}; - -Database.prototype.describe = function(obj, callback) { - var self = this; - - if (typeof(callback) != "function") { - throw({ - error : "[node-odbc] Missing Arguments", - message : "You must specify a callback function in order for the describe method to work." - }); - - return false; - } - - if (typeof(obj) != "object") { - callback({ - error : "[node-odbc] Missing Arguments", - message : "You must pass an object as argument 0 if you want anything productive to happen in the describe method." - }, []); - - return false; - } - - if (!obj.database) { - callback({ - error : "[node-odbc] Missing Arguments", - message : "The object you passed did not contain a database property. This is required for the describe method to work." - }, []); - - return false; - } - - //set some defaults if they weren't passed - obj.schema = obj.schema || "%"; - obj.type = obj.type || "table"; - - if (obj.table && obj.column) { - //get the column details - self.columns(obj.database, obj.schema, obj.table, obj.column, callback); - } - else if (obj.table) { - //get the columns in the table - self.columns(obj.database, obj.schema, obj.table, "%", callback); - } - else { - //get the tables in the database - self.tables(obj.database, obj.schema, null, obj.type || "table", callback); - } -}; - -Database.prototype.prepare = function (sql, cb) { - var self = this; - - self.conn.createStatement(function (err, stmt) { - if (err) return cb(err); - - stmt.queue = new SimpleQueue(); - - stmt.prepare(sql, function (err) { - if (err) return cb(err); - - return cb(null, stmt); - }); - }); -} - -Database.prototype.prepareSync = function (sql, cb) { - var self = this; - - var stmt = self.conn.createStatementSync(); - - stmt.queue = new SimpleQueue(); - - stmt.prepareSync(sql); - - return stmt; -} - -//Proxy all of the asynchronous functions so that they are queued -odbc.ODBCStatement.prototype._execute = odbc.ODBCStatement.prototype.execute; -odbc.ODBCStatement.prototype._executeDirect = odbc.ODBCStatement.prototype.executeDirect; -odbc.ODBCStatement.prototype._executeNonQuery = odbc.ODBCStatement.prototype.executeNonQuery; -odbc.ODBCStatement.prototype._prepare = odbc.ODBCStatement.prototype.prepare; -odbc.ODBCStatement.prototype._bind = odbc.ODBCStatement.prototype.bind; - -odbc.ODBCStatement.prototype.execute = function (params, cb) { - var self = this; - - self.queue = self.queue || new SimpleQueue(); - - if (!cb) { - cb = params; - params = null; - } - - self.queue.push(function (next) { - //If params were passed to this function, then bind them and - //then execute. - if (params) { - self._bind(params, function (err) { - if (err) { - return cb(err); - } - - self._execute(function (err, result) { - cb(err, result); - - return next(); - }); - }); - } - //Otherwise execute and pop the next bind call - else { - self._execute(function (err, result) { - cb(err, result); - - //NOTE: We only execute the next queued bind call after - // we have called execute() or executeNonQuery(). This ensures - // that we don't call a bind() a bunch of times without ever - // actually executing that bind. 
Not - self.bindQueue && self.bindQueue.next(); - - return next(); - }); - } - }); -}; - -odbc.ODBCStatement.prototype.executeDirect = function (sql, cb) { - var self = this; - - self.queue = self.queue || new SimpleQueue(); - - self.queue.push(function (next) { - self._executeDirect(sql, function (err, result) { - cb(err, result); - - return next(); - }); - }); -}; - -odbc.ODBCStatement.prototype.executeNonQuery = function (params, cb) { - var self = this; - - self.queue = self.queue || new SimpleQueue(); - - if (!cb) { - cb = params; - params = null; - } - - self.queue.push(function (next) { - //If params were passed to this function, then bind them and - //then executeNonQuery. - if (params) { - self._bind(params, function (err) { - if (err) { - return cb(err); - } - - self._executeNonQuery(function (err, result) { - cb(err, result); - - return next(); - }); - }); - } - //Otherwise executeNonQuery and pop the next bind call - else { - self._executeNonQuery(function (err, result) { - cb(err, result); - - //NOTE: We only execute the next queued bind call after - // we have called execute() or executeNonQuery(). This ensures - // that we don't call a bind() a bunch of times without ever - // actually executing that bind. Not - self.bindQueue && self.bindQueue.next(); - - return next(); - }); - } - }); -}; - -odbc.ODBCStatement.prototype.prepare = function (sql, cb) { - var self = this; - - self.queue = self.queue || new SimpleQueue(); - - self.queue.push(function (next) { - self._prepare(sql, function (err) { - cb(err); - - return next(); - }); - }); -}; - -odbc.ODBCStatement.prototype.bind = function (ary, cb) { - var self = this; - - self.bindQueue = self.bindQueue || new SimpleQueue(); - - self.bindQueue.push(function () { - self._bind(ary, function (err) { - cb(err); - - //NOTE: we do not call next() here because - //we want to pop the next bind call only - //after the next execute call - }); - }); -}; - - -//proxy the ODBCResult fetch function so that it is queued -odbc.ODBCResult.prototype._fetch = odbc.ODBCResult.prototype.fetch; - -odbc.ODBCResult.prototype.fetch = function (cb) { - var self = this; - - self.queue = self.queue || new SimpleQueue(); - - self.queue.push(function (next) { - self._fetch(function (err, data) { - if (cb) cb(err, data); - - return next(); - }); - }); -}; - -module.exports.Pool = Pool; - -Pool.count = 0; - -function Pool (options) { - var self = this; - self.index = Pool.count++; - self.availablePool = {}; - self.usedPool = {}; - self.odbc = new odbc.ODBC(); - self.options = options || {} - self.options.odbc = self.odbc; -} - -Pool.prototype.open = function (connectionString, callback) { - var self = this - , db - ; - - //check to see if we already have a connection for this connection string - if (self.availablePool[connectionString] && self.availablePool[connectionString].length) { - db = self.availablePool[connectionString].shift() - self.usedPool[connectionString].push(db) - - callback(null, db); - } - else { - db = new Database(self.options); - db.realClose = db.close; - - db.close = function (cb) { - //call back early, we can do the rest of this stuff after the client thinks - //that the connection is closed. - cb(null); - - - //close the connection for real - //this will kill any temp tables or anything that might be a security issue. 
- db.realClose(function () { - //remove this db from the usedPool - self.usedPool[connectionString].splice(self.usedPool[connectionString].indexOf(db), 1); - - //re-open the connection using the connection string - db.open(connectionString, function (error) { - if (error) { - console.error(error); - return; - } - - //add this clean connection to the connection pool - self.availablePool[connectionString] = self.availablePool[connectionString] || []; - self.availablePool[connectionString].push(db); - exports.debug && console.dir(self); - }); - }); - }; - - db.open(connectionString, function (error) { - exports.debug && console.log("odbc.js : pool[%s] : pool.db.open callback()", self.index); - - self.usedPool[connectionString] = self.usedPool[connectionString] || []; - self.usedPool[connectionString].push(db); - - callback(error, db); - }); - } -}; - -Pool.prototype.close = function (callback) { - var self = this - , required = 0 - , received = 0 - , connections - , key - , x - ; - - exports.debug && console.log("odbc.js : pool[%s] : pool.close()", self.index); - //we set a timeout because a previous db.close() may - //have caused the a behind the scenes db.open() to prepare - //a new connection - setTimeout(function () { - //merge the available pool and the usedPool - var pools = {}; - - for (key in self.availablePool) { - pools[key] = (pools[key] || []).concat(self.availablePool[key]); - } - - for (key in self.usedPool) { - pools[key] = (pools[key] || []).concat(self.usedPool[key]); - } - - exports.debug && console.log("odbc.js : pool[%s] : pool.close() - setTimeout() callback", self.index); - exports.debug && console.dir(pools); - - if (Object.keys(pools).length == 0) { - return callback(); - } - - for (key in pools) { - connections = pools[key]; - required += connections.length; - - exports.debug && console.log("odbc.js : pool[%s] : pool.close() - processing pools %s - connections: %s", self.index, key, connections.length); - - for (x = 0 ; x < connections.length; x ++) { - (function (x) { - //call the realClose method to avoid - //automatically re-opening the connection - exports.debug && console.log("odbc.js : pool[%s] : pool.close() - calling realClose() for connection #%s", self.index, x); - - connections[x].realClose(function () { - exports.debug && console.log("odbc.js : pool[%s] : pool.close() - realClose() callback for connection #%s", self.index, x); - received += 1; - - if (received === required) { - callback(); - - //prevent mem leaks - self = null; - connections = null; - required = null; - received = null; - key = null; - - return; - } - }); - })(x); - } - } - }, 2000); +const { Connection } = require('./Connection'); +const { Pool } = require('./Pool'); +const legacy = require('./legacy/legacy'); // v1.x behavior + +module.exports = { + Pool, + Connection, + legacy, }; diff --git a/lib/simple-queue.js b/lib/simple-queue.js deleted file mode 100644 index a6f784e..0000000 --- a/lib/simple-queue.js +++ /dev/null @@ -1,40 +0,0 @@ -module.exports = SimpleQueue; - -function SimpleQueue() { - var self = this; - - self.fifo = []; - self.executing = false; -} - -SimpleQueue.prototype.push = function (fn) { - var self = this; - - self.fifo.push(fn); - - self.maybeNext(); -}; - -SimpleQueue.prototype.maybeNext = function () { - var self = this; - - if (!self.executing) { - self.next(); - } -}; - -SimpleQueue.prototype.next = function () { - var self = this; - - if (self.fifo.length) { - var fn = self.fifo.shift(); - - self.executing = true; - - fn(function () { - self.executing = 
false; - - self.maybeNext(); - }); - } -}; \ No newline at end of file diff --git a/package-lock.json b/package-lock.json old mode 100644 new mode 100755 index 17417d4..7b37094 --- a/package-lock.json +++ b/package-lock.json @@ -1,18 +1,1467 @@ { "name": "odbc", - "version": "1.4.5", + "version": "2.0.0-beta.0", "lockfileVersion": 1, "requires": true, "dependencies": { - "bindings": { + "@babel/code-frame": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.0.0.tgz", + "integrity": "sha512-OfC2uemaknXr87bdLUkWog7nYuliM9Ij5HUcajsVcMCpQrcLmtxRbVFTIqmcSkSeYRBFBRxs2FiUqFJDLdiebA==", + "dev": true, + "requires": { + "@babel/highlight": "^7.0.0" + } + }, + "@babel/highlight": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.0.0.tgz", + "integrity": "sha512-UFMC4ZeFC48Tpvj7C8UgLvtkaUuovQX+5xNWrsIoMG8o2z+XFKjKaN9iVmS84dPwVN00W4wPmqvYoZF3EGAsfw==", + "dev": true, + "requires": { + "chalk": "^2.0.0", + "esutils": "^2.0.2", + "js-tokens": "^4.0.0" + } + }, + "acorn": { + "version": "6.0.7", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.0.7.tgz", + "integrity": "sha512-HNJNgE60C9eOTgn974Tlp3dpLZdUr+SoxxDwPaY9J/kDNOLQTkaDgwBUXAF4SSsrAwD9RpdxuHK/EbuF+W9Ahw==", + "dev": true + }, + "acorn-jsx": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.0.1.tgz", + "integrity": "sha512-HJ7CfNHrfJLlNTzIEUTj43LNWGkqpRLxm3YjAlcD0ACydk9XynzYsCBHxut+iqt+1aBXkx9UP/w/ZqMr13XIzg==", + "dev": true + }, + "ajv": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.8.1.tgz", + "integrity": "sha512-eqxCp82P+JfqL683wwsL73XmFs1eG6qjw+RD3YHx+Jll1r0jNd4dh8QG9NYAeNGA/hnZjeEDgtTskgJULbxpWQ==", + "dev": true, + "requires": { + "fast-deep-equal": "^2.0.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ansi-escapes": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", + "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", + "dev": true + }, + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "astral-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", + "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "callsites": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.0.0.tgz", + "integrity": "sha512-tWnkwu9YEq2uzlBDI4RcLn8jrFvF9AOi8PxDNU3hZZjJcjkcRAq3vCI+vZcg1SuxISDYe86k9VZFwAxDiJGoAw==", + "dev": true + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true + }, + "circular-json": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/circular-json/-/circular-json-0.3.3.tgz", + "integrity": "sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A==", + "dev": true + }, + "cli-cursor": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", + "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", + "dev": true, + "requires": { + "restore-cursor": "^2.0.0" + } + }, + "cli-width": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz", + "integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=", + "dev": true + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "commander": { + "version": "2.15.1", + "resolved": "http://registry.npmjs.org/commander/-/commander-2.15.1.tgz", + "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "contains-path": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", + "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", + "dev": true + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + 
"shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dev": true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "diff": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", + "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "dev": true + }, + "doctrine": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", + "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "dotenv": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-6.2.0.tgz", + "integrity": "sha512-HygQCKUBSFl8wKQZBSemMywRWcEDNidvNbjGVyZu3nbZ8qq9ubiPoGLMdRDpfSrpkkm9BXYFkpKxxFX38o/76w==" + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es-abstract": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.13.0.tgz", + "integrity": "sha512-vDZfg/ykNxQVwup/8E1BZhVzFfBxs9NqMzGcvIJrqg5k2/5Za2bWo40dK2J1pgLngZ7c+Shh8lwYtLGyrwPutg==", + "dev": true, + "requires": { + "es-to-primitive": "^1.2.0", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "is-callable": "^1.1.4", + "is-regex": "^1.0.4", + "object-keys": "^1.0.12" + } + }, + "es-to-primitive": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.0.tgz", + "integrity": "sha512-qZryBOJjV//LaxLTV6UC//WewneB3LcXOL9NP++ozKVXsIIIpm/2c13UDiD9Jp2eThsecw9m3jPqDwTyobcdbg==", + "dev": true, + "requires": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "eslint": { + "version": "5.13.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.13.0.tgz", + "integrity": "sha512-nqD5WQMisciZC5EHZowejLKQjWGuFS5c70fxqSKlnDME+oz9zmE8KTlX+lHSg+/5wsC/kf9Q9eMkC8qS3oM2fg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.5.3", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^2.1.0", + "eslint-scope": "^4.0.0", + "eslint-utils": "^1.3.1", + "eslint-visitor-keys": "^1.0.0", + "espree": "^5.0.0", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^2.0.0", + "functional-red-black-tree": "^1.0.1", + "glob": "^7.1.2", + "globals": 
"^11.7.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^6.1.0", + "js-yaml": "^3.12.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.5", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": "^1.4.0", + "optionator": "^0.8.2", + "path-is-inside": "^1.0.2", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^5.5.1", + "strip-ansi": "^4.0.0", + "strip-json-comments": "^2.0.1", + "table": "^5.0.2", + "text-table": "^0.2.0" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "dev": true + } + } + }, + "eslint-config-airbnb-base": { + "version": "13.1.0", + "resolved": "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-13.1.0.tgz", + "integrity": "sha512-XWwQtf3U3zIoKO1BbHh6aUhJZQweOwSt4c2JrPDg9FP3Ltv3+YfEv7jIDB8275tVnO/qOHbfuYg3kzw6Je7uWw==", + "dev": true, + "requires": { + "eslint-restricted-globals": "^0.1.1", + "object.assign": "^4.1.0", + "object.entries": "^1.0.4" + } + }, + "eslint-import-resolver-node": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.2.tgz", + "integrity": "sha512-sfmTqJfPSizWu4aymbPr4Iidp5yKm8yDkHp+Ir3YiTHiiDfxh69mOUsmiqW6RZ9zRXFaF64GtYmN7e+8GHBv6Q==", + "dev": true, + "requires": { + "debug": "^2.6.9", + "resolve": "^1.5.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "eslint-module-utils": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.3.0.tgz", + "integrity": "sha512-lmDJgeOOjk8hObTysjqH7wyMi+nsHwwvfBykwfhjR1LNdd7C2uFJBvx4OpWYpXOw4df1yE1cDEVd1yLHitk34w==", + "dev": true, + "requires": { + "debug": "^2.6.8", + "pkg-dir": "^2.0.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "eslint-plugin-import": { + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.16.0.tgz", + "integrity": "sha512-z6oqWlf1x5GkHIFgrSvtmudnqM6Q60KM4KvpWi5ubonMjycLjndvd5+8VAZIsTlHC03djdgJuyKG6XO577px6A==", + "dev": true, + "requires": { + "contains-path": "^0.1.0", + "debug": "^2.6.9", + "doctrine": "1.5.0", + "eslint-import-resolver-node": "^0.3.2", + "eslint-module-utils": "^2.3.0", + "has": "^1.0.3", + "lodash": "^4.17.11", + "minimatch": "^3.0.4", + "read-pkg-up": "^2.0.0", + "resolve": "^1.9.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "doctrine": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", + "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", + "dev": true, + "requires": { + "esutils": "^2.0.2", + "isarray": "^1.0.0" + } + } + } + }, + "eslint-restricted-globals": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/eslint-restricted-globals/-/eslint-restricted-globals-0.1.1.tgz", + "integrity": "sha1-NfDVy8ZMLj7WLpO0saevBbp+1Nc=", + "dev": true + }, + "eslint-scope": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.0.tgz", + "integrity": "sha512-1G6UTDi7Jc1ELFwnR58HV4fK9OQK4S6N985f166xqXxpjU6plxFISJa2Ba9KCQuFa8RCnj/lSFJbHo7UFDBnUA==", + "dev": true, + "requires": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + } + }, + "eslint-utils": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.3.1.tgz", + "integrity": "sha512-Z7YjnIldX+2XMcjr7ZkgEsOj/bREONV60qYeB/bjMAqqqZ4zxKyWX+BOUkdmRmA9riiIPVvo5x86m5elviOk0Q==", + "dev": true + }, + "eslint-visitor-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz", + "integrity": "sha512-qzm/XxIbxm/FHyH341ZrbnMUpe+5Bocte9xkmFMzPMjRaZMcXww+MpBptFvtU+79L362nqiLhekCxCxDPaUMBQ==", + "dev": true + }, + "espree": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-5.0.0.tgz", + "integrity": "sha512-1MpUfwsdS9MMoN7ZXqAr9e9UKdVHDcvrJpyx7mm1WuQlx/ygErEQBzgi5Nh5qBHIoYweprhtMkTCb9GhcAIcsA==", + "dev": true, + "requires": { + "acorn": "^6.0.2", + "acorn-jsx": "^5.0.0", + "eslint-visitor-keys": "^1.0.0" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "esquery": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.0.1.tgz", + "integrity": "sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA==", + "dev": true, + "requires": { + "estraverse": "^4.0.0" + } + }, + "esrecurse": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", + "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "dev": true, + "requires": { + "estraverse": "^4.1.0" + } + }, + "estraverse": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz", + "integrity": "sha1-De4/7TH81GlhjOc0IJn8GvoL2xM=", + "dev": true + }, + "esutils": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", + "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", + "dev": true + }, + "external-editor": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.0.3.tgz", + "integrity": "sha512-bn71H9+qWoOQKyZDo25mOMVpSmXROAsTJVVVYzrrtol3d4y+AsKjf4Iwl2Q+IuT0kFSQ1qo166UuIwqYq7mGnA==", + "dev": true, + "requires": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + } + }, + "fast-deep-equal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", + "integrity": 
"sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "file-entry-cache": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-2.0.0.tgz", + "integrity": "sha1-w5KZDD5oR4PYOLjISkXYoEhFg2E=", + "dev": true, + "requires": { + "flat-cache": "^1.2.1", + "object-assign": "^4.0.1" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "flat-cache": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-1.3.4.tgz", + "integrity": "sha512-VwyB3Lkgacfik2vhqR4uv2rvebqmDvFu4jlN/C1RzWoJEo8I7z4Q404oiqYCkq41mni8EzQnm95emU9seckwtg==", + "dev": true, + "requires": { + "circular-json": "^0.3.1", + "graceful-fs": "^4.1.2", + "rimraf": "~2.6.2", + "write": "^0.2.1" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", + "dev": true + }, + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "globals": { + "version": "11.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.10.0.tgz", + "integrity": "sha512-0GZF1RiPKU97IHUO5TORo9w1PwrH/NBPl+fS7oMLdaTRiYmYbwK4NWoZWrAdd0/abG9R2BU+OiwyQpTpE6pdfQ==", + "dev": true + }, + "graceful-fs": { + "version": "4.1.15", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.15.tgz", + "integrity": "sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA==", + "dev": true + }, + "growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true + }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": 
"sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "has-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.0.tgz", + "integrity": "sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q=", + "dev": true + }, + "he": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", + "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=", + "dev": true + }, + "hosted-git-info": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.7.1.tgz", + "integrity": "sha512-7T/BxH19zbcCTa8XkMlbK5lTo1WtgkFi3GvdWEyNuc4Vex7/9Dqbnpsf4JMydcfj9HCg4zUWFTL3Za6lapg5/w==", + "dev": true + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + }, + "import-fresh": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.0.0.tgz", + "integrity": "sha512-pOnA9tfM3Uwics+SaBLCNyZZZbK+4PTu0OPZtLlMIrv17EdBoC15S9Kn8ckJ9TZTyKb3ywNE5y1yeDxxGA7nTQ==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + } + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + }, + "inquirer": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.2.2.tgz", + "integrity": "sha512-Z2rREiXA6cHRR9KBOarR3WuLlFzlIfAEIiB45ll5SSadMg7WqOh1MKEjjndfuH5ewXdixWCxqnVfGOQzPeiztA==", + "dev": true, + "requires": { + "ansi-escapes": "^3.2.0", + "chalk": "^2.4.2", + "cli-cursor": "^2.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^2.0.0", + "lodash": "^4.17.11", + "mute-stream": "0.0.7", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^2.1.0", + "strip-ansi": "^5.0.0", + "through": "^2.3.6" + }, + "dependencies": { + "ansi-regex": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.0.0.tgz", + "integrity": "sha512-iB5Dda8t/UqpPI/IjsejXu5jOGDrzn41wJyljwPH65VCIbk6+1BzFIMJGFwTNrYXT1CrD+B4l19U7awiQ8rk7w==", + "dev": true + }, + "strip-ansi": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.0.0.tgz", + "integrity": 
"sha512-Uu7gQyZI7J7gn5qLn1Np3G9vcYGTVqB+lFTytnDJv83dd8T22aGH451P3jueT2/QemInJDfxHB5Tde5OzgG1Ow==", + "dev": true, + "requires": { + "ansi-regex": "^4.0.0" + } + } + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-callable": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", + "integrity": "sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==", + "dev": true + }, + "is-date-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.1.tgz", + "integrity": "sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "is-promise": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz", + "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=", + "dev": true + }, + "is-regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.4.tgz", + "integrity": "sha1-VRdIm1RwkbCTDglWVM7SXul+lJE=", + "dev": true, + "requires": { + "has": "^1.0.1" + } + }, + "is-symbol": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.2.tgz", + "integrity": "sha512-HS8bZ9ox60yCJLH9snBpIwv9pYUAkcuLhSA1oero1UB5y9aiQpRA8y2ex945AOtCZL1lJDeIk3G5LthswI46Lw==", + "dev": true, + "requires": { + "has-symbols": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "js-yaml": { + "version": "3.12.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.1.tgz", + "integrity": "sha512-um46hB9wNOKlwkHgiuyEVAybXBjwFUV0Z/RaHJblRd9DXltue9FTYvzCr9ErQrK9Adz5MU4gHWVaNUfdmrC8qA==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "load-json-file": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", + "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "strip-bom": "^3.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.11", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", + "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==", + "dev": true + }, + "mimic-fn": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", + "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "0.0.8", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + }, + "mocha": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz", + "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==", + "dev": true, + "requires": { + "browser-stdout": "1.3.1", + "commander": "2.15.1", + "debug": "3.1.0", + "diff": "3.5.0", + "escape-string-regexp": "1.0.5", + "glob": "7.1.2", + "growl": "1.10.5", + "he": "1.1.1", + "minimatch": "3.0.4", + "mkdirp": "0.5.1", + "supports-color": "5.4.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "mute-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz", + "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=", + "dev": true + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true + }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true + }, + "node-addon-api": { "version": "1.3.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.3.0.tgz", - "integrity": "sha1-s0b27PapX1qBXFg5/HzbIlAvHtc=" + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-1.3.0.tgz", + "integrity": "sha512-yagD4yKkZLeG4EJkh+8Qbqhqw+owDQ/PowqD8vb5a5rfNXS/PRC21SGyIbUVXfPp/jl4s+jyeZj6xnLnDPLazw==" + }, + "normalize-package-data": { + "version": "2.5.0", + "resolved": 
"https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "requires": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-keys": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", + "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==", + "dev": true + }, + "object.assign": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", + "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "dev": true, + "requires": { + "define-properties": "^1.1.2", + "function-bind": "^1.1.1", + "has-symbols": "^1.0.0", + "object-keys": "^1.0.11" + } + }, + "object.entries": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.0.tgz", + "integrity": "sha512-l+H6EQ8qzGRxbkHOd5I/aHRhHDKoQXQ8g0BYt4uSweQU1/J6dZUOyWh9a2Vky35YCKjzmgxOzta2hH6kf9HuXA==", + "dev": true, + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.12.0", + "function-bind": "^1.1.1", + "has": "^1.0.3" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", + "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", + "dev": true, + "requires": { + "mimic-fn": "^1.0.0" + } + }, + "optionator": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", + "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", + "dev": true, + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.4", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "wordwrap": "~1.0.0" + } + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "parent-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.0.tgz", + "integrity": "sha512-8Mf5juOMmiE4FcmzYc4IaiS9L3+9paz2KOiXzkRviCP6aDmN49Hz6EMWz0lGNp9pX80GvvAuLADtyGfW/Em3TA==", + "dev": true, + "requires": { + 
"callsites": "^3.0.0" + } + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "requires": { + "error-ex": "^1.2.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "http://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=", + "dev": true + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "path-parse": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", + "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", + "dev": true + }, + "path-type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", + "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", + "dev": true, + "requires": { + "pify": "^2.0.0" + } + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "dev": true + }, + "pkg-dir": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", + "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", + "dev": true, + "requires": { + "find-up": "^2.1.0" + } + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + }, + "read-pkg": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", + "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", + "dev": true, + "requires": { + "load-json-file": "^2.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^2.0.0" + } + }, + "read-pkg-up": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", + "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", + "dev": true, + "requires": { + "find-up": "^2.0.0", + "read-pkg": "^2.0.0" + } + }, + "regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "dev": true + }, + "resolve": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.10.0.tgz", + "integrity": 
"sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg==", + "dev": true, + "requires": { + "path-parse": "^1.0.6" + } + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + }, + "restore-cursor": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", + "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", + "dev": true, + "requires": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + } + }, + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + }, + "dependencies": { + "glob": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", + "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } + } + }, + "run-async": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.3.0.tgz", + "integrity": "sha1-A3GrSuC91yDUFm19/aZP96RFpsA=", + "dev": true, + "requires": { + "is-promise": "^2.1.0" + } + }, + "rxjs": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.4.0.tgz", + "integrity": "sha512-Z9Yfa11F6B9Sg/BK9MnqnQ+aQYicPLtilXBp2yUtDt2JRCE0h26d33EnfO3ZxoNxG0T92OUucP3Ct7cpfkdFfw==", + "dev": true, + "requires": { + "tslib": "^1.9.0" + } + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "semver": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.6.0.tgz", + "integrity": "sha512-RS9R6R35NYgQn++fkDWaOmqGoj4Ek9gGs+DPxNUZKuwE183xjJroKvyo1IzVFeXvUrvmALy6FWD5xrdJT25gMg==", + "dev": true + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "slice-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", + "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "astral-regex": "^1.0.0", + "is-fullwidth-code-point": "^2.0.0" + } + }, + "spdx-correct": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", + "integrity": 
"sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", + "dev": true, + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz", + "integrity": "sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "dev": true, + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.3.tgz", + "integrity": "sha512-uBIcIl3Ih6Phe3XHK1NqboJLdGfwr1UN3k6wSD1dZpmPsIkb8AGNbZYJ1fOBk834+Gxy8rpfDxrS6XLEMZMY2g==", + "dev": true + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + } + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", + "dev": true + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true + }, + "supports-color": { + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", + "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "table": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/table/-/table-5.2.2.tgz", + "integrity": "sha512-f8mJmuu9beQEDkKHLzOv4VxVYlU68NpdzjbGPl69i4Hx0sTopJuNxuzJd17iV2h24dAfa93u794OnDA5jqXvfQ==", + "dev": true, + "requires": { + "ajv": "^6.6.1", + "lodash": "^4.17.11", + "slice-ansi": "^2.0.0", + "string-width": "^2.1.1" + } + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "requires": { + 
"os-tmpdir": "~1.0.2" + } + }, + "tslib": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz", + "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ==", + "dev": true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true }, - "nan": { - "version": "2.10.0", - "resolved": "https://npm.paviliongift.com/nan/-/nan-2.10.0.tgz", - "integrity": "sha1-ltDNYQ69WNS03pzAxoKM2pnHVI8=" + "write": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/write/-/write-0.2.1.tgz", + "integrity": "sha1-X8A4KOJkzqP+kUVUdvejxWbLB1c=", + "dev": true, + "requires": { + "mkdirp": "^0.5.1" + } } } } diff --git a/package.json b/package.json old mode 100644 new mode 100755 index b001b25..fc7e231 --- a/package.json +++ b/package.json @@ -1,10 +1,9 @@ { "name": "odbc", "description": "unixodbc bindings for node", - "version": "1.4.5", - "main": "lib/odbc.js", - "types": "./lib/odbc.d.ts", + "version": "2.0.0-beta.0", "homepage": "http://github.com/wankdanker/node-odbc/", + "main": "./lib/odbc.js", "repository": { "type": "git", "url": "git://github.com/wankdanker/node-odbc.git" @@ -13,6 +12,10 @@ "url": "https://github.com/w1nk/node-odbc/issues" }, "contributors": [ + { + "name": "Mark Irish", + "email": "mirish@ibm.com" + }, { "name": "Dan VerWeire", "email": "dverweire@gmail.com" @@ -30,11 +33,17 @@ }, "scripts": { "install": "node-gyp configure build", - "test": "cd test && node run-tests.js" + "test": "mocha --slow 5000 --timeout 10000" }, "dependencies": { - "bindings": "^1.3.0", - "nan": "^2.10.0" + "dotenv": "^6.2.0", + "node-addon-api": "^1.3.0" }, - "gypfile": true + "gypfile": true, + "devDependencies": { + "eslint": "^5.13.0", + "eslint-config-airbnb-base": "^13.1.0", + "eslint-plugin-import": "^2.16.0", + "mocha": "^5.2.0" + } } diff --git a/src/dynodbc.cpp b/src/dynodbc.cpp old mode 100644 new mode 100755 diff --git a/src/dynodbc.h b/src/dynodbc.h old mode 100644 new mode 100755 diff --git a/src/odbc.cpp b/src/odbc.cpp old mode 100644 new 
mode 100755 index 114b1f4..1dcead1 --- a/src/odbc.cpp +++ b/src/odbc.cpp @@ -15,797 +15,984 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include -#include -#include -#include +#include #include -#include +#include #include "odbc.h" #include "odbc_connection.h" -#include "odbc_result.h" #include "odbc_statement.h" #ifdef dynodbc #include "dynodbc.h" #endif -#ifdef _WIN32 -#include "strptime.h" -#endif - -using namespace v8; -using namespace node; - uv_mutex_t ODBC::g_odbcMutex; +SQLHENV ODBC::hEnv; -Nan::Persistent ODBC::constructor; +Napi::Value ODBC::Init(Napi::Env env, Napi::Object exports) { -void ODBC::Init(v8::Handle exports) { - DEBUG_PRINTF("ODBC::Init\n"); - Nan::HandleScope scope; + hEnv = NULL; + Napi::HandleScope scope(env); - Local constructor_template = Nan::New(New); + // Wrap ODBC constants in an object that we can then expand + std::vector ODBC_CONSTANTS; - // Constructor Template - constructor_template->SetClassName(Nan::New("ODBC").ToLocalChecked()); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("ODBCVER", Napi::Number::New(env, ODBCVER), napi_enumerable)); - // Reserve space for one Handle - Local instance_template = constructor_template->InstanceTemplate(); - instance_template->SetInternalFieldCount(1); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_COMMIT", Napi::Number::New(env, SQL_COMMIT), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ROLLBACK", Napi::Number::New(env, SQL_ROLLBACK), napi_enumerable)); - // Constants -#if (NODE_MODULE_VERSION < NODE_0_12_MODULE_VERSION) - -#else - -#endif - PropertyAttribute constant_attributes = static_cast(ReadOnly | DontDelete); - constructor_template->Set(Nan::New("SQL_CLOSE").ToLocalChecked(), Nan::New(SQL_CLOSE), constant_attributes); - constructor_template->Set(Nan::New("SQL_DROP").ToLocalChecked(), Nan::New(SQL_DROP), constant_attributes); - constructor_template->Set(Nan::New("SQL_UNBIND").ToLocalChecked(), Nan::New(SQL_UNBIND), constant_attributes); - constructor_template->Set(Nan::New("SQL_RESET_PARAMS").ToLocalChecked(), Nan::New(SQL_RESET_PARAMS), constant_attributes); - constructor_template->Set(Nan::New("SQL_DESTROY").ToLocalChecked(), Nan::New(SQL_DESTROY), constant_attributes); - constructor_template->Set(Nan::New("FETCH_ARRAY").ToLocalChecked(), Nan::New(FETCH_ARRAY), constant_attributes); - constructor_template->Set(Nan::New("SQL_USER_NAME").ToLocalChecked(), Nan::New(SQL_USER_NAME), constant_attributes); - NODE_ODBC_DEFINE_CONSTANT(constructor_template, FETCH_OBJECT); - - // Prototype Methods - Nan::SetPrototypeMethod(constructor_template, "createConnection", CreateConnection); - Nan::SetPrototypeMethod(constructor_template, "createConnectionSync", CreateConnectionSync); - - // Attach the Database Constructor to the target object - constructor.Reset(constructor_template->GetFunction()); - exports->Set(Nan::New("ODBC").ToLocalChecked(), - constructor_template->GetFunction()); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_USER_NAME", Napi::Number::New(env, SQL_USER_NAME), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_PARAM_INPUT", Napi::Number::New(env, SQL_PARAM_INPUT), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_PARAM_INPUT_OUTPUT", Napi::Number::New(env, SQL_PARAM_INPUT_OUTPUT), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_PARAM_OUTPUT", Napi::Number::New(env, SQL_PARAM_OUTPUT), 
napi_enumerable)); + + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_VARCHAR", Napi::Number::New(env, SQL_VARCHAR), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_INTEGER", Napi::Number::New(env, SQL_INTEGER), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_SMALLINT", Napi::Number::New(env, SQL_SMALLINT), napi_enumerable)); + + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_NO_NULLS", Napi::Number::New(env, SQL_NO_NULLS), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_NULLABLE", Napi::Number::New(env, SQL_NULLABLE), napi_enumerable)); + ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_NULLABLE_UNKNOWN", Napi::Number::New(env, SQL_NULLABLE_UNKNOWN), napi_enumerable)); + + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_USER_NAME", Napi::Number::New(env, SQL_USER_NAME), napi_enumerable)); + + // // connection attributes + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ACCESS_MODE", Napi::Number::New(env, SQL_ACCESS_MODE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_AUTOCOMMIT", Napi::Number::New(env, SQL_AUTOCOMMIT), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_LOGIN_TIMEOUT", Napi::Number::New(env, SQL_LOGIN_TIMEOUT), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_OPT_TRACE", Napi::Number::New(env, SQL_OPT_TRACE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_OPT_TRACEFILE", Napi::Number::New(env, SQL_OPT_TRACEFILE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_TRANSLATE_DLL", Napi::Number::New(env, SQL_TRANSLATE_DLL), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_TRANSLATE_OPTION", Napi::Number::New(env, SQL_TRANSLATE_OPTION), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_TXN_ISOLATION", Napi::Number::New(env, SQL_TXN_ISOLATION), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_CURRENT_QUALIFIER", Napi::Number::New(env, SQL_CURRENT_QUALIFIER), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ODBC_CURSORS", Napi::Number::New(env, SQL_ODBC_CURSORS), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_QUIET_MODE", Napi::Number::New(env, SQL_QUIET_MODE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_PACKET_SIZE", Napi::Number::New(env, SQL_PACKET_SIZE), napi_enumerable)); + + // // connection attributes with new names + // #if (ODBCVER >= 0x0300) + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_ACCESS_MODE", Napi::Number::New(env, SQL_ATTR_ACCESS_MODE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_AUTOCOMMIT", Napi::Number::New(env, SQL_ATTR_AUTOCOMMIT), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_CONNECTION_TIMEOUT", Napi::Number::New(env, SQL_ATTR_CONNECTION_TIMEOUT), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_CURRENT_CATALOG", Napi::Number::New(env, SQL_ATTR_CURRENT_CATALOG), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_DISCONNECT_BEHAVIOR", Napi::Number::New(env, 
SQL_ATTR_DISCONNECT_BEHAVIOR), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_ENLIST_IN_DTC", Napi::Number::New(env, SQL_ATTR_ENLIST_IN_DTC), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_ENLIST_IN_XA", Napi::Number::New(env, SQL_ATTR_ENLIST_IN_XA), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_LOGIN_TIMEOUT", Napi::Number::New(env, SQL_ATTR_LOGIN_TIMEOUT), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_ODBC_CURSORS", Napi::Number::New(env, SQL_ATTR_ODBC_CURSORS), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_PACKET_SIZE", Napi::Number::New(env, SQL_ATTR_PACKET_SIZE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_QUIET_MODE", Napi::Number::New(env, SQL_ATTR_QUIET_MODE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_TRACE", Napi::Number::New(env, SQL_ATTR_TRACE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_TRACEFILE", Napi::Number::New(env, SQL_ATTR_TRACEFILE), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_TRANSLATE_LIB", Napi::Number::New(env, SQL_ATTR_TRANSLATE_LIB), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_TRANSLATE_OPTION", Napi::Number::New(env, SQL_ATTR_TRANSLATE_OPTION), napi_enumerable)); + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_TXN_ISOLATION", Napi::Number::New(env, SQL_ATTR_TXN_ISOLATION), napi_enumerable)); + // #endif + + // ODBC_CONSTANTS.push_back(Napi::PropertyDescriptor::Value("SQL_ATTR_CONNECTION_DEAD", Napi::Number::New(env, SQL_ATTR_CONNECTION_DEAD), napi_enumerable)); + + exports.DefineProperties(ODBC_CONSTANTS); + + exports.Set("connect", Napi::Function::New(env, ODBC::Connect)); + exports.Set("connectSync", Napi::Function::New(env, ODBC::ConnectSync)); // Initialize the cross platform mutex provided by libuv uv_mutex_init(&ODBC::g_odbcMutex); -} - -ODBC::~ODBC() { - DEBUG_PRINTF("ODBC::~ODBC\n"); - this->Free(); -} -void ODBC::Free() { - DEBUG_PRINTF("ODBC::Free\n"); - if (m_hEnv) { - uv_mutex_lock(&ODBC::g_odbcMutex); - - if (m_hEnv) { - SQLFreeHandle(SQL_HANDLE_ENV, m_hEnv); - m_hEnv = NULL; - } - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } -} - -NAN_METHOD(ODBC::New) { - DEBUG_PRINTF("ODBC::New\n"); - Nan::HandleScope scope; - ODBC* dbo = new ODBC(); - - dbo->Wrap(info.Holder()); - - dbo->m_hEnv = NULL; - uv_mutex_lock(&ODBC::g_odbcMutex); - // Initialize the Environment handle - int ret = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &dbo->m_hEnv); - + SQLRETURN sqlReturnCode = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &hEnv); uv_mutex_unlock(&ODBC::g_odbcMutex); - if (!SQL_SUCCEEDED(ret)) { + if (!SQL_SUCCEEDED(sqlReturnCode)) { DEBUG_PRINTF("ODBC::New - ERROR ALLOCATING ENV HANDLE!!\n"); - - Local objError = ODBC::GetSQLError(SQL_HANDLE_ENV, dbo->m_hEnv); - - return Nan::ThrowError(objError); + Napi::Error(env, Napi::String::New(env, ODBC::GetSQLError(SQL_HANDLE_ENV, hEnv))).ThrowAsJavaScriptException(); + return env.Null(); } // Use ODBC 3.x behavior - SQLSetEnvAttr(dbo->m_hEnv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, SQL_IS_UINTEGER); - - info.GetReturnValue().Set(info.Holder()); + SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, SQL_IS_UINTEGER); 
+ + return exports; } -//void ODBC::WatcherCallback(uv_async_t *w, int revents) { -// DEBUG_PRINTF("ODBC::WatcherCallback\n"); -// //i don't know if we need to do anything here -//} +ODBC::~ODBC() { + DEBUG_PRINTF("ODBC::~ODBC\n"); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + if (hEnv) { + SQLFreeHandle(SQL_HANDLE_ENV, hEnv); + hEnv = NULL; + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); +} /* - * CreateConnection + * Connect */ +class ConnectAsyncWorker : public Napi::AsyncWorker { -NAN_METHOD(ODBC::CreateConnection) { - DEBUG_PRINTF("ODBC::CreateConnection\n"); - Nan::HandleScope scope; + public: + ConnectAsyncWorker(HENV hEnv, SQLTCHAR *connectionStringPtr, int count, Napi::Function& callback) : Napi::AsyncWorker(callback), + connectionStringPtr(connectionStringPtr), + count(count), + hEnv(hEnv) {} - Local cb = info[0].As(); - Nan::Callback *callback = new Nan::Callback(cb); - //REQ_FUN_ARG(0, cb); + ~ConnectAsyncWorker() {} - ODBC* dbo = Nan::ObjectWrap::Unwrap(info.Holder()); - - //initialize work request - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + private: - //initialize our data - create_connection_work_data* data = - (create_connection_work_data *) (calloc(1, sizeof(create_connection_work_data))); + SQLTCHAR *connectionStringPtr; + int count; + SQLHENV hEnv; - data->cb = callback; - data->dbo = dbo; + std::vector<SQLHDBC> hDBCs; + SQLUSMALLINT canHaveMoreResults; + SQLRETURN sqlReturnCode; - work_req->data = data; - - uv_queue_work(uv_default_loop(), work_req, UV_CreateConnection, (uv_after_work_cb)UV_AfterCreateConnection); + void Execute() { - dbo->Ref(); + DEBUG_PRINTF("ODBC::ConnectAsyncWorker::Execute\n"); - info.GetReturnValue().Set(Nan::Undefined()); -} + uv_mutex_lock(&ODBC::g_odbcMutex); -void ODBC::UV_CreateConnection(uv_work_t* req) { - DEBUG_PRINTF("ODBC::UV_CreateConnection\n"); - - //get our work data - create_connection_work_data* data = (create_connection_work_data *)(req->data); - - uv_mutex_lock(&ODBC::g_odbcMutex); + // when we pool, we want to create more than one connection in the AsyncWorker + for (int i = 0; i < count; i++) { - //allocate a new connection handle - data->result = SQLAllocHandle(SQL_HANDLE_DBC, data->dbo->m_hEnv, &data->hDBC); - - uv_mutex_unlock(&ODBC::g_odbcMutex); -} + SQLHDBC hDBC; -void ODBC::UV_AfterCreateConnection(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBC::UV_AfterCreateConnection\n"); - Nan::HandleScope scope; + sqlReturnCode = SQLAllocHandle(SQL_HANDLE_DBC, hEnv, &hDBC); - create_connection_work_data* data = (create_connection_work_data *)(req->data); - - Nan::TryCatch try_catch; - - if (!SQL_SUCCEEDED(data->result)) { - Local info[1]; - - info[0] = ODBC::GetSQLError(SQL_HANDLE_ENV, data->dbo->m_hEnv); - - data->cb->Call(1, info); + unsigned int connectTimeout = 5; + unsigned int loginTimeout = 5; + + if (connectTimeout > 0) { + sqlReturnCode = SQLSetConnectAttr( + hDBC, // ConnectionHandle + SQL_ATTR_CONNECTION_TIMEOUT, // Attribute + (SQLPOINTER) size_t(connectTimeout), // ValuePtr + SQL_IS_UINTEGER); // StringLength + } + + if (loginTimeout > 0) { + sqlReturnCode = SQLSetConnectAttr( + hDBC, // ConnectionHandle + SQL_ATTR_LOGIN_TIMEOUT, // Attribute + (SQLPOINTER) size_t(loginTimeout), // ValuePtr + SQL_IS_UINTEGER); // StringLength + } + + //Attempt to connect + sqlReturnCode = SQLDriverConnect( + hDBC, // ConnectionHandle + NULL, // WindowHandle + connectionStringPtr, // InConnectionString + SQL_NTS, // StringLength1 + NULL, // OutConnectionString + 0, // BufferLength - in characters + NULL, // StringLength2Ptr
+ SQL_DRIVER_NOPROMPT); // DriverCompletion + + if (SQL_SUCCEEDED(sqlReturnCode)) { + + // odbcConnectionObject->connected = true; + + HSTMT hStmt; + + //allocate a temporary statement + sqlReturnCode = SQLAllocHandle(SQL_HANDLE_STMT, hDBC, &hStmt); + + //try to determine if the driver can handle + //multiple recordsets + sqlReturnCode = SQLGetFunctions( + hDBC, + SQL_API_SQLMORERESULTS, + &canHaveMoreResults + ); + + if (!SQL_SUCCEEDED(sqlReturnCode)) { + canHaveMoreResults = 0; + } + + //free the handle + sqlReturnCode = SQLFreeHandle(SQL_HANDLE_STMT, hStmt); + + hDBCs.push_back(hDBC); + + } else { + SetError(ODBC::GetSQLError(SQL_HANDLE_DBC, hDBC, (char *) "[node-odbc] Error in ConnectAsyncWorker")); + } + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } + + void OnOK() { + + DEBUG_PRINTF("ODBC::ConnectAsyncWorker::OnOk\n"); + + Napi::Env env = Env(); + Napi::HandleScope scope(env); + + Napi::Array connections = Napi::Array::New(env); + + for (unsigned int i = 0; i < hDBCs.size(); i++) { + // pass the HENV and HDBC values to the ODBCConnection constructor + std::vector<napi_value> connectionArguments; + connectionArguments.push_back(Napi::External<SQLHENV>::New(env, &hEnv)); // connectionArguments[0] + connectionArguments.push_back(Napi::External<SQLHDBC>::New(env, &hDBCs[i])); // connectionArguments[1] + + // create a new ODBCConnection object as a Napi::Value + connections.Set(i, ODBCConnection::constructor.New(connectionArguments)); + } + + // pass the arguments to the callback function + std::vector<napi_value> callbackArguments; + callbackArguments.push_back(env.Null()); // callbackArguments[0] + callbackArguments.push_back(connections); // callbackArguments[1] + + Callback().Call(callbackArguments); + + } +}; + +// Connect +Napi::Value ODBC::Connect(const Napi::CallbackInfo& info) { + + DEBUG_PRINTF("ODBC::Connect\n"); + + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + Napi::String connectionString; + Napi::Function callback; + int count; + + SQLTCHAR *connectionStringPtr = nullptr; + + if(info.Length() != 3) { + Napi::TypeError::New(env, "connect(connectionString, count, callback) requires 3 parameters.").ThrowAsJavaScriptException(); + return env.Null(); } - else { - Local info[2]; - info[0] = Nan::New(data->dbo->m_hEnv); - info[1] = Nan::New(data->hDBC); - - Local js_result = Nan::NewInstance(Nan::New(ODBCConnection::constructor), 2, info).ToLocalChecked(); - info[0] = Nan::Null(); - info[1] = js_result; + if (info[0].IsString()) { + connectionString = info[0].As<Napi::String>(); + connectionStringPtr = ODBC::NapiStringToSQLTCHAR(connectionString); + } else { + Napi::TypeError::New(env, "connect: first parameter must be a string.").ThrowAsJavaScriptException(); + return env.Null(); + } - data->cb->Call(data->dbo->handle(), 2, info); + if (info[1].IsNumber()) { + count = info[1].ToNumber().Int32Value(); + } else { + Napi::TypeError::New(env, "connect: second parameter must be a number.").ThrowAsJavaScriptException(); + return env.Null(); } - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); + + if (info[2].IsFunction()) { + callback = info[2].As<Napi::Function>(); + } else { + Napi::TypeError::New(env, "connect: third parameter must be a function.").ThrowAsJavaScriptException(); + return env.Null(); } - - data->dbo->Unref(); - delete data->cb; + ConnectAsyncWorker *worker = new ConnectAsyncWorker(hEnv, connectionStringPtr, count, callback); + worker->Queue(); - free(data); - free(req); + return env.Undefined(); } -/* - * CreateConnectionSync - */ +Napi::Value ODBC::ConnectSync(const Napi::CallbackInfo& info) { +
+ DEBUG_PRINTF("ODBC::CreateConnection\n"); -NAN_METHOD(ODBC::CreateConnectionSync) { - DEBUG_PRINTF("ODBC::CreateConnectionSync\n"); - Nan::HandleScope scope; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - ODBC* dbo = Nan::ObjectWrap::Unwrap(info.Holder()); - - HDBC hDBC; - - uv_mutex_lock(&ODBC::g_odbcMutex); - - //allocate a new connection handle - SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_DBC, dbo->m_hEnv, &hDBC); - - if (!SQL_SUCCEEDED(ret)) { - //TODO: do something! + Napi::Value error; + Napi::Value returnValue; + + SQLUSMALLINT canHaveMoreResults; + SQLRETURN sqlReturnCode; + SQLHDBC hDBC; + + if(info.Length() != 1) { + Napi::TypeError::New(env, "connectSync(connectionString) requires 1 parameter.").ThrowAsJavaScriptException(); + return env.Null(); } - - uv_mutex_unlock(&ODBC::g_odbcMutex); - Local params[2]; - params[0] = Nan::New(dbo->m_hEnv); - params[1] = Nan::New(hDBC); + if (!info[0].IsString()) { + Napi::TypeError::New(env, "connectSync: first parameter must be a string.").ThrowAsJavaScriptException(); + return env.Null(); + } - Local js_result = Nan::NewInstance(Nan::New(ODBCConnection::constructor), 2, params).ToLocalChecked(); + Napi::String connectionString = info[0].As(); + SQLTCHAR *connectionStringPtr = ODBC::NapiStringToSQLTCHAR(connectionString); - info.GetReturnValue().Set(js_result); -} + uv_mutex_lock(&ODBC::g_odbcMutex); + sqlReturnCode = SQLAllocHandle(SQL_HANDLE_DBC, hEnv, &hDBC); -/* - * GetColumns - */ + if (!SQL_SUCCEEDED(sqlReturnCode)) { + Napi::Error::New(env, ODBC::GetSQLError(SQL_HANDLE_DBC, hDBC)).ThrowAsJavaScriptException(); + returnValue = env.Null(); + } -Column* ODBC::GetColumns(SQLHSTMT hStmt, short* colCount) { - SQLRETURN ret; - SQLSMALLINT buflen; + unsigned int connectTimeout = 5; + unsigned int loginTimeout = 5; + + if (connectTimeout > 0) { + sqlReturnCode = SQLSetConnectAttr( + hDBC, // ConnectionHandle + SQL_ATTR_CONNECTION_TIMEOUT, // Attribute + (SQLPOINTER) size_t(connectTimeout), // ValuePtr + SQL_IS_UINTEGER); // StringLength + } + + if (!SQL_SUCCEEDED(sqlReturnCode)) { + Napi::Error::New(env, ODBC::GetSQLError(SQL_HANDLE_DBC, hDBC)).ThrowAsJavaScriptException(); + returnValue = env.Null(); + } - //always reset colCount for the current result set to 0; - *colCount = 0; + if (loginTimeout > 0) { + sqlReturnCode = SQLSetConnectAttr( + hDBC, // ConnectionHandle + SQL_ATTR_LOGIN_TIMEOUT, // Attribute + (SQLPOINTER) size_t(loginTimeout), // ValuePtr + SQL_IS_UINTEGER); // StringLength + } - //get the number of columns in the result set - ret = SQLNumResultCols(hStmt, colCount); - - if (!SQL_SUCCEEDED(ret)) { - return new Column[0]; + if (!SQL_SUCCEEDED(sqlReturnCode)) { + Napi::Error::New(env, ODBC::GetSQLError(SQL_HANDLE_DBC, hDBC)).ThrowAsJavaScriptException(); + returnValue = env.Null(); } + + //Attempt to connect + //NOTE: SQLDriverConnect requires the thread to be locked + sqlReturnCode = SQLDriverConnect( + hDBC, // ConnectionHandle + NULL, // WindowHandle + connectionStringPtr, // InConnectionString + SQL_NTS, // StringLength1 + NULL, // OutConnectionString + 0, // BufferLength - in characters + NULL, // StringLength2Ptr + SQL_DRIVER_NOPROMPT // DriverCompletion + ); + + - Column *columns = new Column[*colCount]; + if (SQL_SUCCEEDED(sqlReturnCode)) { - for (int i = 0; i < *colCount; i++) { - //save the index number of this column - columns[i].index = i + 1; - //TODO:that's a lot of memory for each field name.... 
- columns[i].name = new unsigned char[MAX_FIELD_SIZE]; - - //set the first byte of name to \0 instead of memsetting the entire buffer - columns[i].name[0] = '\0'; + HSTMT hStmt; - //get the column name - ret = SQLColAttribute( hStmt, - columns[i].index, -#ifdef STRICT_COLUMN_NAMES - SQL_DESC_NAME, -#else - SQL_DESC_LABEL, -#endif - columns[i].name, - (SQLSMALLINT) MAX_FIELD_SIZE, - (SQLSMALLINT *) &buflen, - NULL); + //allocate a temporary statment + sqlReturnCode = SQLAllocHandle(SQL_HANDLE_STMT, hDBC, &hStmt); - //store the len attribute - columns[i].len = buflen; + //try to determine if the driver can handle + //multiple recordsets + sqlReturnCode = SQLGetFunctions( + hDBC, + SQL_API_SQLMORERESULTS, + &canHaveMoreResults); + + if (!SQL_SUCCEEDED(sqlReturnCode)) { + canHaveMoreResults = 0; + } + + //free the handle + sqlReturnCode = SQLFreeHandle(SQL_HANDLE_STMT, hStmt); + returnValue = Napi::Boolean::New(env, true); + + } else { + Napi::Error::New(env, ODBC::GetSQLError(SQL_HANDLE_DBC, hDBC)).ThrowAsJavaScriptException(); + returnValue = env.Null(); + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + // return the SQLError + if (!SQL_SUCCEEDED(sqlReturnCode)) { + Napi::Error::New(env, ODBC::GetSQLError(SQL_HANDLE_ENV, hEnv)).ThrowAsJavaScriptException(); + return env.Null(); + } + // return the Connection + else { + + // pass the HENV and HDBC values to the ODBCConnection constructor + std::vector connectionArguments; + connectionArguments.push_back(Napi::External::New(env, &hEnv)); // connectionArguments[0] + connectionArguments.push_back(Napi::External::New(env, &hDBC)); // connectionArguments[1] - //get the column type and store it directly in column[i].type - ret = SQLColAttribute( hStmt, - columns[i].index, - SQL_DESC_TYPE, - NULL, - 0, - NULL, - &columns[i].type); + // create a new ODBCConnection object as a Napi::Value + return ODBCConnection::constructor.New(connectionArguments); } - - return columns; } -/* - * FreeColumns - */ +/////////////////////////////////////////////////////////////////////////////// +///////////////////////////// UTILITY FUNCTIONS /////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + +// Take a Napi::String, and convert it to an SQLTCHAR*, which maps to: +// UNICODE : SQLWCHAR* +// no UNICODE : SQLCHAR* +SQLTCHAR* ODBC::NapiStringToSQLTCHAR(Napi::String string) { + + #ifdef UNICODE + std::u16string tempString = string.Utf16Value(); + #else + std::string tempString = string.Utf8Value(); + #endif + std::vector *stringVector = new std::vector(tempString.begin(), tempString.end()); + stringVector->push_back('\0'); -void ODBC::FreeColumns(Column* columns, short* colCount) { - for(int i = 0; i < *colCount; i++) { - delete [] columns[i].name; + return &(*stringVector)[0]; +} + +// Encapsulates the workflow after a result set is returned (many paths require this workflow). +// Does the following: +// Calls SQLRowCount, which returns the number of rows affected by the statement. 
+// Calls ODBC::BindColumns, which calls: +// SQLNumResultCols to return the number of columns, +// SQLDescribeCol to describe those columns, and +// SQLBindCol to bind the column return data to buffers +// Calls ODBC::FetchAll, which calls: +// SQLFetch to fetch all of the result rows +// SQLCloseCursor to close the cursor on the result set +SQLRETURN ODBC::RetrieveResultSet(QueryData *data) { + SQLRETURN returnCode = SQL_SUCCESS; + + returnCode = SQLRowCount( + data->hSTMT, // StatementHandle + &data->rowCount // RowCountPtr + ); + if (!SQL_SUCCEEDED(returnCode)) { + // if SQLRowCount failed, return early with the returnCode + return returnCode; } - delete [] columns; - - *colCount = 0; + + returnCode = ODBC::BindColumns(data); + if (!SQL_SUCCEEDED(returnCode)) { + // if BindColumns failed, return early with the returnCode + return returnCode; + } + + // data->columnCount is set in ODBC::BindColumns above + if (data->columnCount > 0) { + returnCode = ODBC::FetchAll(data); + // if we got to the end of the result set, thats the happy path + if (returnCode == SQL_NO_DATA) { + return SQL_SUCCESS; + } + } + + return returnCode; } -/* - * GetColumnValue - */ +/****************************************************************************** + ****************************** BINDING COLUMNS ******************************* + *****************************************************************************/ +SQLRETURN ODBC::BindColumns(QueryData *data) { -Handle ODBC::GetColumnValue( SQLHSTMT hStmt, Column column, - uint16_t* buffer, int bufferLength) { - Nan::EscapableHandleScope scope; - SQLLEN len = 0; + SQLRETURN returnCode = SQL_SUCCESS; - //reset the buffer - buffer[0] = '\0'; + // SQLNumResultCols returns the number of columns in a result set. + returnCode = SQLNumResultCols( + data->hSTMT, // StatementHandle + &data->columnCount // ColumnCountPtr + ); + + // if there was an error, set columnCount to 0 and return + if (!SQL_SUCCEEDED(returnCode)) { + data->columnCount = 0; + return returnCode; + } - //TODO: SQLGetData can supposedly return multiple chunks, need to do this to - //retrieve large fields - int ret; - - switch ((int) column.type) { - case SQL_INTEGER : - case SQL_SMALLINT : - case SQL_TINYINT : { - int32_t value = 0; - - ret = SQLGetData( - hStmt, - column.index, - SQL_C_SLONG, - &value, - sizeof(value), - &len); - - DEBUG_PRINTF("ODBC::GetColumnValue - Integer: index=%i name=%s type=%lli len=%lli ret=%i val=%li\n", - column.index, column.name, column.type, len, ret, value); - - if (len == SQL_NULL_DATA) { - return scope.Escape(Nan::Null()); - } - else { - return scope.Escape(Nan::New(value)); - } - } - break; - case SQL_NUMERIC : - case SQL_DECIMAL : - case SQL_BIGINT : - case SQL_FLOAT : - case SQL_REAL : - case SQL_DOUBLE : { - double value; - - ret = SQLGetData( - hStmt, - column.index, - SQL_C_DOUBLE, - &value, - sizeof(value), - &len); - - DEBUG_PRINTF("ODBC::GetColumnValue - Number: index=%i name=%s type=%lli len=%lli ret=%i val=%f\n", - column.index, column.name, column.type, len, ret, value); - - if (len == SQL_NULL_DATA) { - return scope.Escape(Nan::Null()); - //return Null(); - } - else { - return scope.Escape(Nan::New(value)); - //return Number::New(value); - } - } - break; - case SQL_DATETIME : - case SQL_TIMESTAMP : { - //I am not sure if this is locale-safe or cross database safe, but it - //works for me on MSSQL -#ifdef _WIN32 - struct tm timeInfo = {}; - - ret = SQLGetData( - hStmt, - column.index, - SQL_C_CHAR, - (char *) buffer, - bufferLength, - &len); - - 
DEBUG_PRINTF("ODBC::GetColumnValue - W32 Timestamp: index=%i name=%s type=%lli len=%lli\n", - column.index, column.name, column.type, len); - - if (len == SQL_NULL_DATA) { - return scope.Escape(Nan::Null()); - //return Null(); - } - else { - if (strptime((char *) buffer, "%Y-%m-%d %H:%M:%S", &timeInfo)) { - //a negative value means that mktime() should use timezone information - //and system databases to attempt to determine whether DST is in effect - //at the specified time. - timeInfo.tm_isdst = -1; - - //return scope.Escape(Date::New(Isolate::GetCurrent(), (double(mktime(&timeInfo)) * 1000)); - return scope.Escape(Nan::New(double(mktime(&timeInfo)) * 1000).ToLocalChecked()); - } - else { - return scope.Escape(Nan::New((char *)buffer).ToLocalChecked()); - } - } -#else - struct tm timeInfo = { - tm_sec : 0 - , tm_min : 0 - , tm_hour : 0 - , tm_mday : 0 - , tm_mon : 0 - , tm_year : 0 - , tm_wday : 0 - , tm_yday : 0 - , tm_isdst : 0 - #ifndef _AIX //AIX does not have these - , tm_gmtoff : 0 - , tm_zone : 0 - #endif - }; - - SQL_TIMESTAMP_STRUCT odbcTime; - - ret = SQLGetData( - hStmt, - column.index, - SQL_C_TYPE_TIMESTAMP, - &odbcTime, - bufferLength, - &len); - - DEBUG_PRINTF("ODBC::GetColumnValue - Unix Timestamp: index=%i name=%s type=%i len=%i\n", - column.index, column.name, column.type, len); - - if (len == SQL_NULL_DATA) { - return scope.Escape(Nan::Null()); - //return Null(); - } - else { - timeInfo.tm_year = odbcTime.year - 1900; - timeInfo.tm_mon = odbcTime.month - 1; - timeInfo.tm_mday = odbcTime.day; - timeInfo.tm_hour = odbcTime.hour; - timeInfo.tm_min = odbcTime.minute; - timeInfo.tm_sec = odbcTime.second; - - //a negative value means that mktime() should use timezone information - //and system databases to attempt to determine whether DST is in effect - //at the specified time. - timeInfo.tm_isdst = -1; -#ifdef TIMEGM - return scope.Escape(Nan::New((double(timegm(&timeInfo)) * 1000) - + (odbcTime.fraction / 1000000)).ToLocalChecked()); -#else -#ifdef _AIX - #define timelocal mktime -#endif - return scope.Escape(Nan::New((double(timelocal(&timeInfo)) * 1000) - + (odbcTime.fraction / 1000000)).ToLocalChecked()); -#endif - //return Date::New((double(timegm(&timeInfo)) * 1000) - // + (odbcTime.fraction / 1000000)); - } -#endif - } break; - case SQL_BIT : - //again, i'm not sure if this is cross database safe, but it works for - //MSSQL - ret = SQLGetData( - hStmt, - column.index, - SQL_C_CHAR, - (char *) buffer, - bufferLength, - &len); - - DEBUG_PRINTF("ODBC::GetColumnValue - Bit: index=%i name=%s type=%lli len=%lli\n", - column.index, column.name, column.type, len); - - if (len == SQL_NULL_DATA) { - return scope.Escape(Nan::Null()); - } - else { - return scope.Escape(Nan::New((*buffer == '0') ? 
false : true)); - } - default : - Local str; - int count = 0; - - do { - ret = SQLGetData( - hStmt, - column.index, - SQL_C_TCHAR, - (char *) buffer, - bufferLength, - &len); - - DEBUG_PRINTF("ODBC::GetColumnValue - String: index=%i name=%s type=%lli len=%lli value=%s ret=%i bufferLength=%i\n", - column.index, column.name, column.type, len,(char *) buffer, ret, bufferLength); - - if (len == SQL_NULL_DATA && str.IsEmpty()) { - return scope.Escape(Nan::Null()); - //return Null(); - } - - if (SQL_NO_DATA == ret) { - //we have captured all of the data - //double check that we have some data else return null - if (str.IsEmpty()){ - return scope.Escape(Nan::Null()); - } + // create Columns for the column data to go into + data->columns = new Column*[data->columnCount]; + data->boundRow = new SQLTCHAR*[data->columnCount](); - break; - } - else if (SQL_SUCCEEDED(ret)) { - //we have not captured all of the data yet - - if (count == 0) { - //no concatenation required, this is our first pass -#ifdef UNICODE - str = Nan::New((uint16_t*) buffer).ToLocalChecked(); -#else - str = Nan::New((char *) buffer).ToLocalChecked(); -#endif - } - else { - //we need to concatenate -#ifdef UNICODE - str = String::Concat(str, Nan::New((uint16_t*) buffer).ToLocalChecked()); -#else - str = String::Concat(str, Nan::New((char *) buffer).ToLocalChecked()); -#endif - } - - //if len is zero let's break out of the loop now and not attempt to - //call SQLGetData again. The specific reason for this is because - //some ODBC drivers may not correctly report SQL_NO_DATA the next - //time around causing an infinite loop here - if (len == 0) { - break; - } - - count += 1; - } - else { - //an error has occured - //possible values for ret are SQL_ERROR (-1) and SQL_INVALID_HANDLE (-2) - - //If we have an invalid handle, then stuff is way bad and we should abort - //immediately. Memory errors are bound to follow as we must be in an - //inconsisant state. 
- assert(ret != SQL_INVALID_HANDLE); - - //Not sure if throwing here will work out well for us but we can try - //since we should have a valid handle and the error is something we - //can look into - - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - hStmt, - (char *) "[node-odbc] Error in ODBC::GetColumnValue" - ); + for (int i = 0; i < data->columnCount; i++) { - Nan::ThrowError(objError); - return scope.Escape(Nan::Undefined()); - break; - } - } while (true); + Column *column = new Column(); + + column->ColumnName = new SQLTCHAR[SQL_MAX_COLUMN_NAME_LEN](); + + returnCode = SQLDescribeCol( + data->hSTMT, // StatementHandle + i + 1, // ColumnNumber + column->ColumnName, // ColumnName + SQL_MAX_COLUMN_NAME_LEN, // BufferLength, + &column->NameLength, // NameLengthPtr, + &column->DataType, // DataTypePtr + &column->ColumnSize, // ColumnSizePtr, + &column->DecimalDigits, // DecimalDigitsPtr, + &column->Nullable // NullablePtr + ); + + if (!SQL_SUCCEEDED(returnCode)) { + delete column; + return returnCode; + } + + data->columns[i] = column; + + SQLLEN maxColumnLength; + SQLSMALLINT targetType; + + // bind depending on the column + switch(column->DataType) { + + case SQL_DECIMAL : + case SQL_NUMERIC : + + maxColumnLength = column->ColumnSize + column->DecimalDigits + 1; + targetType = SQL_C_CHAR; + + break; + + case SQL_DOUBLE : + + maxColumnLength = column->ColumnSize; + targetType = SQL_C_DOUBLE; + break; + + case SQL_INTEGER: + case SQL_SMALLINT: + + maxColumnLength = column->ColumnSize; + targetType = SQL_C_SLONG; + break; + + case SQL_BIGINT : + + maxColumnLength = column->ColumnSize; + targetType = SQL_C_SBIGINT; + break; + + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + + maxColumnLength = column->ColumnSize; + targetType = SQL_C_BINARY; + break; + + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + + maxColumnLength = (column->ColumnSize + 1) * sizeof(SQL_C_WCHAR); + targetType = SQL_C_WCHAR; + break; + + case SQL_CHAR: + case SQL_VARCHAR: + case SQL_LONGVARCHAR: + default: - return scope.Escape(str); + maxColumnLength = (column->ColumnSize + 1) * sizeof(SQL_C_CHAR); + targetType = SQL_C_CHAR; + break; + } + + data->boundRow[i] = new SQLCHAR[maxColumnLength](); + + // SQLBindCol binds application data buffers to columns in the result set. 
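+    // Once bound, each subsequent SQLFetch on this statement writes column
+    // i's value straight into data->boundRow[i] and its length in bytes (or
+    // SQL_NULL_DATA) into column->StrLen_or_IndPtr, so whole rows can be read
+    // without a per-column SQLGetData round trip.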
+    returnCode = SQLBindCol(
+      data->hSTMT,              // StatementHandle
+      i + 1,                    // ColumnNumber
+      targetType,               // TargetType
+      data->boundRow[i],        // TargetValuePtr
+      maxColumnLength,          // BufferLength
+      &column->StrLen_or_IndPtr // StrLen_or_Ind
+    );
+
+    if (!SQL_SUCCEEDED(returnCode)) {
+      return returnCode;
+    }
  }
+
+  return returnCode;
}

-/*
- * GetRecordTuple
- */
+SQLRETURN ODBC::FetchAll(QueryData *data) {

-Local<Object> ODBC::GetRecordTuple ( SQLHSTMT hStmt, Column* columns,
-                                     short* colCount, uint16_t* buffer,
-                                     int bufferLength) {
-  Nan::EscapableHandleScope scope;
-
-  Local<Object> tuple = Nan::New<Object>();
-
-  for(int i = 0; i < *colCount; i++) {
-#ifdef UNICODE
-    tuple->Set( Nan::New((uint16_t *) columns[i].name).ToLocalChecked(),
-                GetColumnValue( hStmt, columns[i], buffer, bufferLength));
-#else
-    tuple->Set( Nan::New((const char *) columns[i].name).ToLocalChecked(),
-                GetColumnValue( hStmt, columns[i], buffer, bufferLength));
-#endif
+  // keep calling SQLFetch until it reports no more data; each fetch fills the
+  // buffers that BindColumns bound (the boundRow array)
+  SQLRETURN returnCode;
+
+  while(SQL_SUCCEEDED(returnCode = SQLFetch(data->hSTMT))) {
+
+    ColumnData *row = new ColumnData[data->columnCount];
+
+    // Iterate over each column, putting the data in the row object
+    for (int i = 0; i < data->columnCount; i++) {
+
+      row[i].size = data->columns[i]->StrLen_or_IndPtr;
+      if (row[i].size == SQL_NULL_DATA) {
+        row[i].data = NULL;
+      } else {
+        row[i].data = new SQLCHAR[row[i].size];
+        memcpy(row[i].data, data->boundRow[i], row[i].size);
+      }
+    }
+
+    data->storedRows.push_back(row);
  }
-
-  return scope.Escape(tuple);
+
+  // returnCode is now either SQL_NO_DATA (every row was fetched) or SQL_ERROR
+  SQLCloseCursor(data->hSTMT);
+  return returnCode;
}

-/*
- * GetRecordArray
- */
+// If we have a parameter with input/output params (e.g. calling a procedure),
+// then we need to take the Parameter structures of the QueryData and create
+// a Napi::Array from them.
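+// Illustrative example (hypothetical, not taken from this codebase): for a
+// procedure declared as MYPROC(IN a INTEGER, OUT b INTEGER) and called with
+// the values [5, null], two Parameter structs arrive here; the array built
+// below would then contain [5, <the integer the procedure wrote into b>],
+// each element converted according to its ParameterType.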
+Napi::Array ODBC::ParametersToArray(Napi::Env env, QueryData *data) { + Parameter **parameters = data->parameters; + Napi::Array napiParameters = Napi::Array::New(env); + + for (SQLSMALLINT i = 0; i < data->parameterCount; i++) { + Napi::Value value; + + // check for null data + if (parameters[i]->StrLen_or_IndPtr == SQL_NULL_DATA) { + value = env.Null(); + } else { + + switch(parameters[i]->ParameterType) { + case SQL_REAL: + case SQL_NUMERIC : + case SQL_DECIMAL : + // value = Napi::String::New(env, (const char*)parameters[i]->ParameterValuePtr, parameters[i]->StrLen_or_IndPtr); + value = Napi::Number::New(env, atof((const char*)parameters[i]->ParameterValuePtr)); + break; + // Napi::Number + case SQL_FLOAT : + case SQL_DOUBLE : + // value = Napi::String::New(env, (const char*)parameters[i]->ParameterValuePtr); + value = Napi::Number::New(env, *(double*)parameters[i]->ParameterValuePtr); + break; + case SQL_INTEGER : + case SQL_SMALLINT : + case SQL_BIGINT : + value = Napi::Number::New(env, *(int*)parameters[i]->ParameterValuePtr); + break; + // Napi::ArrayBuffer + case SQL_BINARY : + case SQL_VARBINARY : + case SQL_LONGVARBINARY : + value = Napi::ArrayBuffer::New(env, parameters[i]->ParameterValuePtr, parameters[i]->StrLen_or_IndPtr); + break; + // Napi::String (char16_t) + case SQL_WCHAR : + case SQL_WVARCHAR : + case SQL_WLONGVARCHAR : + value = Napi::String::New(env, (const char16_t*)parameters[i]->ParameterValuePtr, parameters[i]->StrLen_or_IndPtr/sizeof(SQLWCHAR)); + break; + // Napi::String (char) + case SQL_CHAR : + case SQL_VARCHAR : + case SQL_LONGVARCHAR : + default: + value = Napi::String::New(env, (const char*)parameters[i]->ParameterValuePtr); + break; + } + } -Local ODBC::GetRecordArray ( SQLHSTMT hStmt, Column* columns, - short* colCount, uint16_t* buffer, - int bufferLength) { - Nan::EscapableHandleScope scope; - - Local array = Nan::New(); - - for(int i = 0; i < *colCount; i++) { - array->Set( Nan::New(i), - GetColumnValue( hStmt, columns[i], buffer, bufferLength)); + napiParameters.Set(i, value); } - - return scope.Escape(array); + + return napiParameters; } -/* - * GetParametersFromArray - */ +// All of data has been loaded into data->storedRows. Have to take the data +// stored in there and convert it it into JavaScript to be given to the +// Node.js runtime. +Napi::Array ODBC::ProcessDataForNapi(Napi::Env env, QueryData *data) { -Parameter* ODBC::GetParametersFromArray (Local values, int *paramCount) { - DEBUG_PRINTF("ODBC::GetParametersFromArray\n"); - *paramCount = values->Length(); + std::vector *storedRows = &data->storedRows; + Column **columns = data->columns; + SQLSMALLINT columnCount = data->columnCount; - Parameter* params = NULL; - - if (*paramCount > 0) { - params = (Parameter *) malloc(*paramCount * sizeof(Parameter)); + // The rows array is the data structure that is returned from query results. + // This array holds the records that were returned from the query as objects, + // with the column names as the property keys on the object and the table + // values as the property values. + // Additionally, there are four properties that are added directly onto the + // array: + // 'count' : The returned from SQLRowCount, which returns "the + // number of rows affected by an UPDATE, INSERT, or DELETE + // statement." For SELECT statements and other statements + // where data is not available, returns -1. 
+ // 'columns' : An array containing the columns of the result set as + // objects, with two properties: + // 'name' : The name of the column + // 'dataType': The integer representation of the SQL dataType + // for that column. + // 'parameters' : An array containing all the parameter values for the + // query. If calling a statement, then parameter values are + // unchanged from the call. If calling a procedure, in/out + // or out parameters may have their values changed. + // 'return' : For some procedures, a return code is returned and stored + // in this property. + // 'statement' : The SQL statement that was sent to the server. Parameter + // markers are not altered, but parameters passed can be + // determined from the parameters array on this object + Napi::Array rows = Napi::Array::New(env); + + // set the 'statement' property + if (data->sql == NULL) { + rows.Set(Napi::String::New(env, STATEMENT), env.Null()); + } else { + rows.Set(Napi::String::New(env, STATEMENT), Napi::String::New(env, (const char*)data->sql)); } - - for (int i = 0; i < *paramCount; i++) { - Local value = values->Get(i); - - params[i].ColumnSize = 0; - params[i].StrLen_or_IndPtr = SQL_NULL_DATA; - params[i].BufferLength = 0; - params[i].DecimalDigits = 0; - DEBUG_PRINTF("ODBC::GetParametersFromArray - param[%i].length = %lli\n", - i, params[i].StrLen_or_IndPtr); + // set the 'parameters' property + Napi::Array params = ODBC::ParametersToArray(env, data); + rows.Set(Napi::String::New(env, PARAMETERS), ODBC::ParametersToArray(env, data)); - if (value->IsString()) { - Local string = value->ToString(); - - params[i].ValueType = SQL_C_TCHAR; - params[i].ColumnSize = 0; //SQL_SS_LENGTH_UNLIMITED -#ifdef UNICODE - params[i].ParameterType = SQL_WVARCHAR; - params[i].BufferLength = (string->Length() * sizeof(uint16_t)) + sizeof(uint16_t); -#else - params[i].ParameterType = SQL_VARCHAR; - params[i].BufferLength = string->Utf8Length() + 1; -#endif - params[i].ParameterValuePtr = malloc(params[i].BufferLength); - params[i].StrLen_or_IndPtr = SQL_NTS;//params[i].BufferLength; + // set the 'return' property + rows.Set(Napi::String::New(env, RETURN), env.Undefined()); // TODO: This doesn't exist on my DBMS of choice, need to test on MSSQL Server or similar -#ifdef UNICODE - string->Write((uint16_t *) params[i].ParameterValuePtr); -#else - string->WriteUtf8((char *) params[i].ParameterValuePtr); -#endif + // set the 'count' property + rows.Set(Napi::String::New(env, COUNT), Napi::Number::New(env, data->rowCount)); - DEBUG_PRINTF("ODBC::GetParametersFromArray - IsString(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%s\n", - i, params[i].ValueType, params[i].ParameterType, - params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, - (char*) params[i].ParameterValuePtr); - } - else if (value->IsNull()) { - params[i].ValueType = SQL_C_DEFAULT; - params[i].ParameterType = SQL_VARCHAR; - params[i].StrLen_or_IndPtr = SQL_NULL_DATA; - - DEBUG_PRINTF("ODBC::GetParametersFromArray - IsNull(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli\n", - i, params[i].ValueType, params[i].ParameterType, - params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr); - } - else if (value->IsInt32()) { - int64_t *number = new int64_t(value->IntegerValue()); - params[i].ValueType = SQL_C_SBIGINT; - params[i].ParameterType = SQL_BIGINT; - params[i].ParameterValuePtr = number; - params[i].StrLen_or_IndPtr = 0; - - DEBUG_PRINTF("ODBC::GetParametersFromArray - 
IsInt32(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%lld\n", - i, params[i].ValueType, params[i].ParameterType, - params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, - *number); - } - else if (value->IsNumber()) { - double *number = new double(value->NumberValue()); - - params[i].ValueType = SQL_C_DOUBLE; - params[i].ParameterType = SQL_DECIMAL; - params[i].ParameterValuePtr = number; - params[i].BufferLength = sizeof(double); - params[i].StrLen_or_IndPtr = params[i].BufferLength; - params[i].DecimalDigits = 7; - params[i].ColumnSize = sizeof(double); - - DEBUG_PRINTF("ODBC::GetParametersFromArray - IsNumber(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%f\n", - i, params[i].ValueType, params[i].ParameterType, - params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, - *number); - } - else if (value->IsBoolean()) { - bool *boolean = new bool(value->BooleanValue()); - params[i].ValueType = SQL_C_BIT; - params[i].ParameterType = SQL_BIT; - params[i].ParameterValuePtr = boolean; - params[i].StrLen_or_IndPtr = 0; - - DEBUG_PRINTF("ODBC::GetParametersFromArray - IsBoolean(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli\n", - i, params[i].ValueType, params[i].ParameterType, - params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr); + // construct the array for the 'columns' property and then set + Napi::Array napiColumns = Napi::Array::New(env); + + for (SQLSMALLINT h = 0; h < columnCount; h++) { + Napi::Object column = Napi::Object::New(env); + column.Set(Napi::String::New(env, NAME), Napi::String::New(env, (const char*)columns[h]->ColumnName)); + column.Set(Napi::String::New(env, DATA_TYPE), Napi::Number::New(env, columns[h]->DataType)); + napiColumns.Set(h, column); + } + rows.Set(Napi::String::New(env, COLUMNS), napiColumns); + + // iterate over all of the stored rows, + for (size_t i = 0; i < storedRows->size(); i++) { + + Napi::Object row = Napi::Object::New(env); + + ColumnData *storedRow = (*storedRows)[i]; + + // Iterate over each column, putting the data in the row object + for (SQLSMALLINT j = 0; j < columnCount; j++) { + + Napi::Value value; + + // check for null data + if (storedRow[j].size == SQL_NULL_DATA) { + + value = env.Null(); + + } else { + + switch(columns[j]->DataType) { + case SQL_REAL: + case SQL_NUMERIC : + value = Napi::Number::New(env, atof((const char*)storedRow[j].data)); + break; + // Napi::Number + case SQL_DECIMAL : + case SQL_FLOAT : + case SQL_DOUBLE : + value = Napi::Number::New(env, *(double*)storedRow[j].data); + break; + case SQL_INTEGER : + case SQL_SMALLINT : + case SQL_BIGINT : + value = Napi::Number::New(env, *(int*)storedRow[j].data); + break; + // Napi::ArrayBuffer + case SQL_BINARY : + case SQL_VARBINARY : + case SQL_LONGVARBINARY : + value = Napi::ArrayBuffer::New(env, storedRow[j].data, storedRow[j].size); + break; + // Napi::String (char16_t) + case SQL_WCHAR : + case SQL_WVARCHAR : + case SQL_WLONGVARCHAR : + value = Napi::String::New(env, (const char16_t*)storedRow[j].data, storedRow[j].size/sizeof(SQLWCHAR)); + break; + // Napi::String (char) + case SQL_CHAR : + case SQL_VARCHAR : + case SQL_LONGVARCHAR : + default: + value = Napi::String::New(env, (const char*)storedRow[j].data, storedRow[j].size); + break; + } + } + + row.Set(Napi::String::New(env, (const char*)columns[j]->ColumnName), value); + + delete storedRow[j].data; } - } - - return params; + rows.Set(i, row); + } + + 
storedRows->clear();
+  return rows;
}

+/******************************************************************************
+ **************************** BINDING PARAMETERS ******************************
+ *****************************************************************************/
+
/*
- * CallbackSQLError
+ * GetParametersFromArray
+ *    The array of parameters can hold any mix of the following:
+ *      Value:
+ *        One value to bind, In/Out defaults to SQL_PARAM_INPUT, dataType defaults based on the value
+ *      Arrays:
+ *        between 1 and 3 entries in length, with the following significance and default values:
+ *          1. Value (REQUIRED): The value to bind
+ *          2. In/Out (Optional): Defaults to SQL_PARAM_INPUT
+ *          3. DataType (Optional): Defaults based on the value
+ *      Objects:
+ *        can hold any of the following properties (but requires at least the 'value' property)
+ *          value (Required): The value to bind
+ *          inOut (Optional): the In/Out type to use, Defaults to SQL_PARAM_INPUT
+ *          dataType (Optional): The data type, defaults based on the value
+ *
+ * */
-Handle<Value> ODBC::CallbackSQLError (SQLSMALLINT handleType,
-                                      SQLHANDLE handle,
-                                      Nan::Callback* cb) {
-  Nan::EscapableHandleScope scope;
-
-  return scope.Escape(CallbackSQLError(
-    handleType,
-    handle,
-    (char *) "[node-odbc] SQL_ERROR",
-    cb));
+
+// This function solves the problem of losing access to "Napi::Value"s when entering
+// an AsyncWorker. In Connection::Query, once we enter an AsyncWorker we do not leave it again,
+// but we can't call SQLNumParams and SQLDescribeParam until after SQLPrepare. So the array of
+// values to bind to parameters must be saved off in the closest, largest data type to then
+// convert to the right C Type once the SQL Type of the parameter is known.
+void ODBC::StoreBindValues(Napi::Array *values, Parameter **parameters) {
+
+  uint32_t numParameters = values->Length();
+
+  for (uint32_t i = 0; i < numParameters; i++) {
+
+    Napi::Value value = values->Get(i);
+    Parameter *parameter = parameters[i];
+
+    if(value.IsNull()) {
+      parameter->ValueType = SQL_C_DEFAULT;
+      parameter->ParameterValuePtr = NULL;
+      parameter->StrLen_or_IndPtr = SQL_NULL_DATA;
+    } else if (value.IsNumber()) {
+      double double_val = value.As<Napi::Number>().DoubleValue();
+      int64_t int_val = value.As<Napi::Number>().Int64Value();
+      if ((int64_t)double_val == int_val) {
+        parameter->ValueType = SQL_C_SBIGINT;
+        parameter->ParameterValuePtr = new int64_t(value.As<Napi::Number>().Int64Value());
+      } else {
+        parameter->ValueType = SQL_C_DOUBLE;
+        parameter->ParameterValuePtr = new double(value.As<Napi::Number>().DoubleValue());
+      }
+    } else if (value.IsBoolean()) {
+      parameter->ValueType = SQL_C_BIT;
+      parameter->ParameterValuePtr = new bool(value.As<Napi::Boolean>().Value());
+    } else if (value.IsString()) {
+      Napi::String string = value.ToString();
+      parameter->ValueType = SQL_C_TCHAR;
+      parameter->BufferLength = (string.Utf8Value().length() + 1) * sizeof(SQLTCHAR);
+      parameter->ParameterValuePtr = new SQLTCHAR[parameter->BufferLength];
+      memcpy((SQLTCHAR*) parameter->ParameterValuePtr, string.Utf8Value().c_str(), parameter->BufferLength);
+      parameter->StrLen_or_IndPtr = SQL_NTS;
+    } else {
+      // TODO: Throw error, don't support other types
+    }
+  }
}

-Local<Value> ODBC::CallbackSQLError (SQLSMALLINT handleType,
-                                     SQLHANDLE handle,
-                                     char* message,
-                                     Nan::Callback* cb) {
-  Nan::EscapableHandleScope scope;
-
-  Local<Object> objError = ODBC::GetSQLError(
-    handleType,
-    handle,
-    message
-  );
-
-  Local<Value> info[1];
-  info[0] = objError;
-  cb->Call(1, info);
-
-  return scope.Escape(Nan::Undefined());
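+// Putting the pieces together, the parameter workflow is: StoreBindValues
+// copies the JavaScript values into Parameter structs, then (after
+// SQLPrepare) DescribeParameters below asks the driver for each parameter's
+// SQL type, column size and decimal digits via SQLDescribeParam, and finally
+// BindParameters hands the buffers to the driver via SQLBindParameter.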
+SQLRETURN ODBC::DescribeParameters(SQLHSTMT hSTMT, Parameter **parameters, SQLSMALLINT parameterCount) {
+
+  SQLRETURN returnCode = SQL_SUCCESS; // if no parameters, will return SQL_SUCCESS
+
+  for (SQLSMALLINT i = 0; i < parameterCount; i++) {
+
+    Parameter *parameter = parameters[i];
+
+    // "Except in calls to procedures, all parameters in SQL statements are input parameters."
+    parameter->InputOutputType = SQL_PARAM_INPUT;
+
+    returnCode = SQLDescribeParam(
+      hSTMT,                     // StatementHandle,
+      i + 1,                     // ParameterNumber,
+      &parameter->ParameterType, // DataTypePtr,
+      &parameter->ColumnSize,    // ParameterSizePtr,
+      &parameter->DecimalDigits, // DecimalDigitsPtr,
+      NULL                       // NullablePtr: nullability info isn't needed by this package, so pass NULL
+    );
+
+    // if there is an error, return early and retrieve the error in the calling function
+    if (!SQL_SUCCEEDED(returnCode)) {
+      return returnCode;
+    }
+  }
+
+  return returnCode;
}

+SQLRETURN ODBC::BindParameters(SQLHSTMT hSTMT, Parameter **parameters, SQLSMALLINT parameterCount) {
+
+  SQLRETURN sqlReturnCode = SQL_SUCCESS; // if no parameters, will return SQL_SUCCESS
+
+  for (int i = 0; i < parameterCount; i++) {
+
+    Parameter* parameter = parameters[i];
+
+    sqlReturnCode = SQLBindParameter(
+      hSTMT,                        // StatementHandle
+      i + 1,                        // ParameterNumber
+      parameter->InputOutputType,   // InputOutputType
+      parameter->ValueType,         // ValueType
+      parameter->ParameterType,     // ParameterType
+      parameter->ColumnSize,        // ColumnSize
+      parameter->DecimalDigits,     // DecimalDigits
+      parameter->ParameterValuePtr, // ParameterValuePtr
+      parameter->BufferLength,      // BufferLength
+      &parameter->StrLen_or_IndPtr  // StrLen_or_IndPtr
+    );
+
+    // If there was an error, return early
+    if (!SQL_SUCCEEDED(sqlReturnCode)) {
+      return sqlReturnCode;
+    }
+  }
+
+  // If this returns SQL_SUCCESS, then SQLBindParameter returned SUCCESS or
+  // SUCCESS_WITH_INFO for every call to SQLBindParameter.
+  return sqlReturnCode;
+}
+
+/******************************************************************************
+ ********************************** ERRORS ************************************
+ *****************************************************************************/
+
/*
 * GetSQLError
 */

-Local<Object> ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle) {
-  Nan::EscapableHandleScope scope;
-
-  return scope.Escape(GetSQLError(
-    handleType,
-    handle,
-    (char *) "[node-odbc] SQL_ERROR"));
+std::string ODBC::GetSQLError(SQLSMALLINT handleType, SQLHANDLE handle) {
+
+  std::string error = GetSQLError(handleType, handle, "[node-odbc] SQL_ERROR");
+  return error;
}

-Local<Object> ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message) {
-  Nan::EscapableHandleScope scope;
-
+std::string ODBC::GetSQLError(SQLSMALLINT handleType, SQLHANDLE handle, const char* message) {
+
  DEBUG_PRINTF("ODBC::GetSQLError : handleType=%i, handle=%p\n", handleType, handle);
+
+  std::string errorMessageStr;

-  Local<Object> objError = Nan::New<Object>();

  int32_t i = 0;
  SQLINTEGER native;
@@ -816,22 +1003,29 @@ Local<Object> ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char*
  char errorSQLState[14];
  char errorMessage[ERROR_MESSAGE_BUFFER_BYTES];

-  ret = SQLGetDiagField(
+  ret = SQLGetDiagField (
    handleType,
    handle,
    0,
    SQL_DIAG_NUMBER,
    &statusRecCount,
    SQL_IS_INTEGER,
-    &len);
+    &len
+  );

  // Windows seems to define SQLINTEGER as long int, unixodbc as just int...
%i should cover both DEBUG_PRINTF("ODBC::GetSQLError : called SQLGetDiagField; ret=%i, statusRecCount=%i\n", ret, statusRecCount); - Local errors = Nan::New(); - objError->Set(Nan::New("errors").ToLocalChecked(), errors); + // Napi::Array errors = Napi::Array::New(env); + + if (statusRecCount > 1) { + // objError.Set(Napi::String::New(env, "errors"), errors); + } + + errorMessageStr += "\"errors\": ["; - for (i = 0; i < statusRecCount; i++){ + for (i = 0; i < statusRecCount; i++) { + DEBUG_PRINTF("ODBC::GetSQLError : calling SQLGetDiagRec; i=%i, statusRecCount=%i\n", i, statusRecCount); ret = SQLGetDiagRec( @@ -848,137 +1042,66 @@ Local ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* if (SQL_SUCCEEDED(ret)) { DEBUG_PRINTF("ODBC::GetSQLError : errorMessage=%s, errorSQLState=%s\n", errorMessage, errorSQLState); + - if (i == 0) { - // First error is assumed the primary error - objError->Set(Nan::New("error").ToLocalChecked(), Nan::New(message).ToLocalChecked()); -#ifdef UNICODE - Nan::SetPrototype(objError, Exception::Error(Nan::New((uint16_t *) errorMessage).ToLocalChecked())); - objError->Set(Nan::New("message").ToLocalChecked(), Nan::New((uint16_t *)errorMessage).ToLocalChecked()); - objError->Set(Nan::New("state").ToLocalChecked(), Nan::New((uint16_t *)errorSQLState).ToLocalChecked()); -#else - Nan::SetPrototype(objError, Exception::Error(Nan::New(errorMessage).ToLocalChecked())); - objError->Set(Nan::New("message").ToLocalChecked(), Nan::New(errorMessage).ToLocalChecked()); - objError->Set(Nan::New("state").ToLocalChecked(), Nan::New(errorSQLState).ToLocalChecked()); -#endif + if (i != 0) { + errorMessageStr += ','; } - Local subError = Nan::New(); - #ifdef UNICODE - subError->Set(Nan::New("message").ToLocalChecked(), Nan::New((uint16_t *)errorMessage).ToLocalChecked()); - subError->Set(Nan::New("state").ToLocalChecked(), Nan::New((uint16_t *)errorSQLState).ToLocalChecked()); +// TODO: + std::string error = message; + std::string message = errorMessage; + std::string SQLstate = errorSQLState; #else - subError->Set(Nan::New("message").ToLocalChecked(), Nan::New(errorMessage).ToLocalChecked()); - subError->Set(Nan::New("state").ToLocalChecked(), Nan::New(errorSQLState).ToLocalChecked()); + std::string error = message; + std::string message = errorMessage; + std::string SQLstate = errorSQLState; #endif - errors->Set(Nan::New(i), subError); + errorMessageStr += "{ \"error\": \"" + error + "\", \"message\": \"" + errorMessage + "\", \"SQLState\": \"" + SQLstate + "\"}"; } else if (ret == SQL_NO_DATA) { break; } } - if (statusRecCount == 0) { - //Create a default error object if there were no diag records - objError->Set(Nan::New("error").ToLocalChecked(), Nan::New(message).ToLocalChecked()); - Nan::SetPrototype(objError, Exception::Error(Nan::New(message).ToLocalChecked())); - objError->Set(Nan::New("message").ToLocalChecked(), Nan::New( - (const char *) "[node-odbc] An error occurred but no diagnostic information was available.").ToLocalChecked()); - } + // if (statusRecCount == 0) { + // //Create a default error object if there were no diag records + // objError.Set(Napi::String::New(env, "error"), Napi::String::New(env, message)); + // //objError.SetPrototype(Napi::Error(Napi::String::New(env, message))); + // objError.Set(Napi::String::New(env, "message"), Napi::String::New(env, + // (const char *) "[node-odbc] An error occurred but no diagnostic information was available.")); + // } + + errorMessageStr += "]"; - return scope.Escape(objError); + return errorMessageStr; 
} -/* - * GetAllRecordsSync - */ +Napi::Object InitAll(Napi::Env env, Napi::Object exports) { -Local ODBC::GetAllRecordsSync (HENV hENV, - HDBC hDBC, - HSTMT hSTMT, - uint16_t* buffer, - int bufferLength) { - DEBUG_PRINTF("ODBC::GetAllRecordsSync\n"); - - Nan::EscapableHandleScope scope; - - Local objError = Nan::New(); - - int count = 0; - int errorCount = 0; - short colCount = 0; - - Column* columns = GetColumns(hSTMT, &colCount); - - Local rows = Nan::New(); - - //loop through all records - while (true) { - SQLRETURN ret = SQLFetch(hSTMT); - - //check to see if there was an error - if (ret == SQL_ERROR) { - //TODO: what do we do when we actually get an error here... - //should we throw?? - - errorCount++; - - objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - hSTMT, - (char *) "[node-odbc] Error in ODBC::GetAllRecordsSync" - ); - - break; - } - - //check to see if we are at the end of the recordset - if (ret == SQL_NO_DATA) { - ODBC::FreeColumns(columns, &colCount); - - break; - } + ODBC::Init(env, exports); + ODBCConnection::Init(env, exports); + ODBCStatement::Init(env, exports); - rows->Set( - Nan::New(count), - ODBC::GetRecordTuple( - hSTMT, - columns, - &colCount, - buffer, - bufferLength) - ); + #ifdef dynodbc + exports.Set(Napi::String::New(env, "loadODBCLibrary"), + Napi::Function::New(env, ODBC::LoadODBCLibrary);()); + #endif - count++; - } - //TODO: what do we do about errors!?! - //we throw them - return scope.Escape(rows); + return exports; } #ifdef dynodbc -NAN_METHOD(ODBC::LoadODBCLibrary) { - Nan::HandleScope scope; +Napi::Value ODBC::LoadODBCLibrary(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); REQ_STR_ARG(0, js_library); bool result = DynLoadODBC(*js_library); - info.GetReturnValue().Set((result) ? Nan::True() : Nan::False()); + return (result) ? 
env.True() : env.False();W } #endif -extern "C" void init(v8::Handle exports) { -#ifdef dynodbc - exports->Set(Nan::New("loadODBCLibrary").ToLocalChecked(), - Nan::New(ODBC::LoadODBCLibrary)->GetFunction()); -#endif - - ODBC::Init(exports); - ODBCResult::Init(exports); - ODBCConnection::Init(exports); - ODBCStatement::Init(exports); -} - -NODE_MODULE(odbc_bindings, init) +NODE_API_MODULE(odbc_bindings, InitAll) diff --git a/src/odbc.h b/src/odbc.h old mode 100644 new mode 100755 index 550fd45..48f4090 --- a/src/odbc.h +++ b/src/odbc.h @@ -18,9 +18,8 @@ #ifndef _SRC_ODBC_H #define _SRC_ODBC_H -#include -#include -#include +#include +#include #include #include @@ -33,9 +32,6 @@ #include #endif -using namespace v8; -using namespace node; - #define MAX_FIELD_SIZE 1024 #define MAX_VALUE_SIZE 1048576 @@ -47,120 +43,189 @@ using namespace node; #define ERROR_MESSAGE_BUFFER_CHARS 2048 #endif -#define MODE_COLLECT_AND_CALLBACK 1 -#define MODE_CALLBACK_FOR_EACH 2 #define FETCH_ARRAY 3 #define FETCH_OBJECT 4 #define SQL_DESTROY 9999 - -typedef struct { - unsigned char *name; - unsigned int len; - SQLLEN type; - SQLUSMALLINT index; +// object keys for the result object +static const std::string NAME = "name"; +static const std::string DATA_TYPE = "dataType"; +static const std::string STATEMENT = "statement"; +static const std::string PARAMETERS = "parameters"; +static const std::string RETURN = "return"; +static const std::string COUNT = "count"; +static const std::string COLUMNS = "columns"; + +typedef struct Column { + SQLUSMALLINT index; + SQLTCHAR *ColumnName = NULL; + SQLSMALLINT BufferLength; + SQLSMALLINT NameLength; + SQLSMALLINT DataType; + SQLULEN ColumnSize; + SQLSMALLINT DecimalDigits; + SQLLEN StrLen_or_IndPtr; + SQLSMALLINT Nullable; } Column; -typedef struct { - SQLSMALLINT ValueType; - SQLSMALLINT ParameterType; - SQLLEN ColumnSize; - SQLSMALLINT DecimalDigits; - void *ParameterValuePtr; - SQLLEN BufferLength; - SQLLEN StrLen_or_IndPtr; +// Amalgamation of the information returned by SQLDescribeParam/SQLProcedureColumns and the +// information needed by SQLBindParameter +typedef struct Parameter { + SQLSMALLINT InputOutputType; // not returned by SQLDescribeParam, but is by SQLProcedureColumns + SQLSMALLINT ValueType; + SQLSMALLINT ParameterType; + SQLULEN ColumnSize; + SQLSMALLINT DecimalDigits; + SQLPOINTER ParameterValuePtr; + SQLLEN BufferLength; + SQLLEN StrLen_or_IndPtr; + SQLSMALLINT Nullable; } Parameter; -class ODBC : public Nan::ObjectWrap { +typedef struct ColumnData { + SQLTCHAR *data; + SQLLEN size; + + ~ColumnData() { + delete this->data; + } + +} ColumnData; + +// QueryData +typedef struct QueryData { + + SQLHSTMT hSTMT; + + // parameters + SQLSMALLINT parameterCount = 0; // returned by SQLNumParams + SQLSMALLINT bindValueCount = 0; // number of values passed from JavaScript + Parameter** parameters = NULL; + + // columns and rows + Column **columns = NULL; + SQLSMALLINT columnCount; + SQLTCHAR **boundRow = NULL; + std::vector storedRows; + SQLLEN rowCount; + + // query options + SQLTCHAR *sql = NULL; + SQLTCHAR *catalog = NULL; + SQLTCHAR *schema = NULL; + SQLTCHAR *table = NULL; + SQLTCHAR *type = NULL; + SQLTCHAR *column = NULL; + SQLTCHAR *procedure = NULL; + + SQLRETURN sqlReturnCode; + + ~QueryData() { + this->clear(); + } + + void deleteColumns() { + if (this->columnCount > 0) { + for (int i = 0; i < this->columnCount; i++) { + delete this->columns[i]->ColumnName; + delete this->columns[i]; + } + } + + storedRows.clear(); + + delete columns; columns = NULL; + 
delete boundRow; boundRow = NULL; + delete sql; sql = NULL; + } + + void clear() { + if (this->bindValueCount > 0 || this->parameterCount > 0) { + + Parameter* parameter; + + for (int i = 0; i < this->bindValueCount; i++) { + if (parameter = this->parameters[i], parameter->ParameterValuePtr != NULL) { + switch (parameter->ValueType) { + case SQL_C_SBIGINT: + delete (int64_t*)parameter->ParameterValuePtr; + break; + case SQL_C_DOUBLE: + delete (double*)parameter->ParameterValuePtr; + break; + case SQL_C_BIT: + delete (bool*)parameter->ParameterValuePtr; + break; + case SQL_C_TCHAR: + default: + delete (SQLTCHAR*)parameter->ParameterValuePtr; + break; + } + } + parameter->ParameterValuePtr = NULL; + } + + delete this->parameters; this->parameters = NULL; + this->bindValueCount = 0; + this->parameterCount = 0; + } + + if (this->columnCount > 0) { + for (int i = 0; i < this->columnCount; i++) { + delete this->columns[i]->ColumnName; + delete this->columns[i]; + } + } + + delete columns; columns = NULL; + delete boundRow; boundRow = NULL; + + delete this->sql; this->sql = NULL; + delete this->catalog; this->catalog = NULL; + delete this->schema; this->schema = NULL; + delete this->table; this->table = NULL; + delete this->type; this->type = NULL; + delete this->column; this->column = NULL; + } + +} QueryData; + +class ODBC { + public: - static Nan::Persistent constructor; static uv_mutex_t g_odbcMutex; - - static void Init(v8::Handle exports); - static Column* GetColumns(SQLHSTMT hStmt, short* colCount); - static void FreeColumns(Column* columns, short* colCount); - static Handle GetColumnValue(SQLHSTMT hStmt, Column column, uint16_t* buffer, int bufferLength); - static Local GetRecordTuple (SQLHSTMT hStmt, Column* columns, short* colCount, uint16_t* buffer, int bufferLength); - static Local GetRecordArray (SQLHSTMT hStmt, Column* columns, short* colCount, uint16_t* buffer, int bufferLength); - static Handle CallbackSQLError(SQLSMALLINT handleType, SQLHANDLE handle, Nan::Callback* cb); - static Local CallbackSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message, Nan::Callback* cb); - static Local GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle); - static Local GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message); - static Local GetAllRecordsSync (HENV hENV, HDBC hDBC, HSTMT hSTMT, uint16_t* buffer, int bufferLength); -#ifdef dynodbc - static NAN_METHOD(LoadODBCLibrary); -#endif - static Parameter* GetParametersFromArray (Local values, int* paramCount); - - void Free(); - - protected: - ODBC() {} + static SQLHENV hEnv; - ~ODBC(); + static Napi::Value Init(Napi::Env env, Napi::Object exports); - public: - static NAN_METHOD(New); + static std::string GetSQLError(SQLSMALLINT handleType, SQLHANDLE handle); + static std::string GetSQLError(SQLSMALLINT handleType, SQLHANDLE handle, const char* message); - //async methods - static NAN_METHOD(CreateConnection); - protected: - static void UV_CreateConnection(uv_work_t* work_req); - static void UV_AfterCreateConnection(uv_work_t* work_req, int status); - - static void WatcherCallback(uv_async_t* w, int revents); - - //sync methods - public: - static NAN_METHOD(CreateConnectionSync); - protected: - - ODBC *self(void) { return this; } + static SQLTCHAR* NapiStringToSQLTCHAR(Napi::String string); - HENV m_hEnv; -}; + static SQLRETURN RetrieveResultSet(QueryData *data); + static SQLRETURN BindColumns(QueryData *data); + static SQLRETURN FetchAll(QueryData *data); -struct create_connection_work_data { - Nan::Callback* cb; - 
ODBC *dbo; - HDBC hDBC; - int result; -}; + static void StoreBindValues(Napi::Array *values, Parameter **parameters); + static SQLRETURN DescribeParameters(SQLHSTMT hSTMT, Parameter **parameters, SQLSMALLINT parameterCount); + static SQLRETURN BindParameters(SQLHSTMT hSTMT, Parameter **parameters, SQLSMALLINT parameterCount); + static Napi::Array ParametersToArray(Napi::Env env, QueryData *data); -struct open_request { - Nan::Persistent cb; - ODBC *dbo; - int result; - char connection[1]; -}; + static Napi::Array ProcessDataForNapi(Napi::Env env, QueryData *data); -struct close_request { - Nan::Persistent cb; - ODBC *dbo; - int result; -}; + void Free(); -struct query_request { - Nan::Persistent cb; - ODBC *dbo; - HSTMT hSTMT; - int affectedRows; - char *sql; - char *catalog; - char *schema; - char *table; - char *type; - char *column; - Parameter *params; - int paramCount; - int result; -}; + ~ODBC(); + + static Napi::Value Connect(const Napi::CallbackInfo& info); + static Napi::Value ConnectSync(const Napi::CallbackInfo& info); + static Napi::Value ConnectMany(const Napi::CallbackInfo& info); -#ifdef UNICODE -#define SQL_T(x) (L##x) -#else -#define SQL_T(x) (x) -#endif + #ifdef dynodbc + static Napi::Value LoadODBCLibrary(const Napi::CallbackInfo& info); + #endif +}; #ifdef DEBUG #define DEBUG_TPRINTF(...) fprintf(stdout, __VA_ARGS__) @@ -170,78 +235,13 @@ struct query_request { #define DEBUG_TPRINTF(...) (void)0 #endif -#define REQ_ARGS(N) \ - if (info.Length() < (N)) \ - return Nan::ThrowTypeError("Expected " #N "arguments"); - -//Require String Argument; Save String as Utf8 -#define REQ_STR_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsString()) \ - return Nan::ThrowTypeError("Argument " #I " must be a string"); \ - String::Utf8Value VAR(info[I]->ToString()); - -//Require String Argument; Save String as Wide String (UCS2) -#define REQ_WSTR_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsString()) \ - return Nan::ThrowTypeError("Argument " #I " must be a string"); \ - String::Value VAR(info[I]->ToString()); - -//Require String Argument; Save String as Object -#define REQ_STRO_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsString()) \ - return Nan::ThrowTypeError("Argument " #I " must be a string"); \ - Local VAR(info[I]->ToString()); - -//Require String or Null Argument; Save String as Utf8 -#define REQ_STR_OR_NULL_ARG(I, VAR) \ - if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) \ - return Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ - String::Utf8Value VAR(info[I]->ToString()); - -//Require String or Null Argument; Save String as Wide String (UCS2) -#define REQ_WSTR_OR_NULL_ARG(I, VAR) \ - if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) \ - return Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ - String::Value VAR(info[I]->ToString()); - -//Require String or Null Argument; save String as String Object -#define REQ_STRO_OR_NULL_ARG(I, VAR) \ - if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) { \ - Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ - return; \ - } \ - Local VAR(info[I]->ToString()); - -#define REQ_FUN_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsFunction()) \ - return Nan::ThrowTypeError("Argument " #I " must be a function"); \ - Local VAR = Local::Cast(info[I]); - -#define REQ_BOOL_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsBoolean()) \ - return Nan::ThrowTypeError("Argument " #I " 
must be a boolean"); \ - Local VAR = (info[I]->ToBoolean()); - -#define REQ_EXT_ARG(I, VAR) \ - if (info.Length() <= (I) || !info[I]->IsExternal()) \ - return Nan::ThrowTypeError("Argument " #I " invalid"); \ - Local VAR = Local::Cast(info[I]); - -#define OPT_INT_ARG(I, VAR, DEFAULT) \ - int VAR; \ - if (info.Length() <= (I)) { \ - VAR = (DEFAULT); \ - } else if (info[I]->IsInt32()) { \ - VAR = info[I]->Int32Value(); \ - } else { \ - return Nan::ThrowTypeError("Argument " #I " must be an integer"); \ - } - +#define ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(returnCode, handletype, handle, context, sqlFunction) ({\ + if(!SQL_SUCCEEDED(returnCode)) {\ + char errorString[255];\ + sprintf(errorString, "[Node.js::odbc] %s: Error in ODBC function %s", context, sqlFunction);\ + SetError(ODBC::GetSQLError(handletype, handle, errorString));\ + return;\ + }\ +}) -// From node v10 NODE_DEFINE_CONSTANT -#define NODE_ODBC_DEFINE_CONSTANT(constructor_template, constant) \ - (constructor_template)->Set(Nan::New(#constant).ToLocalChecked(), \ - Nan::New(constant), \ - static_cast(v8::ReadOnly|v8::DontDelete)) - -#endif +#endif \ No newline at end of file diff --git a/src/odbc_connection.cpp b/src/odbc_connection.cpp old mode 100644 new mode 100755 index 3e748ee..7cb29c5 --- a/src/odbc_connection.cpp +++ b/src/odbc_connection.cpp @@ -15,1699 +15,1390 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include -#include -#include -#include -#include -#include - #include "odbc.h" #include "odbc_connection.h" -#include "odbc_result.h" #include "odbc_statement.h" -using namespace v8; -using namespace node; +Napi::FunctionReference ODBCConnection::constructor; -Nan::Persistent ODBCConnection::constructor; -Nan::Persistent ODBCConnection::OPTION_SQL; -Nan::Persistent ODBCConnection::OPTION_PARAMS; -Nan::Persistent ODBCConnection::OPTION_NORESULTS; +Napi::Object ODBCConnection::Init(Napi::Env env, Napi::Object exports) { -void ODBCConnection::Init(v8::Handle exports) { DEBUG_PRINTF("ODBCConnection::Init\n"); - Nan::HandleScope scope; - OPTION_SQL.Reset(Nan::New("sql").ToLocalChecked()); - OPTION_PARAMS.Reset(Nan::New("params").ToLocalChecked()); - OPTION_NORESULTS.Reset(Nan::New("noResults").ToLocalChecked()); + Napi::HandleScope scope(env); - Local constructor_template = Nan::New(New); + Napi::Function constructorFunction = DefineClass(env, "ODBCConnection", { - // Constructor Template - constructor_template->SetClassName(Nan::New("ODBCConnection").ToLocalChecked()); + InstanceMethod("close", &ODBCConnection::Close), + InstanceMethod("createStatement", &ODBCConnection::CreateStatement), + InstanceMethod("query", &ODBCConnection::Query), + InstanceMethod("beginTransaction", &ODBCConnection::BeginTransaction), + InstanceMethod("commit", &ODBCConnection::Commit), + InstanceMethod("rollback", &ODBCConnection::Rollback), + InstanceMethod("callProcedure", &ODBCConnection::CallProcedure), + InstanceMethod("getUsername", &ODBCConnection::GetUsername), + InstanceMethod("tables", &ODBCConnection::Tables), + InstanceMethod("columns", &ODBCConnection::Columns), - // Reserve space for one Handle - Local instance_template = constructor_template->InstanceTemplate(); - instance_template->SetInternalFieldCount(1); - - // Properties - //Nan::SetAccessor(instance_template, Nan::New("mode").ToLocalChecked(), ModeGetter, ModeSetter); - Nan::SetAccessor(instance_template, Nan::New("connected").ToLocalChecked(), ConnectedGetter); - Nan::SetAccessor(instance_template, Nan::New("connectTimeout").ToLocalChecked(), 
ConnectTimeoutGetter, ConnectTimeoutSetter); - Nan::SetAccessor(instance_template, Nan::New("loginTimeout").ToLocalChecked(), LoginTimeoutGetter, LoginTimeoutSetter); - - // Prototype Methods - Nan::SetPrototypeMethod(constructor_template, "open", Open); - Nan::SetPrototypeMethod(constructor_template, "openSync", OpenSync); - Nan::SetPrototypeMethod(constructor_template, "close", Close); - Nan::SetPrototypeMethod(constructor_template, "closeSync", CloseSync); - Nan::SetPrototypeMethod(constructor_template, "createStatement", CreateStatement); - Nan::SetPrototypeMethod(constructor_template, "createStatementSync", CreateStatementSync); - Nan::SetPrototypeMethod(constructor_template, "query", Query); - Nan::SetPrototypeMethod(constructor_template, "querySync", QuerySync); - - Nan::SetPrototypeMethod(constructor_template, "beginTransaction", BeginTransaction); - Nan::SetPrototypeMethod(constructor_template, "beginTransactionSync", BeginTransactionSync); - Nan::SetPrototypeMethod(constructor_template, "endTransaction", EndTransaction); - Nan::SetPrototypeMethod(constructor_template, "endTransactionSync", EndTransactionSync); + InstanceAccessor("connected", &ODBCConnection::ConnectedGetter, nullptr), + InstanceAccessor("autocommit", &ODBCConnection::AutocommitGetter, nullptr), + InstanceAccessor("connectionTimeout", &ODBCConnection::ConnectTimeoutGetter, &ODBCConnection::ConnectTimeoutSetter), + InstanceAccessor("loginTimeout", &ODBCConnection::LoginTimeoutGetter, &ODBCConnection::LoginTimeoutSetter) - Nan::SetPrototypeMethod(constructor_template, "getInfoSync", GetInfoSync); + }); - Nan::SetPrototypeMethod(constructor_template, "columns", Columns); - Nan::SetPrototypeMethod(constructor_template, "tables", Tables); - - // Attach the Database Constructor to the target object - constructor.Reset(constructor_template->GetFunction()); - exports->Set( Nan::New("ODBCConnection").ToLocalChecked(), constructor_template->GetFunction()); -} + constructor = Napi::Persistent(constructorFunction); + constructor.SuppressDestruct(); -ODBCConnection::~ODBCConnection() { - DEBUG_PRINTF("ODBCConnection::~ODBCConnection\n"); - this->Free(); + return exports; } -void ODBCConnection::Free() { - DEBUG_PRINTF("ODBCConnection::Free\n"); - if (m_hDBC) { - uv_mutex_lock(&ODBC::g_odbcMutex); - - if (m_hDBC) { - SQLDisconnect(m_hDBC); - SQLFreeHandle(SQL_HANDLE_DBC, m_hDBC); - m_hDBC = NULL; - } - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } -} +Napi::Value ODBCConnection::AutocommitGetter(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); -/* - * New - */ + SQLINTEGER autocommit; -NAN_METHOD(ODBCConnection::New) { - DEBUG_PRINTF("ODBCConnection::New\n"); - Nan::HandleScope scope; - - REQ_EXT_ARG(0, js_henv); - REQ_EXT_ARG(1, js_hdbc); - - HENV hENV = static_cast(js_henv->Value()); - HDBC hDBC = static_cast(js_hdbc->Value()); - - ODBCConnection* conn = new ODBCConnection(hENV, hDBC); - - conn->Wrap(info.Holder()); - - //set default connectTimeout to 0 seconds - conn->connectTimeout = 0; - //set default loginTimeout to 5 seconds - conn->loginTimeout = 5; + SQLGetConnectAttr( + this->hDBC, // ConnectionHandle + SQL_ATTR_AUTOCOMMIT, // Attribute + &autocommit, // ValuePtr + 0, // BufferLength + NULL // StringLengthPtr + ); + + if (autocommit == SQL_AUTOCOMMIT_OFF) { + return Napi::Boolean::New(env, false); + } else if (autocommit == SQL_AUTOCOMMIT_ON) { + return Napi::Boolean::New(env, true); + } - info.GetReturnValue().Set(info.Holder()); + return Napi::Boolean::New(env, false); } 
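+// The getter above only reports the current autocommit state. As a rough
+// sketch of the underlying ODBC call (an illustration, not the code used by
+// the transaction methods later in this file), autocommit is switched off
+// for a manual transaction with a single connection attribute call:
+//
+//   SQLSetConnectAttr(
+//     hDBC,                            // ConnectionHandle
+//     SQL_ATTR_AUTOCOMMIT,             // Attribute
+//     (SQLPOINTER) SQL_AUTOCOMMIT_OFF, // ValuePtr
+//     SQL_IS_UINTEGER                  // StringLength
+//   );
+//
+// Committing or rolling back is then done with SQLEndTran, as the
+// CloseAsyncWorker further down does when it rolls back any open transaction.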
-NAN_GETTER(ODBCConnection::ConnectedGetter) { - Nan::HandleScope scope; +ODBCConnection::ODBCConnection(const Napi::CallbackInfo& info) : Napi::ObjectWrap(info) { - ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + this->hENV = *(info[0].As>().Data()); + this->hDBC = *(info[1].As>().Data()); - info.GetReturnValue().Set(obj->connected ? Nan::True() : Nan::False()); + this->connectionTimeout = 0; + this->loginTimeout = 5; } -NAN_GETTER(ODBCConnection::ConnectTimeoutGetter) { - Nan::HandleScope scope; +ODBCConnection::~ODBCConnection() { - ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + DEBUG_PRINTF("ODBCConnection::~ODBCConnection\n"); - info.GetReturnValue().Set(Nan::New(obj->connectTimeout)); + this->Free(); } -NAN_SETTER(ODBCConnection::ConnectTimeoutSetter) { - Nan::HandleScope scope; +SQLRETURN ODBCConnection::Free() { - ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); - - if (value->IsNumber()) { - obj->connectTimeout = value->Uint32Value(); - } -} + SQLRETURN returnCode = SQL_SUCCESS; -NAN_GETTER(ODBCConnection::LoginTimeoutGetter) { - Nan::HandleScope scope; + DEBUG_PRINTF("ODBCConnection::Free\n"); - ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + uv_mutex_lock(&ODBC::g_odbcMutex); - info.GetReturnValue().Set(Nan::New(obj->loginTimeout)); -} + if (this->hDBC) { + returnCode = SQLDisconnect(this->hDBC); + if (!SQL_SUCCEEDED(returnCode)) { + printf("\nSQLDisconnect"); + uv_mutex_unlock(&ODBC::g_odbcMutex); + return returnCode; + } -NAN_SETTER(ODBCConnection::LoginTimeoutSetter) { - Nan::HandleScope scope; + returnCode = SQLFreeHandle(SQL_HANDLE_DBC, this->hDBC); + if (!SQL_SUCCEEDED(returnCode)) { + printf("\nSQLFreeHandle"); + uv_mutex_unlock(&ODBC::g_odbcMutex); + return returnCode; + } - ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); - - if (value->IsNumber()) { - obj->loginTimeout = value->Uint32Value(); - } + hDBC = NULL; + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + return returnCode; } -/* - * Open - * - */ - -//Handle ODBCConnection::Open(const Arguments& info) { -NAN_METHOD(ODBCConnection::Open) { - DEBUG_PRINTF("ODBCConnection::Open\n"); - Nan::HandleScope scope; +Napi::Value ODBCConnection::ConnectedGetter(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - REQ_STRO_ARG(0, connection); - REQ_FUN_ARG(1, cb); + SQLINTEGER connection; - //get reference to the connection object - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - //create a uv work request - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - //allocate our worker data - open_connection_work_data* data = (open_connection_work_data *) - calloc(1, sizeof(open_connection_work_data)); - - //copy the connection string to the work data -#ifdef UNICODE - data->connectionLength = connection->Length() + 1; - data->connection = (uint16_t *) malloc(sizeof(uint16_t) * data->connectionLength); - connection->Write((uint16_t*) data->connection); -#else - data->connectionLength = connection->Utf8Length() + 1; - data->connection = (char *) malloc(sizeof(char) * data->connectionLength); - connection->WriteUtf8((char*) data->connection); -#endif - - data->cb = new Nan::Callback(cb); - data->conn = conn; - - work_req->data = data; - - //queue the work - uv_queue_work(uv_default_loop(), - work_req, - UV_Open, - (uv_after_work_cb)UV_AfterOpen); + SQLGetConnectAttr( + this->hDBC, // ConnectionHandle + SQL_ATTR_CONNECTION_DEAD, // Attribute + &connection, // ValuePtr + 0, // 
BufferLength + NULL // StringLengthPtr + ); - conn->Ref(); + if (connection == SQL_CD_TRUE) { + return Napi::Boolean::New(env, false); + } else if (connection == SQL_CD_FALSE) { + return Napi::Boolean::New(env, true); + } - info.GetReturnValue().Set(info.Holder()); + return Napi::Boolean::New(env, false); } -void ODBCConnection::UV_Open(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_Open\n"); - open_connection_work_data* data = (open_connection_work_data *)(req->data); - - ODBCConnection* self = data->conn->self(); - - DEBUG_PRINTF("ODBCConnection::UV_Open : connectTimeout=%i, loginTimeout = %i\n", *&(self->connectTimeout), *&(self->loginTimeout)); - - uv_mutex_lock(&ODBC::g_odbcMutex); - - if (self->connectTimeout > 0) { - //NOTE: SQLSetConnectAttr requires the thread to be locked - SQLSetConnectAttr( - self->m_hDBC, //ConnectionHandle - SQL_ATTR_CONNECTION_TIMEOUT, //Attribute - (SQLPOINTER) size_t(self->connectTimeout), //ValuePtr - SQL_IS_UINTEGER); //StringLength - } - - if (self->loginTimeout > 0) { - //NOTE: SQLSetConnectAttr requires the thread to be locked - SQLSetConnectAttr( - self->m_hDBC, //ConnectionHandle - SQL_ATTR_LOGIN_TIMEOUT, //Attribute - (SQLPOINTER) size_t(self->loginTimeout), //ValuePtr - SQL_IS_UINTEGER); //StringLength - } - - //Attempt to connect - //NOTE: SQLDriverConnect requires the thread to be locked - int ret = SQLDriverConnect( - self->m_hDBC, //ConnectionHandle - NULL, //WindowHandle - (SQLTCHAR*) data->connection, //InConnectionString - data->connectionLength, //StringLength1 - NULL, //OutConnectionString - 0, //BufferLength - in characters - NULL, //StringLength2Ptr - SQL_DRIVER_NOPROMPT); //DriverCompletion +Napi::Value ODBCConnection::ConnectTimeoutGetter(const Napi::CallbackInfo& info) { - if (SQL_SUCCEEDED(ret)) { - HSTMT hStmt; - - //allocate a temporary statment - ret = SQLAllocHandle(SQL_HANDLE_STMT, self->m_hDBC, &hStmt); - - //try to determine if the driver can handle - //multiple recordsets - ret = SQLGetFunctions( - self->m_hDBC, - SQL_API_SQLMORERESULTS, - &(self->canHaveMoreResults)); - - if (!SQL_SUCCEEDED(ret)) { - self->canHaveMoreResults = 0; - } - - //free the handle - ret = SQLFreeHandle( SQL_HANDLE_STMT, hStmt); - } + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - uv_mutex_unlock(&ODBC::g_odbcMutex); - - data->result = ret; + return Napi::Number::New(env, this->connectionTimeout); } -void ODBCConnection::UV_AfterOpen(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterOpen\n"); - Nan::HandleScope scope; - - open_connection_work_data* data = (open_connection_work_data *)(req->data); - - Local argv[1]; - - bool err = false; +void ODBCConnection::ConnectTimeoutSetter(const Napi::CallbackInfo& info, const Napi::Value& value) { - if (data->result) { - err = true; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); - - argv[0] = objError; + if (value.IsNumber()) { + this->connectionTimeout = value.As().Uint32Value(); } +} - if (!err) { - data->conn->self()->connected = true; - } +Napi::Value ODBCConnection::LoginTimeoutGetter(const Napi::CallbackInfo& info) { - Nan::TryCatch try_catch; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - data->conn->Unref(); - data->cb->Call(data->conn->handle(), err ? 
1 : 0, argv); + return Napi::Number::New(env, this->loginTimeout); +} - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } +void ODBCConnection::LoginTimeoutSetter(const Napi::CallbackInfo& info, const Napi::Value& value) { - delete data->cb; - - free(data->connection); - free(data); - free(req); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + if (value.IsNumber()) { + this->loginTimeout = value.As().Uint32Value(); + } } -/* - * OpenSync - */ +/****************************************************************************** + ********************************** CLOSE ************************************* + *****************************************************************************/ -NAN_METHOD(ODBCConnection::OpenSync) { - DEBUG_PRINTF("ODBCConnection::OpenSync\n"); - Nan::HandleScope scope; +// CloseAsyncWorker, used by Close function (see below) +class CloseAsyncWorker : public Napi::AsyncWorker { - REQ_STRO_ARG(0, connection); + public: + CloseAsyncWorker(ODBCConnection *odbcConnectionObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject) {} - //get reference to the connection object - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - DEBUG_PRINTF("ODBCConnection::OpenSync : connectTimeout=%i, loginTimeout = %i\n", *&(conn->connectTimeout), *&(conn->loginTimeout)); + ~CloseAsyncWorker() {} - Local objError; - SQLRETURN ret; - bool err = false; - -#ifdef UNICODE - int connectionLength = connection->Length() + 1; - uint16_t* connectionString = (uint16_t *) malloc(connectionLength * sizeof(uint16_t)); - connection->Write(connectionString); -#else - int connectionLength = connection->Utf8Length() + 1; - char* connectionString = (char *) malloc(connectionLength); - connection->WriteUtf8(connectionString); -#endif - - uv_mutex_lock(&ODBC::g_odbcMutex); - - if (conn->connectTimeout > 0) { - //NOTE: SQLSetConnectAttr requires the thread to be locked - SQLSetConnectAttr( - conn->m_hDBC, //ConnectionHandle - SQL_ATTR_CONNECTION_TIMEOUT, //Attribute - (SQLPOINTER) size_t(conn->connectTimeout), //ValuePtr - SQL_IS_UINTEGER); //StringLength - } + private: + ODBCConnection *odbcConnectionObject; + SQLRETURN sqlReturnCode; - if (conn->loginTimeout > 0) { - //NOTE: SQLSetConnectAttr requires the thread to be locked - SQLSetConnectAttr( - conn->m_hDBC, //ConnectionHandle - SQL_ATTR_LOGIN_TIMEOUT, //Attribute - (SQLPOINTER) size_t(conn->loginTimeout), //ValuePtr - SQL_IS_UINTEGER); //StringLength - } - - //Attempt to connect - //NOTE: SQLDriverConnect requires the thread to be locked - ret = SQLDriverConnect( - conn->m_hDBC, //ConnectionHandle - NULL, //WindowHandle - (SQLTCHAR*) connectionString, //InConnectionString - connectionLength, //StringLength1 - NULL, //OutConnectionString - 0, //BufferLength - in characters - NULL, //StringLength2Ptr - SQL_DRIVER_NOPROMPT); //DriverCompletion - - if (!SQL_SUCCEEDED(ret)) { - err = true; - - objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->self()->m_hDBC); - } - else { - HSTMT hStmt; - - //allocate a temporary statment - ret = SQLAllocHandle(SQL_HANDLE_STMT, conn->m_hDBC, &hStmt); - - //try to determine if the driver can handle - //multiple recordsets - ret = SQLGetFunctions( - conn->m_hDBC, - SQL_API_SQLMORERESULTS, - &(conn->canHaveMoreResults)); - - if (!SQL_SUCCEEDED(ret)) { - conn->canHaveMoreResults = 0; + void Execute() { + + DEBUG_PRINTF("ODBCConnection::CloseAsyncWorker::Execute\n"); + + // When closing, make sure any transactions are closed as 
well. Because we don't know whether + // we should commit or rollback, so we default to rollback. + if (odbcConnectionObject->hDBC != NULL) { + sqlReturnCode = SQLEndTran( + SQL_HANDLE_DBC, // HandleType + odbcConnectionObject->hDBC, // Handle + SQL_ROLLBACK // CompletionType + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "CloseAsyncWorker::Execute", "SQLEndTran"); + + sqlReturnCode = odbcConnectionObject->Free(); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "CloseAsyncWorker::Execute", "Free()"); + } } - - //free the handle - ret = SQLFreeHandle( SQL_HANDLE_STMT, hStmt); - - conn->self()->connected = true; - } - uv_mutex_unlock(&ODBC::g_odbcMutex); + void OnOK() { - free(connectionString); - - if (err) { - return Nan::ThrowError(objError); - } - else { - info.GetReturnValue().Set(Nan::True()); - } -} + DEBUG_PRINTF("ODBCConnection::CloseAsyncWorker::OnOK\n"); + + Napi::Env env = Env(); + Napi::HandleScope scope(env); + + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + + Callback().Call(callbackArguments); + } +}; /* - * Close + * ODBCConnection::Close (Async) + * + * Description: Closes the connection asynchronously. + * + * Parameters: * + * const Napi::CallbackInfo& info: + * The information passed by Napi from the JavaScript call, including + * arguments from the JavaScript function. + * + * info[0]: Function: callback function, in the following format: + * function(error) + * error: An error object if the connection was not closed, or + * null if operation was successful. + * + * Return: + * Napi::Value: + * Undefined. (The return values are attached to the callback function). */ +Napi::Value ODBCConnection::Close(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::Close) { DEBUG_PRINTF("ODBCConnection::Close\n"); - Nan::HandleScope scope; - - REQ_FUN_ARG(0, cb); - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - close_connection_work_data* data = (close_connection_work_data *) - (calloc(1, sizeof(close_connection_work_data))); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - data->cb = new Nan::Callback(cb); - data->conn = conn; + if (!info[0].IsFunction()) { + + } - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Close, - (uv_after_work_cb)UV_AfterClose); + Napi::Function callback = info[0].As(); - conn->Ref(); + CloseAsyncWorker *worker = new CloseAsyncWorker(this, callback); + worker->Queue(); - info.GetReturnValue().Set(Nan::Undefined()); + return env.Undefined(); } -void ODBCConnection::UV_Close(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_Close\n"); - close_connection_work_data* data = (close_connection_work_data *)(req->data); - ODBCConnection* conn = data->conn; - - //TODO: check to see if there are any open statements - //on this connection - - conn->Free(); - - data->result = 0; -} +/****************************************************************************** + ***************************** CREATE STATEMENT ******************************* + *****************************************************************************/ + +// CreateStatementAsyncWorker, used by CreateStatement function (see below) +class CreateStatementAsyncWorker : public Napi::AsyncWorker { + + private: + ODBCConnection *odbcConnectionObject; + SQLRETURN sqlReturnCode; + HSTMT hSTMT; + + void Execute() { + + 
DEBUG_PRINTF("ODBCConnection::CreateStatementAsyncWorker:Execute - hDBC=%X hDBC=%X\n", + odbcConnectionObject->hENV, + odbcConnectionObject->hDBC, + ); + + uv_mutex_lock(&ODBC::g_odbcMutex); + sqlReturnCode = SQLAllocHandle( + SQL_HANDLE_STMT, // HandleType + odbcConnectionObject->hDBC, // InputHandle + &hSTMT // OutputHandlePtr + ); + uv_mutex_unlock(&ODBC::g_odbcMutex); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "CreateStatementAsyncWorker::Execute", "SQLAllocHandle"); + } -void ODBCConnection::UV_AfterClose(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterClose\n"); - Nan::HandleScope scope; + void OnOK() { - close_connection_work_data* data = (close_connection_work_data *)(req->data); + DEBUG_PRINTF("ODBCConnection::CreateStatementAsyncWorker::OnOK - hDBC=%X hDBC=%X hSTMT=%X\n", + odbcConnectionObject->hENV, + odbcConnectionObject->hDBC, + hSTMT + ); - ODBCConnection* conn = data->conn; - - Local argv[1]; - bool err = false; - - if (data->result) { - err = true; - argv[0] = Exception::Error(Nan::New("Error closing database").ToLocalChecked()); - } - else { - conn->connected = false; - } + Napi::Env env = Env(); + Napi::HandleScope scope(env); - Nan::TryCatch try_catch; + // arguments for the ODBCStatement constructor + std::vector statementArguments; + statementArguments.push_back(Napi::External::New(env, &(odbcConnectionObject->hENV))); + statementArguments.push_back(Napi::External::New(env, &(odbcConnectionObject->hDBC))); + statementArguments.push_back(Napi::External::New(env, &hSTMT)); + + // create a new ODBCStatement object as a Napi::Value + Napi::Value statementObject = ODBCStatement::constructor.New(statementArguments); - data->conn->Unref(); - data->cb->Call(err ? 1 : 0, argv); + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); // callbackArguments[0] + callbackArguments.push_back(statementObject); // callbackArguments[1] - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } + Callback().Call(callbackArguments); + } - delete data->cb; + public: + CreateStatementAsyncWorker(ODBCConnection *odbcConnectionObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject) {} - free(data); - free(req); -} + ~CreateStatementAsyncWorker() {} +}; /* - * CloseSync + * ODBCConnection::CreateStatement + * + * Description: Create an ODBCStatement to manually prepare, bind, and + * execute. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'endTransactionSync'. + * + * info[0]: Function: callback function: + * function(error, statement) + * error: An error object if there was an error creating the + * statement, or null if operation was successful. 
+ * statement: The newly created ODBCStatement object + * + * Return: + * Napi::Value: + * Undefined (results returned in callback) */ +Napi::Value ODBCConnection::CreateStatement(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::CloseSync) { - DEBUG_PRINTF("ODBCConnection::CloseSync\n"); - Nan::HandleScope scope; - - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - //TODO: check to see if there are any open statements - //on this connection - - conn->Free(); - - conn->connected = false; - - info.GetReturnValue().Set(Nan::True()); -} + DEBUG_PRINTF("ODBCConnection::CreateStatement\n"); -/* - * CreateStatementSync - * - */ + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); -NAN_METHOD(ODBCConnection::CreateStatementSync) { - DEBUG_PRINTF("ODBCConnection::CreateStatementSync\n"); - Nan::HandleScope scope; + Napi::Function callback = info[0].As(); - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - HSTMT hSTMT; + CreateStatementAsyncWorker *worker = new CreateStatementAsyncWorker(this, callback); + worker->Queue(); - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLAllocHandle( - SQL_HANDLE_STMT, - conn->m_hDBC, - &hSTMT); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - - Local params[3]; - params[0] = Nan::New(conn->m_hENV); - params[1] = Nan::New(conn->m_hDBC); - params[2] = Nan::New(hSTMT); - - Local js_result(Nan::NewInstance(Nan::New(ODBCStatement::constructor), 3, params).ToLocalChecked()); - - info.GetReturnValue().Set(js_result); + return env.Undefined(); } -/* - * CreateStatement - * - */ +/****************************************************************************** + ********************************** QUERY ************************************* + *****************************************************************************/ -NAN_METHOD(ODBCConnection::CreateStatement) { - DEBUG_PRINTF("ODBCConnection::CreateStatement\n"); - Nan::HandleScope scope; +// QueryAsyncWorker, used by Query function (see below) +class QueryAsyncWorker : public Napi::AsyncWorker { - REQ_FUN_ARG(0, cb); + private: - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - //initialize work request - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - //initialize our data - create_statement_work_data* data = - (create_statement_work_data *) (calloc(1, sizeof(create_statement_work_data))); + ODBCConnection *odbcConnectionObject; + QueryData *data; - data->cb = new Nan::Callback(cb); - data->conn = conn; + void Execute() { - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_CreateStatement, - (uv_after_work_cb)UV_AfterCreateStatement); + DEBUG_PRINTF("\nODBCConnection::QueryAsyncWorke::Execute"); - conn->Ref(); + DEBUG_PRINTF("ODBCConnection::Query : sqlLen=%i, sqlSize=%i, sql=%s\n", + data->sqlLen, data->sqlSize, (char*)data->sql); + + // allocate a new statement handle + uv_mutex_lock(&ODBC::g_odbcMutex); + data->sqlReturnCode = SQLAllocHandle( + SQL_HANDLE_STMT, + odbcConnectionObject->hDBC, + &(data->hSTMT) + ); + uv_mutex_unlock(&ODBC::g_odbcMutex); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLExecute"); + + // querying with parameters, need to prepare, bind, execute + if (data->bindValueCount > 0) { + + // binds all parameters to the query + data->sqlReturnCode = SQLPrepare( + data->hSTMT, + data->sql, + SQL_NTS + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, 
"QueryAsyncWorker::Execute", "SQLPrepare"); + + data->sqlReturnCode = SQLNumParams( + data->hSTMT, + &data->parameterCount + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLNumParams"); + + if (data->parameterCount != data->bindValueCount) { + SetError("[node-odbc] The number of parameters in the statement does not equal the number of bind values passed to the function."); + return; + } - info.GetReturnValue().Set(Nan::Undefined()); -} + data->sqlReturnCode = ODBC::DescribeParameters(data->hSTMT, data->parameters, data->parameterCount); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLDescribeParam"); -void ODBCConnection::UV_CreateStatement(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); - - //get our work data - create_statement_work_data* data = (create_statement_work_data *)(req->data); - - DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); - //DEBUG_PRINTF("ODBCConnection::UV_CreateStatement m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->conn->m_hENV, - // data->conn->m_hDBC, - // data->hSTMT - //); - - uv_mutex_lock(&ODBC::g_odbcMutex); - - //allocate a new statment handle - SQLAllocHandle( SQL_HANDLE_STMT, - data->conn->m_hDBC, - &data->hSTMT); + data->sqlReturnCode = ODBC::BindParameters(data->hSTMT, data->parameters, data->parameterCount); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLBindParameter"); - uv_mutex_unlock(&ODBC::g_odbcMutex); - - DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); - //DEBUG_PRINTF("ODBCConnection::UV_CreateStatement m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->conn->m_hENV, - // data->conn->m_hDBC, - // data->hSTMT - //); -} + data->sqlReturnCode = SQLExecute(data->hSTMT); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLExecute"); + + } + // querying without parameters, can just execdirect + else { + data->sqlReturnCode = SQLExecDirect( + data->hSTMT, + data->sql, + SQL_NTS + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "SQLExecDirect"); + } -void ODBCConnection::UV_AfterCreateStatement(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement\n"); - Nan::HandleScope scope; + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); + } - create_statement_work_data* data = (create_statement_work_data *)(req->data); + void OnOK() { - DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement\n"); - //DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement m_hDBC=%X m_hDBC=%X hSTMT=%X\n", - // data->conn->m_hENV, - // data->conn->m_hDBC, - // data->hSTMT - //); + DEBUG_PRINTF("ODBCConnection::QueryAsyncWorker::OnOk : data->sqlReturnCode=%i\n", data->sqlReturnCode); - Local info[3]; - info[0] = Nan::New(data->conn->m_hENV); - info[1] = Nan::New(data->conn->m_hDBC); - info[2] = Nan::New(data->hSTMT); - - Local js_result = Nan::NewInstance(Nan::New(ODBCStatement::constructor), 3, info).ToLocalChecked(); + Napi::Env env = Env(); + Napi::HandleScope scope(env); - info[0] = Nan::Null(); - info[1] = js_result; + std::vector callbackArguments; - Nan::TryCatch try_catch; + 
Napi::Array rows = ODBC::ProcessDataForNapi(env, data); - data->cb->Call( 2, info); + callbackArguments.push_back(env.Null()); + callbackArguments.push_back(rows); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - - data->conn->Unref(); - delete data->cb; + // return results object + Callback().Call(callbackArguments); + } - free(data); - free(req); -} + public: + QueryAsyncWorker(ODBCConnection *odbcConnectionObject, QueryData *data, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject), + data(data) {} + + ~QueryAsyncWorker() { + //delete data; + } +}; /* - * Query + * ODBCConnection::Query + * + * Description: Returns the info requested from the connection. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'query'. + * + * info[0]: String: the SQL string to execute + * info[1?]: Array: optional array of parameters to bind to the query + * info[1/2]: Function: callback function: + * function(error, result) + * error: An error object if the connection was not opened, or + * null if operation was successful. + * result: A string containing the info requested. + * + * Return: + * Napi::Value: + * Undefined (results returned in callback) */ +Napi::Value ODBCConnection::Query(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::Query) { DEBUG_PRINTF("ODBCConnection::Query\n"); - Nan::HandleScope scope; - - Local cb; - - Local sql; - - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - query_work_data* data = (query_work_data *) calloc(1, sizeof(query_work_data)); - //Check arguments for different variations of calling this function - if (info.Length() == 3) { - //handle Query("sql string", [params], function cb () {}); - - if ( !info[0]->IsString() ) { - return Nan::ThrowTypeError("Argument 0 must be an String."); - } - else if ( !info[1]->IsArray() ) { - return Nan::ThrowTypeError("Argument 1 must be an Array."); - } - else if ( !info[2]->IsFunction() ) { - return Nan::ThrowTypeError("Argument 2 must be a Function."); - } + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - sql = info[0]->ToString(); - - data->params = ODBC::GetParametersFromArray( - Local::Cast(info[1]), - &data->paramCount); - - cb = Local::Cast(info[2]); - } - else if (info.Length() == 2 ) { - //handle either Query("sql", cb) or Query({ settings }, cb) - - if (!info[1]->IsFunction()) { - return Nan::ThrowTypeError("ODBCConnection::Query(): Argument 1 must be a Function."); - } - - cb = Local::Cast(info[1]); - - if (info[0]->IsString()) { - //handle Query("sql", function cb () {}) - - sql = info[0]->ToString(); - - data->paramCount = 0; + QueryData *data = new QueryData(); + std::vector values; + + // check if parameters were passed or not + if (info.Length() == 3 && info[0].IsString() && info[1].IsArray() && info[2].IsFunction()) { + Napi::Array parameterArray = info[1].As(); + data->bindValueCount = (SQLSMALLINT)parameterArray.Length(); + data->parameters = new Parameter*[data->bindValueCount]; + for (SQLSMALLINT i = 0; i < data->bindValueCount; i++) { + data->parameters[i] = new Parameter(); } - else if (info[0]->IsObject()) { - //NOTE: going forward this is the way we should expand options - //rather than adding more arguments to the function signature. - //specify options on an options object. 
- //handle Query({}, function cb () {}); - - Local obj = info[0]->ToObject(); - - Local optionSqlKey = Nan::New(OPTION_SQL); - if (obj->Has(optionSqlKey) && obj->Get(optionSqlKey)->IsString()) { - sql = obj->Get(optionSqlKey)->ToString(); + ODBC::StoreBindValues(¶meterArray, data->parameters); + } else if ((info.Length() == 2 && info[0].IsString() && info[1].IsFunction()) || (info.Length() == 3 && info[0].IsString() && info[1].IsNull() && info[2].IsFunction())) { + data->bindValueCount = 0; + data->parameters = NULL; + } else { + Napi::TypeError::New(env, "[node-odbc]: Wrong function signature in call to Connection.query({string}, {array}[optional], {function}).").ThrowAsJavaScriptException(); + return env.Null(); + } + + Napi::String sql = info[0].ToString(); + Napi::Function callback = info[info.Length() - 1].As(); + + data->sql = ODBC::NapiStringToSQLTCHAR(sql); + + QueryAsyncWorker *worker; + + worker = new QueryAsyncWorker(this, data, callback); + worker->Queue(); + return env.Undefined(); +} + +/****************************************************************************** + ***************************** CALL PROCEDURE ********************************* + *****************************************************************************/ + +// CallProcedureAsyncWorker, used by CreateProcedure function (see below) +class CallProcedureAsyncWorker : public Napi::AsyncWorker { + + private: + + ODBCConnection *odbcConnectionObject; + QueryData *data; + + void Execute() { + + char *combinedProcedureName = new char[255](); + if (data->catalog != NULL) { + strcat(combinedProcedureName, (const char*)data->catalog); + strcat(combinedProcedureName, "."); } - else { - sql = Nan::New("").ToLocalChecked(); + if (data->schema != NULL) { + strcat(combinedProcedureName, (const char*)data->schema); + strcat(combinedProcedureName, "."); } + strcat(combinedProcedureName, (const char*)data->procedure); + + DEBUG_PRINTF("\nODBCConnection::CallProcedureAsyncWorker::Execute"); - Local optionParamsKey = Nan::New(OPTION_PARAMS); - if (obj->Has(optionParamsKey) && obj->Get(optionParamsKey)->IsArray()) { - data->params = ODBC::GetParametersFromArray( - Local::Cast(obj->Get(optionParamsKey)), - &data->paramCount); - } - else { - data->paramCount = 0; + // allocate a new statement handle + uv_mutex_lock(&ODBC::g_odbcMutex); + data->sqlReturnCode = SQLAllocHandle( + SQL_HANDLE_STMT, // HandleType + odbcConnectionObject->hDBC, // InputHandle + &data->hSTMT // OutputHandlePtr + ); + uv_mutex_unlock(&ODBC::g_odbcMutex); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "CallProcedureAsyncWorker::Execute", "SQLAllocHandle"); + + data->sqlReturnCode = SQLProcedures( + data->hSTMT, // StatementHandle + data->catalog, // CatalogName + SQL_NTS, // NameLengh1 + data->schema, // SchemaName + SQL_NTS, // NameLength2 + data->procedure, // ProcName + SQL_NTS // NameLength3 + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "CallProcedureAsyncWorker::Execute", "SQLProcedures"); + + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); + + if (data->storedRows.size() == 0) { + char errorString[255]; + sprintf(errorString, "[Node.js::odbc] CallProcedureAsyncWorker::Execute: Stored procedure %s doesn't exit", combinedProcedureName); + SetError(errorString); + return; } - Local optionNoResultsKey = 
Nan::New(OPTION_NORESULTS); - if (obj->Has(optionNoResultsKey) && obj->Get(optionNoResultsKey)->IsBoolean()) { - data->noResultObject = obj->Get(optionNoResultsKey)->ToBoolean()->Value(); + data->deleteColumns(); // delete data in columns for next result set + + data->sqlReturnCode = SQLProcedureColumns( + data->hSTMT, // StatementHandle + data->catalog, // CatalogName + SQL_NTS, // NameLengh1 + data->schema, // SchemaName + SQL_NTS, // NameLength2 + data->procedure, // ProcName + SQL_NTS, // NameLength3 + NULL, // ColumnName + SQL_NTS // NameLength4 + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "CallProcedureAsyncWorker::Execute", "SQLProcedureColumns"); + + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); + + data->parameterCount = data->storedRows.size(); + if (data->bindValueCount != (SQLSMALLINT)data->storedRows.size()) { + SetError("[Node.js::odbc] The number of parameters in the procedure and the number of passes parameters is not equal."); + return; } - else { - data->noResultObject = false; + + // get stored column parameter data from the result set + for (int i = 0; i < data->parameterCount; i++) { + data->parameters[i]->InputOutputType = *(SQLSMALLINT*)data->storedRows[i][4].data; + data->parameters[i]->ParameterType = *(SQLSMALLINT*)data->storedRows[i][5].data; // DataType -> ParameterType + data->parameters[i]->ColumnSize = *(SQLSMALLINT*)data->storedRows[i][7].data; // ParameterSize -> ColumnSize + data->parameters[i]->DecimalDigits = *(SQLSMALLINT*)data->storedRows[i][9].data; + data->parameters[i]->Nullable = *(SQLSMALLINT*)data->storedRows[i][11].data; + data->parameters[i]->StrLen_or_IndPtr = 0; + + if (data->parameters[i]->InputOutputType == SQL_PARAM_OUTPUT) { + SQLSMALLINT bufferSize = 0; + switch(data->parameters[i]->ParameterType) { + case SQL_DECIMAL : + case SQL_NUMERIC : + bufferSize = (data->parameters[i]->ColumnSize + 1) * sizeof(SQLCHAR); + data->parameters[i]->ValueType = SQL_C_CHAR; + data->parameters[i]->ParameterValuePtr = new SQLCHAR[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + + case SQL_DOUBLE : + bufferSize = (data->parameters[i]->ColumnSize + data->parameters[i]->ColumnSize); + data->parameters[i]->ValueType = SQL_C_DOUBLE; + data->parameters[i]->ParameterValuePtr = new SQLDOUBLE[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + + case SQL_INTEGER: + case SQL_SMALLINT: + case SQL_BIGINT: + bufferSize = (data->parameters[i]->ColumnSize + data->parameters[i]->ColumnSize); + data->parameters[i]->ValueType = SQL_C_SBIGINT; + data->parameters[i]->ParameterValuePtr = new SQLBIGINT[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + bufferSize = (data->parameters[i]->ColumnSize + 1) * sizeof(SQLCHAR); + data->parameters[i]->ValueType = SQL_C_CHAR; + data->parameters[i]->ParameterValuePtr = new SQLCHAR[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + + case SQL_WCHAR: + case SQL_WVARCHAR: + case SQL_WLONGVARCHAR: + bufferSize = (data->parameters[i]->ColumnSize + 1) * sizeof(SQLWCHAR); + data->parameters[i]->ValueType = SQL_C_WCHAR; + data->parameters[i]->ParameterValuePtr = new SQLWCHAR[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + + case SQL_CHAR: + case 
SQL_VARCHAR: + case SQL_LONGVARCHAR: + default: + bufferSize = (data->parameters[i]->ColumnSize + 1) * sizeof(SQLCHAR); + data->parameters[i]->ValueType = SQL_C_CHAR; + data->parameters[i]->ParameterValuePtr = new SQLCHAR[bufferSize]; + data->parameters[i]->BufferLength = bufferSize; + break; + } + } } - } - else { - return Nan::ThrowTypeError("ODBCConnection::Query(): Argument 0 must be a String or an Object."); - } - } - else { - return Nan::ThrowTypeError("ODBCConnection::Query(): Requires either 2 or 3 Arguments. "); - } - //Done checking arguments - - data->cb = new Nan::Callback(cb); - -#ifdef UNICODE - data->sqlLen = sql->Length(); - data->sqlSize = (data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t); - data->sql = (uint16_t *) malloc(data->sqlSize); - sql->Write((uint16_t *) data->sql); -#else - data->sqlLen = sql->Utf8Length(); - data->sqlSize = data->sqlLen + 1; - data->sql = (char *) malloc(data->sqlSize); - sql->WriteUtf8((char *) data->sql); -#endif - - DEBUG_PRINTF("ODBCConnection::Query : sqlLen=%i, sqlSize=%i, sql=%s\n", - data->sqlLen, data->sqlSize, (char*) data->sql); - - data->conn = conn; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Query, - (uv_after_work_cb)UV_AfterQuery); - conn->Ref(); + data->sqlReturnCode = ODBC::BindParameters(data->hSTMT, data->parameters, data->parameterCount); + + // // create the statement to call the stored procedure using the ODBC Call escape sequence: + // SQLTCHAR callString[255]; + // need to create the string "?,?,?,?" where the number of '?' is the number of parameters; + SQLTCHAR parameterString[(data->parameterCount * 2) - 1]; + // TODO: Can maybe add this for loop to the one above. + for (int i = 0; i < data->parameterCount; i++) { + if (i == (data->parameterCount - 1)) { + strcat((char *)parameterString, "?"); // for last parameter, don't add ',' + } else { + strcat((char *)parameterString, "?,"); + } + } - info.GetReturnValue().Set(Nan::Undefined()); -} + data->deleteColumns(); // delete data in columns for next result set -void ODBCConnection::UV_Query(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_Query\n"); - - query_work_data* data = (query_work_data *)(req->data); - - Parameter prm; - SQLRETURN ret; + data->sql = new SQLTCHAR[255](); + sprintf((char *)data->sql, "{ CALL %s (?) }", combinedProcedureName); + + data->sqlReturnCode = SQLExecDirect( + data->hSTMT, // StatementHandle + data->sql, // StatementText + SQL_NTS // TextLength + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "CallProcedureAsyncWorker::Execute", "SQLExecDirect"); + + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); + } + + void OnOK() { + + DEBUG_PRINTF("ODBCConnection::QueryAsyncWorker::OnOk : data->sqlReturnCode=%i\n", data->sqlReturnCode); - uv_mutex_lock(&ODBC::g_odbcMutex); + Napi::Env env = Env(); + Napi::HandleScope scope(env); - //allocate a new statment handle - SQLAllocHandle( SQL_HANDLE_STMT, - data->conn->m_hDBC, - &data->hSTMT ); + std::vector callbackArguments; - uv_mutex_unlock(&ODBC::g_odbcMutex); + Napi::Array rows = ODBC::ProcessDataForNapi(env, data); - // SQLExecDirect will use bound parameters, but without the overhead of SQLPrepare - // for a single execution. 
- if (data->paramCount) { - for (int i = 0; i < data->paramCount; i++) { - prm = data->params[i]; - - - /*DEBUG_TPRINTF( - SQL_T("ODBCConnection::UV_Query - param[%i]: ValueType=%i type=%i BufferLength=%i size=%i length=%i &length=%X\n"), i, prm.ValueType, prm.ParameterType, - prm.BufferLength, prm.ColumnSize, prm.length, &data->params[i].length);*/ - - ret = SQLBindParameter( - data->hSTMT, //StatementHandle - i + 1, //ParameterNumber - SQL_PARAM_INPUT, //InputOutputType - prm.ValueType, - prm.ParameterType, - prm.ColumnSize, - prm.DecimalDigits, - prm.ParameterValuePtr, - prm.BufferLength, - &data->params[i].StrLen_or_IndPtr); - - if (ret == SQL_ERROR) { - data->result = ret; - return; - } + callbackArguments.push_back(env.Null()); + callbackArguments.push_back(rows); + + // return results object + Callback().Call(callbackArguments); } - } - // execute the query directly - ret = SQLExecDirect( - data->hSTMT, - (SQLTCHAR *)data->sql, - data->sqlLen); + public: + CallProcedureAsyncWorker(ODBCConnection *odbcConnectionObject, QueryData *data, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject), + data(data) {} - // this will be checked later in UV_AfterQuery - data->result = ret; -} + ~CallProcedureAsyncWorker() { + delete data; + } +}; -void ODBCConnection::UV_AfterQuery(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterQuery\n"); - - Nan::HandleScope scope; - - query_work_data* data = (query_work_data *)(req->data); +/* TODO: Change + * ODBCConnection::CallProcedure + * + * Description: Calls a procedure in the database. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'query'. + * + * info[0]: String: the name of the procedure + * info[1?]: Array: optional array of parameters to bind to the procedure call + * info[1/2]: Function: callback function: + * function(error, result) + * error: An error object if the connection was not opened, or + * null if operation was successful. + * result: A string containing the info requested. 
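+ *
+ * Example: an illustrative JavaScript sketch only (the wrapper method and the
+ * procedure/parameter names are assumptions, not taken from this code). The
+ * implementation below reads its arguments as (catalog, schema, procedure,
+ * parameters, callback):
+ *
+ *   connection.callProcedure(null, 'MYSCHEMA', 'MYPROC', [1], (error, result) => {
+ *     if (error) { return console.error(error); }
+ *     console.log(result); // rows and bound output parameters
+ *   });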
+ * + * Return: + * Napi::Value: + * Undefined (results returned in callback) + */ +Napi::Value ODBCConnection::CallProcedure(const Napi::CallbackInfo& info) { - Nan::TryCatch try_catch; + DEBUG_PRINTF("\nODBCConnection::CallProcedure"); - DEBUG_PRINTF("ODBCConnection::UV_AfterQuery : data->result=%i, data->noResultObject=%i\n", data->result, data->noResultObject); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (data->result != SQL_ERROR && data->noResultObject) { - //We have been requested to not create a result object - //this means we should release the handle now and call back - //with Nan::True() - - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeHandle(SQL_HANDLE_STMT, data->hSTMT); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - - Local info[2]; - info[0] = Nan::Null(); - info[1] = Nan::True(); - - data->cb->Call(2, info); + QueryData *data = new QueryData(); + std::vector values; + + if (info[0].IsString()) { + data->catalog = ODBC::NapiStringToSQLTCHAR(info[0].ToString()); + } else if (!info[0].IsNull()) { + Napi::TypeError::New(env, "callProcedure: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - else { - Local info[4]; - bool* canFreeHandle = new bool(true); - - info[0] = Nan::New(data->conn->m_hENV); - info[1] = Nan::New(data->conn->m_hDBC); - info[2] = Nan::New(data->hSTMT); - info[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); - // Check now to see if there was an error (as there may be further result sets) - if (data->result == SQL_ERROR) { - info[0] = ODBC::GetSQLError(SQL_HANDLE_STMT, data->hSTMT, (char *) "[node-odbc] SQL_ERROR"); - } else { - info[0] = Nan::Null(); - } - info[1] = js_result; - - data->cb->Call(2, info); + if (info[1].IsString()) { + data->schema = ODBC::NapiStringToSQLTCHAR(info[1].ToString()); + } else if (!info[1].IsNull()) { + Napi::TypeError::New(env, "callProcedure: second argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - - data->conn->Unref(); - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); + + if (info[2].IsString()) { + data->procedure = ODBC::NapiStringToSQLTCHAR(info[2].ToString()); + } else { + Napi::TypeError::New(env, "callProcedure: third argument must be a string").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - - delete data->cb; - - if (data->paramCount) { - Parameter prm; - // free parameters - for (int i = 0; i < data->paramCount; i++) { - if (prm = data->params[i], prm.ParameterValuePtr != NULL) { - switch (prm.ValueType) { - case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; - case SQL_C_CHAR: free(prm.ParameterValuePtr); break; - case SQL_C_LONG: delete (int64_t *)prm.ParameterValuePtr; break; - case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; - case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; - } - } + + // check if parameters were passed or not + if (info.Length() == 5 && info[3].IsArray() && info[4].IsFunction()) { + Napi::Array parameterArray = info[3].As(); + data->bindValueCount = parameterArray.Length(); + data->parameters = new Parameter*[data->bindValueCount](); + for (SQLSMALLINT i = 0; i < data->bindValueCount; i++) { + data->parameters[i] = new Parameter(); } - - free(data->params); + ODBC::StoreBindValues(¶meterArray, data->parameters); + } else if ((info.Length() == 4 && info[4].IsFunction()) || (info.Length() == 5 && info[3].IsNull() 
&& info[4].IsFunction())) { + data->parameters = 0; + } else { + Napi::TypeError::New(env, "[node-odbc]: Wrong function signature in call to Connection.callProcedure({string}, {array}[optional], {function}).").ThrowAsJavaScriptException(); + return env.Null(); } - - free(data->sql); - free(data->catalog); - free(data->schema); - free(data->table); - free(data->type); - free(data->column); - free(data); - free(req); -} + Napi::Function callback = info[info.Length() - 1].As(); + + CallProcedureAsyncWorker *worker = new CallProcedureAsyncWorker(this, data, callback); + worker->Queue(); + return env.Undefined(); +} /* - * QuerySync + * ODBCConnection::GetUsername + * + * Description: Returns the username requested from the connection. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'getInfo'. + * + * info[0]: Number: option + * info[4]: Function: callback function: + * function(error, result) + * error: An error object if the connection was not opened, or + * null if operation was successful. + * result: A string containing the info requested. + * + * Return: + * Napi::Value: + * Undefined (results returned in callback) */ +Napi::Value ODBCConnection::GetUsername(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::QuerySync) { - DEBUG_PRINTF("ODBCConnection::QuerySync\n"); - Nan::HandleScope scope; + DEBUG_PRINTF("ODBCConnection::GetUsername\n"); -#ifdef UNICODE - String::Value* sql; -#else - String::Utf8Value* sql; -#endif + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - Parameter* params = new Parameter[0]; - Parameter prm; - SQLRETURN ret; - HSTMT hSTMT; - int paramCount = 0; - bool noResultObject = false; - - //Check arguments for different variations of calling this function - if (info.Length() == 2) { - //handle QuerySync("sql string", [params]); - - if ( !info[0]->IsString() ) { - return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 0 must be an String."); - } - else if (!info[1]->IsArray()) { - return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 1 must be an Array."); - } + return this->GetInfo(env, SQL_USER_NAME); +} -#ifdef UNICODE - sql = new String::Value(info[0]->ToString()); -#else - sql = new String::Utf8Value(info[0]->ToString()); -#endif +Napi::Value ODBCConnection::GetInfo(const Napi::Env env, const SQLUSMALLINT option) { + + SQLTCHAR infoValue[255]; + SQLSMALLINT infoLength; + SQLRETURN sqlReturnCode = SQLGetInfo( + this->hDBC, // ConnectionHandle + SQL_USER_NAME, // InfoType + infoValue, // InfoValuePtr + sizeof(infoValue), // BufferLength + &infoLength); // StringLengthPtr + + if (SQL_SUCCEEDED(sqlReturnCode)) { + #ifdef UNICODE + return Napi::String::New(env, (const char16_t *)infoValue, infoLength); + #else + return Napi::String::New(env, (const char *) infoValue, infoLength); + #endif + } - params = ODBC::GetParametersFromArray( - Local::Cast(info[1]), - ¶mCount); + Napi::Error(env, Napi::String::New(env, ODBC::GetSQLError(SQL_HANDLE_DBC, this->hDBC, (char *) "[node-odbc] Error in ODBCConnection::GetInfo"))).ThrowAsJavaScriptException(); + return env.Null(); +} - } - else if (info.Length() == 1 ) { - //handle either QuerySync("sql") or QuerySync({ settings }) - - if (info[0]->IsString()) { - //handle Query("sql") -#ifdef UNICODE - sql = new String::Value(info[0]->ToString()); -#else - sql = new String::Utf8Value(info[0]->ToString()); -#endif - - 
paramCount = 0; - } - else if (info[0]->IsObject()) { - //NOTE: going forward this is the way we should expand options - //rather than adding more arguments to the function signature. - //specify options on an options object. - //handle Query({}, function cb () {}); - - Local obj = info[0]->ToObject(); - - Local optionSqlKey = Nan::New(OPTION_SQL); - if (obj->Has(optionSqlKey) && obj->Get(optionSqlKey)->IsString()) { -#ifdef UNICODE - sql = new String::Value(obj->Get(optionSqlKey)->ToString()); -#else - sql = new String::Utf8Value(obj->Get(optionSqlKey)->ToString()); -#endif - } - else { -#ifdef UNICODE - sql = new String::Value(Nan::New("").ToLocalChecked()); -#else - sql = new String::Utf8Value(Nan::New("").ToLocalChecked()); -#endif - } +/****************************************************************************** + ********************************** TABLES ************************************ + *****************************************************************************/ - Local optionParamsKey = Nan::New(OPTION_PARAMS); - if (obj->Has(optionParamsKey) && obj->Get(optionParamsKey)->IsArray()) { - params = ODBC::GetParametersFromArray( - Local::Cast(obj->Get(optionParamsKey)), - ¶mCount); - } - else { - paramCount = 0; - } +// TablesAsyncWorker, used by Tables function (see below) +class TablesAsyncWorker : public Napi::AsyncWorker { + + private: + + ODBCConnection *odbcConnectionObject; + QueryData *data; + + void Execute() { + + uv_mutex_lock(&ODBC::g_odbcMutex); + data->sqlReturnCode = SQLAllocHandle( + SQL_HANDLE_STMT, // HandleType + odbcConnectionObject->hDBC, // InputHandle + &data->hSTMT // OutputHandlePtr + ); + uv_mutex_unlock(&ODBC::g_odbcMutex); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "TablesAsyncWorker::Execute", "SQLAllocHandle"); - Local optionNoResultsKey = Nan::New(OPTION_NORESULTS); - if (obj->Has(optionNoResultsKey) && obj->Get(optionNoResultsKey)->IsBoolean()) { - noResultObject = obj->Get(optionNoResultsKey)->ToBoolean()->Value(); - } + data->sqlReturnCode = SQLTables( + data->hSTMT, // StatementHandle + data->catalog, // CatalogName + SQL_NTS, // NameLength1 + data->schema, // SchemaName + SQL_NTS, // NameLength2 + data->table, // TableName + SQL_NTS, // NameLength3 + data->type, // TableType + SQL_NTS // NameLength4 + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "TablesAsyncWorker::Execute", "SQLTables"); + + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); } - else { - return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 0 must be a String or an Object."); - } - } - else { - return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Requires either 1 or 2 Arguments."); - } - //Done checking arguments - uv_mutex_lock(&ODBC::g_odbcMutex); + void OnOK() { + + DEBUG_PRINTF("ODBCConnection::QueryAsyncWorker::OnOk : data->sqlReturnCode=%i, \n", data->sqlReturnCode, ); + + Napi::Env env = Env(); + Napi::HandleScope scope(env); - //allocate a new statment handle - ret = SQLAllocHandle( SQL_HANDLE_STMT, - conn->m_hDBC, - &hSTMT ); + std::vector callbackArguments; - uv_mutex_unlock(&ODBC::g_odbcMutex); + callbackArguments.push_back(env.Null()); - DEBUG_PRINTF("ODBCConnection::QuerySync - hSTMT=%p\n", hSTMT); - - if (SQL_SUCCEEDED(ret)) { - if (paramCount) { - for (int i = 0; i < 
paramCount; i++) { - prm = params[i]; - - DEBUG_PRINTF( - "ODBCConnection::UV_Query - param[%i]: ValueType=%i type=%i BufferLength=%lli size=%lli length=%lli &length=%lli\n", i, prm.ValueType, prm.ParameterType, - prm.BufferLength, prm.ColumnSize, prm.StrLen_or_IndPtr, params[i].StrLen_or_IndPtr); - - ret = SQLBindParameter( - hSTMT, //StatementHandle - i + 1, //ParameterNumber - SQL_PARAM_INPUT, //InputOutputType - prm.ValueType, - prm.ParameterType, - prm.ColumnSize, - prm.DecimalDigits, - prm.ParameterValuePtr, - prm.BufferLength, - ¶ms[i].StrLen_or_IndPtr); - - if (ret == SQL_ERROR) {break;} - } - } + Napi::Array rows = ODBC::ProcessDataForNapi(env, data); + callbackArguments.push_back(rows); - if (SQL_SUCCEEDED(ret)) { - ret = SQLExecDirect( - hSTMT, - (SQLTCHAR *) **sql, - sql->length()); + Callback().Call(callbackArguments); } - - // free parameters - for (int i = 0; i < paramCount; i++) { - if (prm = params[i], prm.ParameterValuePtr != NULL) { - switch (prm.ValueType) { - case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; - case SQL_C_CHAR: free(prm.ParameterValuePtr); break; - case SQL_C_LONG: delete (int64_t *)prm.ParameterValuePtr; break; - case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; - case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; - } - } + + public: + + TablesAsyncWorker(ODBCConnection *odbcConnectionObject, QueryData *data, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject), + data(data) {} + + ~TablesAsyncWorker() { + delete data; } - - free(params); +}; + +/* + * ODBCConnection::Tables + * + * Description: Returns the list of table, catalog, or schema names, and + * table types, stored in a specific data source. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'tables'. + * + * info[0]: String: catalog + * info[1]: String: schema + * info[2]: String: table + * info[3]: String: type + * info[4]: Function: callback function: + * function(error, result) + * error: An error object if there was a database issue + * result: The ODBCResult + * + * Return: + * Napi::Value: + * Undefined (results returned in callback) + */ +Napi::Value ODBCConnection::Tables(const Napi::CallbackInfo& info) { + + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + if (info.Length() != 5) { + Napi::TypeError::New(env, "tables() function takes 5 arguments.").ThrowAsJavaScriptException(); } - - delete sql; - - //check to see if there was an error during execution - if (ret == SQL_ERROR) { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - hSTMT, - (char *) "[node-odbc] Error in ODBCConnection::QuerySync" - ); - - Nan::ThrowError(objError); - - return; + + Napi::Function callback; + QueryData* data = new QueryData(); + + // Napi doesn't have LowMemoryNotification like NAN did. Throw standard error. + if (!data) { + Napi::TypeError::New(env, "Could not allocate enough memory to run query.").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - else if (noResultObject) { - //if there is not result object requested then - //we must destroy the STMT ourselves. 
- uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeHandle(SQL_HANDLE_STMT, hSTMT); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - - info.GetReturnValue().Set(Nan::True()); + + if (info[0].IsString()) { + data->catalog = ODBC::NapiStringToSQLTCHAR(info[0].ToString()); + } else if (!info[0].IsNull()) { + Napi::TypeError::New(env, "tables: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - else { - Local result[4]; - bool* canFreeHandle = new bool(true); - - result[0] = Nan::New(conn->m_hENV); - result[1] = Nan::New(conn->m_hDBC); - result[2] = Nan::New(hSTMT); - result[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); - info.GetReturnValue().Set(js_result); + if (info[1].IsString()) { + data->schema = ODBC::NapiStringToSQLTCHAR(info[1].ToString()); + } else if (!info[1].IsNull()) { + Napi::TypeError::New(env, "tables: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } + + if (info[2].IsString()) { + data->table = ODBC::NapiStringToSQLTCHAR(info[2].ToString()); + } else if (!info[2].IsNull()) { + Napi::TypeError::New(env, "tables: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); + } + + if (info[3].IsString()) { + data->type = ODBC::NapiStringToSQLTCHAR(info[3].ToString()); + } else if (!info[3].IsNull()) { + Napi::TypeError::New(env, "tables: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); + } + + if (info[4].IsFunction()) { callback = info[4].As(); } + else { + Napi::TypeError::New(env, "tables: fifth argument must be a function").ThrowAsJavaScriptException(); + delete data; + return env.Null(); + } + + TablesAsyncWorker *worker = new TablesAsyncWorker(this, data, callback); + worker->Queue(); + + return env.Undefined(); } +/****************************************************************************** + ********************************* COLUMNS ************************************ + *****************************************************************************/ -/* - * GetInfoSync - */ +// ColumnsAsyncWorker, used by Columns function (see below) +class ColumnsAsyncWorker : public Napi::AsyncWorker { -NAN_METHOD(ODBCConnection::GetInfoSync) { - DEBUG_PRINTF("ODBCConnection::GetInfoSync\n"); - Nan::HandleScope scope; + public: - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + ColumnsAsyncWorker(ODBCConnection *odbcConnectionObject, QueryData *data, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject), + data(data) {} - if (info.Length() == 1) { - if ( !info[0]->IsNumber() ) { - return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): Argument 0 must be a Number."); + ~ColumnsAsyncWorker() { + delete data; } - } - else { - return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): Requires 1 Argument."); - } - SQLUSMALLINT InfoType = info[0]->NumberValue(); + private: - switch (InfoType) { - case SQL_USER_NAME: - SQLRETURN ret; - SQLTCHAR userName[255]; - SQLSMALLINT userNameLength; + ODBCConnection *odbcConnectionObject; + QueryData *data; + + void Execute() { + + uv_mutex_lock(&ODBC::g_odbcMutex); + data->sqlReturnCode = SQLAllocHandle( + SQL_HANDLE_STMT, // HandleType + odbcConnectionObject->hDBC, // InputHandle + &data->hSTMT // OutputHandlePtr + ); + uv_mutex_unlock(&ODBC::g_odbcMutex); + 
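+      // confirm the statement handle was allocated before asking for column metadata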
ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "ColumnsAsyncWorker::Execute", "SQLAllocHandle"); + + data->sqlReturnCode = SQLColumns( + data->hSTMT, // StatementHandle + data->catalog, // CatalogName + SQL_NTS, // NameLength1 + data->schema, // SchemaName + SQL_NTS, // NameLength2 + data->table, // TableName + SQL_NTS, // NameLength3 + data->column, // ColumnName + SQL_NTS // NameLength4 + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "ColumnsAsyncWorker::Execute", "SQLColumns"); + + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "QueryAsyncWorker::Execute", "ODBC::RetrieveResultSet"); + } - ret = SQLGetInfo(conn->m_hDBC, SQL_USER_NAME, userName, sizeof(userName), &userNameLength); + void OnOK() { - if (SQL_SUCCEEDED(ret)) { -#ifdef UNICODE - info.GetReturnValue().Set(Nan::New((uint16_t *)userName).ToLocalChecked()); -#else - info.GetReturnValue().Set(Nan::New((const char *) userName).ToLocalChecked()); -#endif - } - break; + Napi::Env env = Env(); + Napi::HandleScope scope(env); - default: - return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): The only supported Argument is SQL_USER_NAME."); - } -} + Napi::Array rows = ODBC::ProcessDataForNapi(env, data); + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + callbackArguments.push_back(rows); + Callback().Call(callbackArguments); + } +}; /* - * Tables + * ODBCConnection::Columns + * + * Description: Returns the list of column names in specified tables. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'columns'. + * + * info[0]: String: catalog + * info[1]: String: schema + * info[2]: String: table + * info[3]: String: column + * info[4]: Function: callback function: + * function(error, result) + * error: An error object if there was a database error + * result: The ODBCResult + * + * Return: + * Napi::Value: + * Undefined (results returned in callback) */ +Napi::Value ODBCConnection::Columns(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::Tables) { - Nan::HandleScope scope; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - REQ_STRO_OR_NULL_ARG(0, catalog); - REQ_STRO_OR_NULL_ARG(1, schema); - REQ_STRO_OR_NULL_ARG(2, table); - REQ_STRO_OR_NULL_ARG(3, type); - Local cb = Local::Cast(info[4]); + QueryData* data = new QueryData(); + Napi::Function callback; - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - query_work_data* data = - (query_work_data *) calloc(1, sizeof(query_work_data)); - + // Napi doesn't have LowMemoryNotification like NAN did. Throw standard error. 
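+  // Usage sketch from JavaScript (illustrative only; assumes the addon exposes this
+  // method as connection.columns(catalog, schema, table, column, callback), where a
+  // null argument is passed through to SQLColumns as NULL):
+  //   connection.columns(null, 'MYSCHEMA', 'MYTABLE', null, (error, result) => { ... });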
if (!data) { - Nan::LowMemoryNotification(); - Nan::ThrowError("Could not allocate enough memory"); - return; + Napi::Error::New(env, "Could not allocate enough memory to run query.").ThrowAsJavaScriptException(); + return env.Null(); } - data->sql = NULL; - data->catalog = NULL; - data->schema = NULL; - data->table = NULL; - data->type = NULL; - data->column = NULL; - data->cb = new Nan::Callback(cb); - - if (!catalog->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->catalog = (uint16_t *) malloc((catalog->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - catalog->Write((uint16_t *) data->catalog); -#else - data->catalog = (char *) malloc(catalog->Utf8Length() + 1); - catalog->WriteUtf8((char *) data->catalog); -#endif + if (info[0].IsString()) { + data->catalog = ODBC::NapiStringToSQLTCHAR(info[0].ToString()); + } else if (!info[0].IsNull()) { + Napi::Error::New(env, "columns: first argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - if (!schema->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->schema = (uint16_t *) malloc((schema->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - schema->Write((uint16_t *) data->schema); -#else - data->schema = (char *) malloc(schema->Utf8Length() + 1); - schema->WriteUtf8((char *) data->schema); -#endif + if (info[1].IsString()) { + data->schema = ODBC::NapiStringToSQLTCHAR(info[1].ToString()); + } else if (!info[1].IsNull()) { + Napi::Error::New(env, "columns: second argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - - if (!table->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->table = (uint16_t *) malloc((table->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - table->Write((uint16_t *) data->table); -#else - data->table = (char *) malloc(table->Utf8Length() + 1); - table->WriteUtf8((char *) data->table); -#endif + + if (info[2].IsString()) { + data->table = ODBC::NapiStringToSQLTCHAR(info[2].ToString()); + } else if (!info[2].IsNull()) { + Napi::Error::New(env, "columns: third argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - - if (!type->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->type = (uint16_t *) malloc((type->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - type->Write((uint16_t *) data->type); -#else - data->type = (char *) malloc(type->Utf8Length() + 1); - type->WriteUtf8((char *) data->type); -#endif + + if (info[3].IsString()) { + data->type = ODBC::NapiStringToSQLTCHAR(info[3].ToString()); + } else if (!info[3].IsNull()) { + Napi::Error::New(env, "columns: fourth argument must be a string or null").ThrowAsJavaScriptException(); + delete data; + return env.Null(); } - - data->conn = conn; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Tables, - (uv_after_work_cb) UV_AfterQuery); - conn->Ref(); + if (info[4].IsFunction()) { callback = info[4].As(); } + else { + Napi::Error::New(env, "columns: fifth argument must be a function").ThrowAsJavaScriptException(); + delete data; + return env.Null(); + } + + ColumnsAsyncWorker *worker = new ColumnsAsyncWorker(this, data, callback); + worker->Queue(); - info.GetReturnValue().Set(Nan::Undefined()); + return env.Undefined(); } -void ODBCConnection::UV_Tables(uv_work_t* req) { - query_work_data* data = (query_work_data *)(req->data); - - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLAllocHandle(SQL_HANDLE_STMT, 
data->conn->m_hDBC, &data->hSTMT ); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - - SQLRETURN ret = SQLTables( - data->hSTMT, - (SQLTCHAR *) data->catalog, SQL_NTS, - (SQLTCHAR *) data->schema, SQL_NTS, - (SQLTCHAR *) data->table, SQL_NTS, - (SQLTCHAR *) data->type, SQL_NTS - ); - - // this will be checked later in UV_AfterQuery - data->result = ret; -} +/****************************************************************************** + **************************** BEGIN TRANSACTION ******************************* + *****************************************************************************/ +// BeginTransactionAsyncWorker, used by EndTransaction function (see below) +class BeginTransactionAsyncWorker : public Napi::AsyncWorker { + public: -/* - * Columns - */ + BeginTransactionAsyncWorker(ODBCConnection *odbcConnectionObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject) {} -NAN_METHOD(ODBCConnection::Columns) { - Nan::HandleScope scope; + ~BeginTransactionAsyncWorker() {} - REQ_STRO_OR_NULL_ARG(0, catalog); - REQ_STRO_OR_NULL_ARG(1, schema); - REQ_STRO_OR_NULL_ARG(2, table); - REQ_STRO_OR_NULL_ARG(3, column); - - Local cb = Local::Cast(info[4]); - - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - query_work_data* data = (query_work_data *) calloc(1, sizeof(query_work_data)); - - if (!data) { - Nan::LowMemoryNotification(); - Nan::ThrowError("Could not allocate enough memory"); - return; - } + private: - data->sql = NULL; - data->catalog = NULL; - data->schema = NULL; - data->table = NULL; - data->type = NULL; - data->column = NULL; - data->cb = new Nan::Callback(cb); - - if (!catalog->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->catalog = (uint16_t *) malloc((catalog->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - catalog->Write((uint16_t *) data->catalog); -#else - data->catalog = (char *) malloc(catalog->Utf8Length() + 1); - catalog->WriteUtf8((char *) data->catalog); -#endif - } + ODBCConnection *odbcConnectionObject; + SQLRETURN sqlReturnCode; - if (!schema->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->schema = (uint16_t *) malloc((schema->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - schema->Write((uint16_t *) data->schema); -#else - data->schema = (char *) malloc(schema->Utf8Length() + 1); - schema->WriteUtf8((char *) data->schema); -#endif - } - - if (!table->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->table = (uint16_t *) malloc((table->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - table->Write((uint16_t *) data->table); -#else - data->table = (char *) malloc(table->Utf8Length() + 1); - table->WriteUtf8((char *) data->table); -#endif - } - - if (!column->Equals(Nan::New("null").ToLocalChecked())) { -#ifdef UNICODE - data->column = (uint16_t *) malloc((column->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); - column->Write((uint16_t *) data->column); -#else - data->column = (char *) malloc(column->Utf8Length() + 1); - column->WriteUtf8((char *) data->column); -#endif - } - - data->conn = conn; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Columns, - (uv_after_work_cb)UV_AfterQuery); - - conn->Ref(); + void Execute() { - info.GetReturnValue().Set(Nan::Undefined()); -} + DEBUG_PRINTF("ODBCConnection::BeginTransactionAsyncWorker::Execute\n"); + + //set the connection manual commits + sqlReturnCode = 
SQLSetConnectAttr( + odbcConnectionObject->hDBC, // ConnectionHandle + SQL_ATTR_AUTOCOMMIT, // Attribute + (SQLPOINTER) SQL_AUTOCOMMIT_OFF, // ValuePtr + SQL_NTS // StringLength + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "BeginTransactionAsyncWorker::Execute", "SQLSetConnectAttr"); + } -void ODBCConnection::UV_Columns(uv_work_t* req) { - query_work_data* data = (query_work_data *)(req->data); - - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLAllocHandle(SQL_HANDLE_STMT, data->conn->m_hDBC, &data->hSTMT ); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - - SQLRETURN ret = SQLColumns( - data->hSTMT, - (SQLTCHAR *) data->catalog, SQL_NTS, - (SQLTCHAR *) data->schema, SQL_NTS, - (SQLTCHAR *) data->table, SQL_NTS, - (SQLTCHAR *) data->column, SQL_NTS - ); - - // this will be checked later in UV_AfterQuery - data->result = ret; -} + void OnOK() { -/* - * BeginTransactionSync - * - */ + DEBUG_PRINTF("ODBCConnection::BeginTransactionAsyncWorker::OnOK\n"); -NAN_METHOD(ODBCConnection::BeginTransactionSync) { - DEBUG_PRINTF("ODBCConnection::BeginTransactionSync\n"); - Nan::HandleScope scope; + Napi::Env env = Env(); + Napi::HandleScope scope(env); + + std::vector callbackArguments; - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - SQLRETURN ret; - - //set the connection manual commits - ret = SQLSetConnectAttr( - conn->m_hDBC, - SQL_ATTR_AUTOCOMMIT, - (SQLPOINTER) SQL_AUTOCOMMIT_OFF, - SQL_NTS); - - if (!SQL_SUCCEEDED(ret)) { - Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); - - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::False()); - } - - info.GetReturnValue().Set(Nan::True()); -} + callbackArguments.push_back(env.Null()); + + Callback().Call(callbackArguments); + } +}; /* - * BeginTransaction + * ODBCConnection::BeginTransaction (Async) * + * Description: Begin a transaction by turning off SQL_ATTR_AUTOCOMMIT. + * Transaction is commited or rolledback in EndTransaction or + * EndTransactionSync. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'beginTransaction'. + * + * info[0]: Function: callback function: + * function(error) + * error: An error object if the transaction wasn't started, or + * null if operation was successful. 
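+ *
+ * Example: an illustrative JavaScript sketch only (the wrapper method name is an
+ * assumption, not taken from this code):
+ *
+ *   connection.beginTransaction((error) => {
+ *     if (error) { return console.error(error); }
+ *     // autocommit is now off until commit() or rollback() ends the transaction
+ *   });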
+ * + * Return: + * Napi::Value: + * Boolean, indicates whether the transaction was successfully started */ +Napi::Value ODBCConnection::BeginTransaction(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCConnection::BeginTransaction) { DEBUG_PRINTF("ODBCConnection::BeginTransaction\n"); - Nan::HandleScope scope; - REQ_FUN_ARG(0, cb); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - query_work_data* data = - (query_work_data *) calloc(1, sizeof(query_work_data)); - - if (!data) { - Nan::LowMemoryNotification(); - return Nan::ThrowError("Could not allocate enough memory"); - } + Napi::Function callback; - data->cb = new Nan::Callback(cb); - data->conn = conn; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_BeginTransaction, - (uv_after_work_cb)UV_AfterBeginTransaction); + if (info[0].IsFunction()) { callback = info[0].As(); } + else { Napi::Error::New(env, "beginTransaction: first argument must be a function").ThrowAsJavaScriptException(); } - return; + BeginTransactionAsyncWorker *worker = new BeginTransactionAsyncWorker(this, callback); + worker->Queue(); + + return env.Undefined(); } -/* - * UV_BeginTransaction - * - */ +/****************************************************************************** + ***************************** END TRANSACTION ******************************** + *****************************************************************************/ -void ODBCConnection::UV_BeginTransaction(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_BeginTransaction\n"); - - query_work_data* data = (query_work_data *)(req->data); - - //set the connection manual commits - data->result = SQLSetConnectAttr( - data->conn->self()->m_hDBC, - SQL_ATTR_AUTOCOMMIT, - (SQLPOINTER) SQL_AUTOCOMMIT_OFF, - SQL_NTS); -} + // EndTransactionAsyncWorker, used by Commit and Rollback functions (see below) +class EndTransactionAsyncWorker : public Napi::AsyncWorker { -/* - * UV_AfterBeginTransaction - * - */ + private: -void ODBCConnection::UV_AfterBeginTransaction(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterBeginTransaction\n"); - Nan::HandleScope scope; + ODBCConnection *odbcConnectionObject; + SQLSMALLINT completionType; + SQLRETURN sqlReturnCode; - //TODO: Is this supposed to be of type query_work_data? 
- open_connection_work_data* data = (open_connection_work_data *)(req->data); - - Local argv[1]; - - bool err = false; + void Execute() { - if (!SQL_SUCCEEDED(data->result)) { - err = true; + DEBUG_PRINTF("ODBCConnection::EndTransactionAsyncWorker::Execute\n"); + + sqlReturnCode = SQLEndTran( + SQL_HANDLE_DBC, // HandleType + odbcConnectionObject->hDBC, // Handle + completionType // CompletionType + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "EndTransactionAsyncWorker::Execute", "SQLEndTran"); + + //Reset the connection back to autocommit + sqlReturnCode = SQLSetConnectAttr( + odbcConnectionObject->hDBC, // ConnectionHandle + SQL_ATTR_AUTOCOMMIT, // Attribute + (SQLPOINTER) SQL_AUTOCOMMIT_ON, // ValuePtr + SQL_NTS // StringLength + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(sqlReturnCode, SQL_HANDLE_DBC, odbcConnectionObject->hDBC, "EndTransactionAsyncWorker::Execute", "SQLSetConnectAttr"); + } - Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); - - argv[0] = objError; - } + void OnOK() { - Nan::TryCatch try_catch; + DEBUG_PRINTF("ODBCConnection::EndTransactionAsyncWorker::OnOK\n"); - data->cb->Call( err ? 1 : 0, argv); + Napi::Env env = Env(); + Napi::HandleScope scope(env); + + std::vector callbackArguments; + + callbackArguments.push_back(env.Null()); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } + Callback().Call(callbackArguments); + } - delete data->cb; - - free(data); - free(req); -} + public: -/* - * EndTransactionSync - * - */ + EndTransactionAsyncWorker(ODBCConnection *odbcConnectionObject, SQLSMALLINT completionType, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcConnectionObject(odbcConnectionObject), + completionType(completionType) {} -NAN_METHOD(ODBCConnection::EndTransactionSync) { - DEBUG_PRINTF("ODBCConnection::EndTransactionSync\n"); - Nan::HandleScope scope; + ~EndTransactionAsyncWorker() {} +}; - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - REQ_BOOL_ARG(0, rollback); - - Local objError; - SQLRETURN ret; - bool error = false; - SQLSMALLINT completionType = (rollback->Value()) - ? SQL_ROLLBACK - : SQL_COMMIT - ; - - //Call SQLEndTran - ret = SQLEndTran( - SQL_HANDLE_DBC, - conn->m_hDBC, - completionType); - - //check how the transaction went - if (!SQL_SUCCEEDED(ret)) { - error = true; - - objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); - } - - //Reset the connection back to autocommit - ret = SQLSetConnectAttr( - conn->m_hDBC, - SQL_ATTR_AUTOCOMMIT, - (SQLPOINTER) SQL_AUTOCOMMIT_ON, - SQL_NTS); - - //check how setting the connection attr went - //but only process the code if an error has not already - //occurred. If an error occurred during SQLEndTran, - //that is the error that we want to throw. - if (!SQL_SUCCEEDED(ret) && !error) { - //TODO: if this also failed, we really should - //be restarting the connection or something to deal with this state - error = true; - - objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); - } - - if (error) { - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::False()); - } - else { - info.GetReturnValue().Set(Nan::True()); - } -} /* - * EndTransaction + * ODBCConnection::Commit + * + * Description: Commit a transaction by calling SQLEndTran on the connection + * in an AsyncWorker with SQL_COMMIT option. 
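+ *
+ *        A brief usage sketch (JavaScript, illustrative only; `connection` is
+ *        an assumed open Connection on which beginTransaction() was called):
+ *
+ *          connection.commit((error) => {
+ *            if (error) { return console.error(error); }
+ *            // the transaction is committed and autocommit is restored
+ *          });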
* + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'endTransaction'. + * + * info[0]: Function: callback function: + * function(error) + * error: An error object if the transaction wasn't ended, or + * null if operation was successful. + * + * Return: + * Napi::Value: + * Undefined */ +Napi::Value ODBCConnection::Commit(const Napi::CallbackInfo &info) { -NAN_METHOD(ODBCConnection::EndTransaction) { - DEBUG_PRINTF("ODBCConnection::EndTransaction\n"); - Nan::HandleScope scope; + DEBUG_PRINTF("ODBCConnection::Commit\n"); - REQ_BOOL_ARG(0, rollback); - REQ_FUN_ARG(1, cb); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - query_work_data* data = - (query_work_data *) calloc(1, sizeof(query_work_data)); - - if (!data) { - Nan::LowMemoryNotification(); - return Nan::ThrowError("Could not allocate enough memory"); + if (!info[0].IsFunction()) { + Napi::TypeError::New(env, "[node-odbc]: commit(callback): first argument must be a function").ThrowAsJavaScriptException(); + return env.Null(); } - - data->completionType = (rollback->Value()) - ? SQL_ROLLBACK - : SQL_COMMIT - ; - data->cb = new Nan::Callback(cb); - data->conn = conn; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_EndTransaction, - (uv_after_work_cb)UV_AfterEndTransaction); - info.GetReturnValue().Set(Nan::Undefined()); -} + Napi::Function callback = info[0].As(); -/* - * UV_EndTransaction - * - */ + // calls EndTransactionAsyncWorker with SQL_COMMIT option + EndTransactionAsyncWorker *worker = new EndTransactionAsyncWorker(this, SQL_COMMIT, callback); + worker->Queue(); -void ODBCConnection::UV_EndTransaction(uv_work_t* req) { - DEBUG_PRINTF("ODBCConnection::UV_EndTransaction\n"); - - query_work_data* data = (query_work_data *)(req->data); - - bool err = false; - - //Call SQLEndTran - SQLRETURN ret = SQLEndTran( - SQL_HANDLE_DBC, - data->conn->m_hDBC, - data->completionType); - - data->result = ret; - - if (!SQL_SUCCEEDED(ret)) { - err = true; - } - - //Reset the connection back to autocommit - ret = SQLSetConnectAttr( - data->conn->m_hDBC, - SQL_ATTR_AUTOCOMMIT, - (SQLPOINTER) SQL_AUTOCOMMIT_ON, - SQL_NTS); - - if (!SQL_SUCCEEDED(ret) && !err) { - //there was not an earlier error, - //so we shall pass the return code from - //this last call. - data->result = ret; - } + return env.Undefined(); } /* - * UV_AfterEndTransaction + * ODBCConnection::Rollback + * + * Description: Rollback a transaction by calling SQLEndTran on the connection + * in an AsyncWorker with SQL_ROLLBACK option. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed from the JavaSript environment, including the + * function arguments for 'endTransaction'. + * + * info[0]: Function: callback function: + * function(error) + * error: An error object if the transaction wasn't ended, or + * null if operation was successful. 
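+ *
+ *        A brief usage sketch (JavaScript, illustrative only; `connection` is
+ *        an assumed open Connection on which beginTransaction() was called):
+ *
+ *          connection.rollback((error) => {
+ *            if (error) { return console.error(error); }
+ *            // pending changes are discarded and autocommit is restored
+ *          });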
* + * Return: + * Napi::Value: + * Undefined */ +Napi::Value ODBCConnection::Rollback(const Napi::CallbackInfo &info) { -void ODBCConnection::UV_AfterEndTransaction(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCConnection::UV_AfterEndTransaction\n"); - Nan::HandleScope scope; - - open_connection_work_data* data = (open_connection_work_data *)(req->data); - - Local argv[1]; - - bool err = false; + DEBUG_PRINTF("ODBCConnection::Rollback\n"); - if (!SQL_SUCCEEDED(data->result)) { - err = true; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); - - argv[0] = objError; + if (!info[0].IsFunction()) { + Napi::TypeError::New(env, "[node-odbc]: rollback(callback): first argument must be a function").ThrowAsJavaScriptException(); + return env.Null(); } - Nan::TryCatch try_catch; + Napi::Function callback = info[0].As(); - data->cb->Call(err ? 1 : 0, argv); + // calls EndTransactionAsyncWorker with SQL_ROLLBACK option + EndTransactionAsyncWorker *worker = new EndTransactionAsyncWorker(this, SQL_ROLLBACK, callback); + worker->Queue(); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - - delete data->cb; - - free(data); - free(req); + return env.Undefined(); } diff --git a/src/odbc_connection.h b/src/odbc_connection.h old mode 100644 new mode 100755 index 5da3441..9039960 --- a/src/odbc_connection.h +++ b/src/odbc_connection.h @@ -18,157 +18,71 @@ #ifndef _SRC_ODBC_CONNECTION_H #define _SRC_ODBC_CONNECTION_H -#include +#include +// #include + +class ODBCConnection : public Napi::ObjectWrap { + + friend class CloseAsyncWorker; + friend class CreateStatementAsyncWorker; + friend class QueryAsyncWorker; + friend class BeginTransactionAsyncWorker; + friend class EndTransactionAsyncWorker; + friend class TablesAsyncWorker; + friend class ColumnsAsyncWorker; + friend class GetInfoAsyncWorker; + friend class GetAttributeAsyncWorker; + friend class CallProcedureAsyncWorker; -class ODBCConnection : public Nan::ObjectWrap { public: - static Nan::Persistent OPTION_SQL; - static Nan::Persistent OPTION_PARAMS; - static Nan::Persistent OPTION_NORESULTS; - static Nan::Persistent constructor; - - static void Init(v8::Handle exports); - - void Free(); - - protected: - ODBCConnection() {}; - - explicit ODBCConnection(HENV hENV, HDBC hDBC): - Nan::ObjectWrap(), - m_hENV(hENV), - m_hDBC(hDBC) {}; - - ~ODBCConnection(); - -public: - //constructor - static NAN_METHOD(New); - - //Property Getter/Setters - static NAN_GETTER(ConnectedGetter); - static NAN_GETTER(ConnectTimeoutGetter); - static NAN_SETTER(ConnectTimeoutSetter); - static NAN_GETTER(LoginTimeoutGetter); - static NAN_SETTER(LoginTimeoutSetter); - - //async methods - static NAN_METHOD(BeginTransaction); -protected: - static void UV_BeginTransaction(uv_work_t* work_req); - static void UV_AfterBeginTransaction(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(EndTransaction); -protected: - static void UV_EndTransaction(uv_work_t* work_req); - static void UV_AfterEndTransaction(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(Open); -protected: - static void UV_Open(uv_work_t* work_req); - static void UV_AfterOpen(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(Close); -protected: - static void UV_Close(uv_work_t* work_req); - static void UV_AfterClose(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(CreateStatement); -protected: - static void UV_CreateStatement(uv_work_t* work_req); - 
static void UV_AfterCreateStatement(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(Query); -protected: - static void UV_Query(uv_work_t* req); - static void UV_AfterQuery(uv_work_t* req, int status); - -public: - static NAN_METHOD(Columns); -protected: - static void UV_Columns(uv_work_t* req); - -public: - static NAN_METHOD(Tables); -protected: - static void UV_Tables(uv_work_t* req); - - //sync methods -public: - static NAN_METHOD(CloseSync); - static NAN_METHOD(CreateStatementSync); - static NAN_METHOD(OpenSync); - static NAN_METHOD(QuerySync); - static NAN_METHOD(BeginTransactionSync); - static NAN_METHOD(EndTransactionSync); - static NAN_METHOD(GetInfoSync); -protected: - - struct Fetch_Request { - Nan::Callback* callback; - ODBCConnection *objResult; - SQLRETURN result; - }; - - ODBCConnection *self(void) { return this; } - - protected: - HENV m_hENV; - HDBC m_hDBC; - SQLUSMALLINT canHaveMoreResults; - bool connected; - int statements; - SQLUINTEGER connectTimeout; - SQLUINTEGER loginTimeout; -}; -struct create_statement_work_data { - Nan::Callback* cb; - ODBCConnection *conn; - HSTMT hSTMT; - int result; -}; + static Napi::FunctionReference constructor; + static Napi::Object Init(Napi::Env env, Napi::Object exports); -struct query_work_data { - Nan::Callback* cb; - ODBCConnection *conn; - HSTMT hSTMT; - - Parameter *params; - int paramCount; - int completionType; - bool noResultObject; - - void *sql; - void *catalog; - void *schema; - void *table; - void *type; - void *column; - - int sqlLen; - int sqlSize; + ODBCConnection(const Napi::CallbackInfo& info); + ~ODBCConnection(); + + private: + + SQLRETURN Free(); + + Napi::Value Close(const Napi::CallbackInfo& info); + Napi::Value CreateStatement(const Napi::CallbackInfo& info); + Napi::Value Query(const Napi::CallbackInfo& info); + Napi::Value CallProcedure(const Napi::CallbackInfo& info); + + Napi::Value BeginTransaction(const Napi::CallbackInfo& info); + Napi::Value Commit(const Napi::CallbackInfo &info); + Napi::Value Rollback(const Napi::CallbackInfo &rollback); + + Napi::Value GetUsername(const Napi::CallbackInfo &info); + Napi::Value Columns(const Napi::CallbackInfo& info); + Napi::Value Tables(const Napi::CallbackInfo& info); + + Napi::Value GetConnAttr(const Napi::CallbackInfo& info); + Napi::Value SetConnAttr(const Napi::CallbackInfo& info); + + //Property Getter/Setterss + Napi::Value ConnectedGetter(const Napi::CallbackInfo& info); + // void ConnectedSetter(const Napi::CallbackInfo& info, const Napi::Value &value); + Napi::Value ConnectTimeoutGetter(const Napi::CallbackInfo& info); + void ConnectTimeoutSetter(const Napi::CallbackInfo& info, const Napi::Value &value); + Napi::Value LoginTimeoutGetter(const Napi::CallbackInfo& info); + void LoginTimeoutSetter(const Napi::CallbackInfo& info, const Napi::Value &value); + Napi::Value AutocommitGetter(const Napi::CallbackInfo& info); + + Napi::Value GetInfo(const Napi::Env env, const SQLUSMALLINT option); + + bool isConnected; + bool autocommit; - int result; -}; + int numStatements; -struct open_connection_work_data { - Nan::Callback* cb; - ODBCConnection *conn; - int result; - int connectionLength; - void* connection; -}; + SQLHENV hENV; + SQLHDBC hDBC; -struct close_connection_work_data { - Nan::Callback* cb; - ODBCConnection *conn; - int result; + SQLUINTEGER connectionTimeout; + SQLUINTEGER loginTimeout; }; #endif diff --git a/src/odbc_result.cpp b/src/odbc_result.cpp deleted file mode 100644 index 1c1a716..0000000 --- a/src/odbc_result.cpp +++ /dev/null @@ 
-1,780 +0,0 @@ -/* - Copyright (c) 2013, Dan VerWeire - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -#include -#include -#include -#include -#include -#include - -#include "odbc.h" -#include "odbc_connection.h" -#include "odbc_result.h" -#include "odbc_statement.h" - -using namespace v8; -using namespace node; - -Nan::Persistent ODBCResult::constructor; -Nan::Persistent ODBCResult::OPTION_FETCH_MODE; - -void ODBCResult::Init(v8::Handle exports) { - DEBUG_PRINTF("ODBCResult::Init\n"); - Nan::HandleScope scope; - - Local constructor_template = Nan::New(New); - - // Constructor Template - constructor_template->SetClassName(Nan::New("ODBCResult").ToLocalChecked()); - - // Reserve space for one Handle - Local instance_template = constructor_template->InstanceTemplate(); - instance_template->SetInternalFieldCount(1); - - // Prototype Methods - Nan::SetPrototypeMethod(constructor_template, "fetchAll", FetchAll); - Nan::SetPrototypeMethod(constructor_template, "fetch", Fetch); - - Nan::SetPrototypeMethod(constructor_template, "moreResultsSync", MoreResultsSync); - Nan::SetPrototypeMethod(constructor_template, "closeSync", CloseSync); - Nan::SetPrototypeMethod(constructor_template, "fetchSync", FetchSync); - Nan::SetPrototypeMethod(constructor_template, "fetchAllSync", FetchAllSync); - Nan::SetPrototypeMethod(constructor_template, "getColumnNamesSync", GetColumnNamesSync); - Nan::SetPrototypeMethod(constructor_template, "getRowCountSync", GetRowCountSync); - - // Properties - OPTION_FETCH_MODE.Reset(Nan::New("fetchMode").ToLocalChecked()); - Nan::SetAccessor(instance_template, Nan::New("fetchMode").ToLocalChecked(), FetchModeGetter, FetchModeSetter); - - // Attach the Database Constructor to the target object - constructor.Reset(constructor_template->GetFunction()); - exports->Set(Nan::New("ODBCResult").ToLocalChecked(), - constructor_template->GetFunction()); -} - -ODBCResult::~ODBCResult() { - DEBUG_PRINTF("ODBCResult::~ODBCResult\n"); - //DEBUG_PRINTF("ODBCResult::~ODBCResult m_hSTMT=%x\n", m_hSTMT); - this->Free(); -} - -void ODBCResult::Free() { - DEBUG_PRINTF("ODBCResult::Free\n"); - //DEBUG_PRINTF("ODBCResult::Free m_hSTMT=%X m_canFreeHandle=%X\n", m_hSTMT, m_canFreeHandle); - - if (m_hSTMT && m_canFreeHandle) { - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeHandle( SQL_HANDLE_STMT, m_hSTMT); - - m_hSTMT = NULL; - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } - - if (bufferLength > 0) { - bufferLength = 0; - free(buffer); - } -} - -NAN_METHOD(ODBCResult::New) { - DEBUG_PRINTF("ODBCResult::New\n"); - Nan::HandleScope scope; - - REQ_EXT_ARG(0, js_henv); - REQ_EXT_ARG(1, js_hdbc); - REQ_EXT_ARG(2, js_hstmt); - REQ_EXT_ARG(3, js_canFreeHandle); - - HENV hENV = static_cast(js_henv->Value()); - HDBC hDBC = static_cast(js_hdbc->Value()); - HSTMT hSTMT = static_cast(js_hstmt->Value()); - bool* canFreeHandle = 
static_cast(js_canFreeHandle->Value()); - - //create a new OBCResult object - ODBCResult* objODBCResult = new ODBCResult(hENV, hDBC, hSTMT, *canFreeHandle); - - DEBUG_PRINTF("ODBCResult::New\n"); - //DEBUG_PRINTF("ODBCResult::New m_hDBC=%X m_hDBC=%X m_hSTMT=%X canFreeHandle=%X\n", - // objODBCResult->m_hENV, - // objODBCResult->m_hDBC, - // objODBCResult->m_hSTMT, - // objODBCResult->m_canFreeHandle - //); - - //free the pointer to canFreeHandle - delete canFreeHandle; - - //specify the buffer length - objODBCResult->bufferLength = MAX_VALUE_SIZE - 1; - - //initialze a buffer for this object - objODBCResult->buffer = (uint16_t *) malloc(objODBCResult->bufferLength + 1); - //TODO: make sure the malloc succeeded - - //set the initial colCount to 0 - objODBCResult->colCount = 0; - - //default fetchMode to FETCH_OBJECT - objODBCResult->m_fetchMode = FETCH_OBJECT; - - objODBCResult->Wrap(info.Holder()); - - info.GetReturnValue().Set(info.Holder()); -} - -NAN_GETTER(ODBCResult::FetchModeGetter) { - Nan::HandleScope scope; - - ODBCResult *obj = Nan::ObjectWrap::Unwrap(info.Holder()); - - info.GetReturnValue().Set(Nan::New(obj->m_fetchMode)); -} - -NAN_SETTER(ODBCResult::FetchModeSetter) { - Nan::HandleScope scope; - - ODBCResult *obj = Nan::ObjectWrap::Unwrap(info.Holder()); - - if (value->IsNumber()) { - obj->m_fetchMode = value->Int32Value(); - } -} - -/* - * Fetch - */ - -NAN_METHOD(ODBCResult::Fetch) { - DEBUG_PRINTF("ODBCResult::Fetch\n"); - Nan::HandleScope scope; - - ODBCResult* objODBCResult = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - fetch_work_data* data = (fetch_work_data *) calloc(1, sizeof(fetch_work_data)); - - Local cb; - - //set the fetch mode to the default of this instance - data->fetchMode = objODBCResult->m_fetchMode; - - if (info.Length() == 1 && info[0]->IsFunction()) { - cb = Local::Cast(info[0]); - } - else if (info.Length() == 2 && info[0]->IsObject() && info[1]->IsFunction()) { - cb = Local::Cast(info[1]); - - Local obj = info[0]->ToObject(); - - Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); - if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { - data->fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); - } - } - else { - return Nan::ThrowTypeError("ODBCResult::Fetch(): 1 or 2 arguments are required. The last argument must be a callback function."); - } - - data->cb = new Nan::Callback(cb); - - data->objResult = objODBCResult; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Fetch, - (uv_after_work_cb)UV_AfterFetch); - - objODBCResult->Ref(); - - info.GetReturnValue().Set(Nan::Undefined()); -} - -void ODBCResult::UV_Fetch(uv_work_t* work_req) { - DEBUG_PRINTF("ODBCResult::UV_Fetch\n"); - - fetch_work_data* data = (fetch_work_data *)(work_req->data); - - data->result = SQLFetch(data->objResult->m_hSTMT); -} - -void ODBCResult::UV_AfterFetch(uv_work_t* work_req, int status) { - DEBUG_PRINTF("ODBCResult::UV_AfterFetch\n"); - Nan::HandleScope scope; - - fetch_work_data* data = (fetch_work_data *)(work_req->data); - - SQLRETURN ret = data->result; - //TODO: we should probably define this on the work data so we - //don't have to keep creating it? 
- Local objError; - bool moreWork = true; - bool error = false; - - if (data->objResult->colCount == 0) { - data->objResult->columns = ODBC::GetColumns( - data->objResult->m_hSTMT, - &data->objResult->colCount); - } - - //check to see if the result has no columns - if (data->objResult->colCount == 0) { - //this means - moreWork = false; - } - //check to see if there was an error - else if (ret == SQL_ERROR) { - moreWork = false; - error = true; - - objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - data->objResult->m_hSTMT, - (char *) "Error in ODBCResult::UV_AfterFetch"); - } - //check to see if we are at the end of the recordset - else if (ret == SQL_NO_DATA) { - moreWork = false; - } - - if (moreWork) { - Local info[2]; - - info[0] = Nan::Null(); - if (data->fetchMode == FETCH_ARRAY) { - info[1] = ODBC::GetRecordArray( - data->objResult->m_hSTMT, - data->objResult->columns, - &data->objResult->colCount, - data->objResult->buffer, - data->objResult->bufferLength); - } - else { - info[1] = ODBC::GetRecordTuple( - data->objResult->m_hSTMT, - data->objResult->columns, - &data->objResult->colCount, - data->objResult->buffer, - data->objResult->bufferLength); - } - - Nan::TryCatch try_catch; - - data->cb->Call(2, info); - delete data->cb; - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - } - else { - ODBC::FreeColumns(data->objResult->columns, &data->objResult->colCount); - - Local info[2]; - - //if there was an error, pass that as arg[0] otherwise Null - if (error) { - info[0] = objError; - } - else { - info[0] = Nan::Null(); - } - - info[1] = Nan::Null(); - - Nan::TryCatch try_catch; - - data->cb->Call(2, info); - delete data->cb; - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - } - - data->objResult->Unref(); - - free(data); - free(work_req); - - return; -} - -/* - * FetchSync - */ - -NAN_METHOD(ODBCResult::FetchSync) { - DEBUG_PRINTF("ODBCResult::FetchSync\n"); - Nan::HandleScope scope; - - ODBCResult* objResult = Nan::ObjectWrap::Unwrap(info.Holder()); - - Local objError; - bool moreWork = true; - bool error = false; - int fetchMode = objResult->m_fetchMode; - - if (info.Length() == 1 && info[0]->IsObject()) { - Local obj = info[0]->ToObject(); - - Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); - if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { - fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); - } - } - - SQLRETURN ret = SQLFetch(objResult->m_hSTMT); - - if (objResult->colCount == 0) { - objResult->columns = ODBC::GetColumns( - objResult->m_hSTMT, - &objResult->colCount); - } - - //check to see if the result has no columns - if (objResult->colCount == 0) { - moreWork = false; - } - //check to see if there was an error - else if (ret == SQL_ERROR) { - moreWork = false; - error = true; - - objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - objResult->m_hSTMT, - (char *) "Error in ODBCResult::UV_AfterFetch"); - } - //check to see if we are at the end of the recordset - else if (ret == SQL_NO_DATA) { - moreWork = false; - } - - if (moreWork) { - Local data; - - if (fetchMode == FETCH_ARRAY) { - data = ODBC::GetRecordArray( - objResult->m_hSTMT, - objResult->columns, - &objResult->colCount, - objResult->buffer, - objResult->bufferLength); - } - else { - data = ODBC::GetRecordTuple( - objResult->m_hSTMT, - objResult->columns, - &objResult->colCount, - objResult->buffer, - objResult->bufferLength); - } - - info.GetReturnValue().Set(data); - } - else { - ODBC::FreeColumns(objResult->columns, 
&objResult->colCount); - - //if there was an error, pass that as arg[0] otherwise Null - if (error) { - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::Null()); - } - else { - info.GetReturnValue().Set(Nan::Null()); - } - } -} - -/* - * FetchAll - */ - -NAN_METHOD(ODBCResult::FetchAll) { - DEBUG_PRINTF("ODBCResult::FetchAll\n"); - Nan::HandleScope scope; - - ODBCResult* objODBCResult = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - fetch_work_data* data = (fetch_work_data *) calloc(1, sizeof(fetch_work_data)); - - Local cb; - - data->fetchMode = objODBCResult->m_fetchMode; - - if (info.Length() == 1 && info[0]->IsFunction()) { - cb = Local::Cast(info[0]); - } - else if (info.Length() == 2 && info[0]->IsObject() && info[1]->IsFunction()) { - cb = Local::Cast(info[1]); - - Local obj = info[0]->ToObject(); - - Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); - if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { - data->fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); - } - } - else { - Nan::ThrowTypeError("ODBCResult::FetchAll(): 1 or 2 arguments are required. The last argument must be a callback function."); - } - - data->rows.Reset(Nan::New()); - data->errorCount = 0; - data->count = 0; - data->objError.Reset(Nan::New()); - - data->cb = new Nan::Callback(cb); - data->objResult = objODBCResult; - - work_req->data = data; - - uv_queue_work(uv_default_loop(), - work_req, - UV_FetchAll, - (uv_after_work_cb)UV_AfterFetchAll); - - data->objResult->Ref(); - - info.GetReturnValue().Set(Nan::Undefined()); -} - -void ODBCResult::UV_FetchAll(uv_work_t* work_req) { - DEBUG_PRINTF("ODBCResult::UV_FetchAll\n"); - - fetch_work_data* data = (fetch_work_data *)(work_req->data); - - data->result = SQLFetch(data->objResult->m_hSTMT); - } - -void ODBCResult::UV_AfterFetchAll(uv_work_t* work_req, int status) { - DEBUG_PRINTF("ODBCResult::UV_AfterFetchAll\n"); - Nan::HandleScope scope; - - fetch_work_data* data = (fetch_work_data *)(work_req->data); - - ODBCResult* self = data->objResult->self(); - - bool doMoreWork = true; - - if (self->colCount == 0) { - self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); - } - - //check to see if the result set has columns - if (self->colCount == 0) { - //this most likely means that the query was something like - //'insert into ....' - doMoreWork = false; - } - //check to see if there was an error - else if (data->result == SQL_ERROR) { - data->errorCount++; - - //NanAssignPersistent(data->objError, ODBC::GetSQLError( - data->objError.Reset(ODBC::GetSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - (char *) "[node-odbc] Error in ODBCResult::UV_AfterFetchAll" - )); - - doMoreWork = false; - } - //check to see if we are at the end of the recordset - else if (data->result == SQL_NO_DATA) { - doMoreWork = false; - } - else { - Local rows = Nan::New(data->rows); - if (data->fetchMode == FETCH_ARRAY) { - rows->Set( - Nan::New(data->count), - ODBC::GetRecordArray( - self->m_hSTMT, - self->columns, - &self->colCount, - self->buffer, - self->bufferLength) - ); - } - else { - rows->Set( - Nan::New(data->count), - ODBC::GetRecordTuple( - self->m_hSTMT, - self->columns, - &self->colCount, - self->buffer, - self->bufferLength) - ); - } - data->count++; - } - - if (doMoreWork) { - //Go back to the thread pool and fetch more data! 
- uv_queue_work( - uv_default_loop(), - work_req, - UV_FetchAll, - (uv_after_work_cb)UV_AfterFetchAll); - } - else { - ODBC::FreeColumns(self->columns, &self->colCount); - - Local info[2]; - - if (data->errorCount > 0) { - info[0] = Nan::New(data->objError); - } - else { - info[0] = Nan::Null(); - } - - info[1] = Nan::New(data->rows); - - Nan::TryCatch try_catch; - - data->cb->Call(2, info); - delete data->cb; - data->rows.Reset(); - data->objError.Reset(); - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - - free(data); - free(work_req); - - self->Unref(); - } -} - -/* - * FetchAllSync - */ - -NAN_METHOD(ODBCResult::FetchAllSync) { - DEBUG_PRINTF("ODBCResult::FetchAllSync\n"); - Nan::HandleScope scope; - - ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); - - Local objError = Nan::New(); - - SQLRETURN ret; - int count = 0; - int errorCount = 0; - int fetchMode = self->m_fetchMode; - - if (info.Length() == 1 && info[0]->IsObject()) { - Local obj = info[0]->ToObject(); - - Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); - if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { - fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); - } - } - - if (self->colCount == 0) { - self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); - } - - Local rows = Nan::New(); - - //Only loop through the recordset if there are columns - if (self->colCount > 0) { - //loop through all records - while (true) { - ret = SQLFetch(self->m_hSTMT); - - //check to see if there was an error - if (ret == SQL_ERROR) { - errorCount++; - - objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - (char *) "[node-odbc] Error in ODBCResult::UV_AfterFetchAll; probably" - " your query did not have a result set." 
- ); - - break; - } - - //check to see if we are at the end of the recordset - if (ret == SQL_NO_DATA) { - ODBC::FreeColumns(self->columns, &self->colCount); - - break; - } - - if (fetchMode == FETCH_ARRAY) { - rows->Set( - Nan::New(count), - ODBC::GetRecordArray( - self->m_hSTMT, - self->columns, - &self->colCount, - self->buffer, - self->bufferLength) - ); - } - else { - rows->Set( - Nan::New(count), - ODBC::GetRecordTuple( - self->m_hSTMT, - self->columns, - &self->colCount, - self->buffer, - self->bufferLength) - ); - } - count++; - } - } - else { - ODBC::FreeColumns(self->columns, &self->colCount); - } - - //throw the error object if there were errors - if (errorCount > 0) { - Nan::ThrowError(objError); - } - - info.GetReturnValue().Set(rows); -} - -/* - * CloseSync - * - */ - -NAN_METHOD(ODBCResult::CloseSync) { - DEBUG_PRINTF("ODBCResult::CloseSync\n"); - Nan::HandleScope scope; - - OPT_INT_ARG(0, closeOption, SQL_DESTROY); - - ODBCResult* result = Nan::ObjectWrap::Unwrap(info.Holder()); - - DEBUG_PRINTF("ODBCResult::CloseSync closeOption=%i m_canFreeHandle=%i\n", - closeOption, result->m_canFreeHandle); - - if (closeOption == SQL_DESTROY && result->m_canFreeHandle) { - result->Free(); - } - else if (closeOption == SQL_DESTROY && !result->m_canFreeHandle) { - //We technically can't free the handle so, we'll SQL_CLOSE - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeStmt(result->m_hSTMT, SQL_CLOSE); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } - else { - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeStmt(result->m_hSTMT, closeOption); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } - - info.GetReturnValue().Set(Nan::True()); -} - -NAN_METHOD(ODBCResult::MoreResultsSync) { - DEBUG_PRINTF("ODBCResult::MoreResultsSync\n"); - Nan::HandleScope scope; - - ODBCResult* result = Nan::ObjectWrap::Unwrap(info.Holder()); - - SQLRETURN ret = SQLMoreResults(result->m_hSTMT); - - if (ret == SQL_ERROR) { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - result->m_hSTMT, - (char *)"[node-odbc] Error in ODBCResult::MoreResultsSync" - ); - - Nan::ThrowError(objError); - } - - info.GetReturnValue().Set(SQL_SUCCEEDED(ret) || ret == SQL_ERROR ? 
Nan::True() : Nan::False()); -} - -/* - * GetColumnNamesSync - */ - -NAN_METHOD(ODBCResult::GetColumnNamesSync) { - DEBUG_PRINTF("ODBCResult::GetColumnNamesSync\n"); - Nan::HandleScope scope; - - ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); - - Local cols = Nan::New(); - - if (self->colCount == 0) { - self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); - } - - for (int i = 0; i < self->colCount; i++) { -#ifdef UNICODE - cols->Set(Nan::New(i), - Nan::New((uint16_t*) self->columns[i].name).ToLocalChecked()); -#else - cols->Set(Nan::New(i), - Nan::New((char *) self->columns[i].name).ToLocalChecked()); -#endif - - } - - info.GetReturnValue().Set(cols); -} - -/* - * GetRowCountSync - */ - -NAN_METHOD(ODBCResult::GetRowCountSync) { - DEBUG_PRINTF("ODBCResult::GetRowCountSync\n"); - Nan::HandleScope scope; - - ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); - - SQLLEN rowCount = 0; - - SQLRETURN ret = SQLRowCount(self->m_hSTMT, &rowCount); - - if (!SQL_SUCCEEDED(ret)) { - rowCount = 0; - } - - info.GetReturnValue().Set(Nan::New(rowCount)); -} diff --git a/src/odbc_result.h b/src/odbc_result.h deleted file mode 100644 index f100614..0000000 --- a/src/odbc_result.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright (c) 2013, Dan VerWeire - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ - -#ifndef _SRC_ODBC_RESULT_H -#define _SRC_ODBC_RESULT_H - -#include - -class ODBCResult : public Nan::ObjectWrap { - public: - static Nan::Persistent OPTION_FETCH_MODE; - static Nan::Persistent constructor; - static void Init(v8::Handle exports); - - void Free(); - - protected: - ODBCResult() {}; - - explicit ODBCResult(HENV hENV, HDBC hDBC, HSTMT hSTMT, bool canFreeHandle): - Nan::ObjectWrap(), - m_hENV(hENV), - m_hDBC(hDBC), - m_hSTMT(hSTMT), - m_canFreeHandle(canFreeHandle) {}; - - ~ODBCResult(); - - //constructor -public: - static NAN_METHOD(New); - - //async methods - static NAN_METHOD(Fetch); -protected: - static void UV_Fetch(uv_work_t* work_req); - static void UV_AfterFetch(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(FetchAll); -protected: - static void UV_FetchAll(uv_work_t* work_req); - static void UV_AfterFetchAll(uv_work_t* work_req, int status); - - //sync methods -public: - static NAN_METHOD(CloseSync); - static NAN_METHOD(MoreResultsSync); - static NAN_METHOD(FetchSync); - static NAN_METHOD(FetchAllSync); - static NAN_METHOD(GetColumnNamesSync); - static NAN_METHOD(GetRowCountSync); - - //property getter/setters - static NAN_GETTER(FetchModeGetter); - static NAN_SETTER(FetchModeSetter); - -protected: - struct fetch_work_data { - Nan::Callback* cb; - ODBCResult *objResult; - SQLRETURN result; - - int fetchMode; - int count; - int errorCount; - Nan::Persistent rows; - Nan::Persistent objError; - }; - - ODBCResult *self(void) { return this; } - - protected: - HENV m_hENV; - HDBC m_hDBC; - HSTMT m_hSTMT; - bool m_canFreeHandle; - int m_fetchMode; - - uint16_t *buffer; - int bufferLength; - Column *columns; - short colCount; -}; - - - -#endif diff --git a/src/odbc_statement.cpp b/src/odbc_statement.cpp old mode 100644 new mode 100755 index 9f92ecc..010ae7f --- a/src/odbc_statement.cpp +++ b/src/odbc_statement.cpp @@ -14,1009 +14,375 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include -#include -#include -#include +#include +// #include #include -#include #include "odbc.h" #include "odbc_connection.h" -#include "odbc_result.h" #include "odbc_statement.h" -using namespace v8; -using namespace node; +Napi::FunctionReference ODBCStatement::constructor; -Nan::Persistent ODBCStatement::constructor; +HENV hENV; +HDBC hDBC; -void ODBCStatement::Init(v8::Handle exports) { - DEBUG_PRINTF("ODBCStatement::Init\n"); - Nan::HandleScope scope; +Napi::Object ODBCStatement::Init(Napi::Env env, Napi::Object exports) { - Local t = Nan::New(New); + DEBUG_PRINTF("ODBCStatement::Init\n"); - // Constructor Template - - t->SetClassName(Nan::New("ODBCStatement").ToLocalChecked()); + Napi::HandleScope scope(env); - // Reserve space for one Handle - Local instance_template = t->InstanceTemplate(); - instance_template->SetInternalFieldCount(1); - - // Prototype Methods - Nan::SetPrototypeMethod(t, "execute", Execute); - Nan::SetPrototypeMethod(t, "executeSync", ExecuteSync); - - Nan::SetPrototypeMethod(t, "executeDirect", ExecuteDirect); - Nan::SetPrototypeMethod(t, "executeDirectSync", ExecuteDirectSync); - - Nan::SetPrototypeMethod(t, "executeNonQuery", ExecuteNonQuery); - Nan::SetPrototypeMethod(t, "executeNonQuerySync", ExecuteNonQuerySync); - - Nan::SetPrototypeMethod(t, "prepare", Prepare); - Nan::SetPrototypeMethod(t, "prepareSync", PrepareSync); - - Nan::SetPrototypeMethod(t, "bind", Bind); - Nan::SetPrototypeMethod(t, "bindSync", BindSync); - - Nan::SetPrototypeMethod(t, "closeSync", CloseSync); + Napi::Function constructorFunction = DefineClass(env, "ODBCStatement", { + InstanceMethod("prepare", &ODBCStatement::Prepare), + InstanceMethod("bind", &ODBCStatement::Bind), + InstanceMethod("execute", &ODBCStatement::Execute), + InstanceMethod("close", &ODBCStatement::Close), + }); // Attach the Database Constructor to the target object - constructor.Reset(t->GetFunction()); - exports->Set(Nan::New("ODBCStatement").ToLocalChecked(), t->GetFunction()); + constructor = Napi::Persistent(constructorFunction); + constructor.SuppressDestruct(); + + return exports; +} + + +ODBCStatement::ODBCStatement(const Napi::CallbackInfo& info) : Napi::ObjectWrap(info) { + + this->data = new QueryData(); + this->hENV = *(info[0].As>().Data()); + this->hDBC = *(info[1].As>().Data()); + this->data->hSTMT = *(info[2].As>().Data()); } ODBCStatement::~ODBCStatement() { this->Free(); + delete data; + data = NULL; } -void ODBCStatement::Free() { +SQLRETURN ODBCStatement::Free() { DEBUG_PRINTF("ODBCStatement::Free\n"); - //if we previously had parameters, then be sure to free them - if (paramCount) { - int count = paramCount; - paramCount = 0; - - Parameter prm; - - //free parameter memory - for (int i = 0; i < count; i++) { - if (prm = params[i], prm.ParameterValuePtr != NULL) { - switch (prm.ValueType) { - case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; - case SQL_C_CHAR: free(prm.ParameterValuePtr); break; - case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; - case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; - case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; - } - } - } - free(params); - } - - if (m_hSTMT) { + if (this->data && this->data->hSTMT) { uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeHandle(SQL_HANDLE_STMT, m_hSTMT); - m_hSTMT = NULL; - + this->data->sqlReturnCode = SQLFreeHandle(SQL_HANDLE_STMT, this->data->hSTMT); + this->data->hSTMT = SQL_NULL_HANDLE; + data->clear(); uv_mutex_unlock(&ODBC::g_odbcMutex); - - if (bufferLength > 0) { - 
free(buffer); - } } -} - -NAN_METHOD(ODBCStatement::New) { - DEBUG_PRINTF("ODBCStatement::New\n"); - Nan::HandleScope scope; - - REQ_EXT_ARG(0, js_henv); - REQ_EXT_ARG(1, js_hdbc); - REQ_EXT_ARG(2, js_hstmt); - - HENV hENV = static_cast(js_henv->Value()); - HDBC hDBC = static_cast(js_hdbc->Value()); - HSTMT hSTMT = static_cast(js_hstmt->Value()); - - //create a new OBCResult object - ODBCStatement* stmt = new ODBCStatement(hENV, hDBC, hSTMT); - - //specify the buffer length - stmt->bufferLength = MAX_VALUE_SIZE - 1; - - //initialze a buffer for this object - stmt->buffer = (uint16_t *) malloc(stmt->bufferLength + 1); - //TODO: make sure the malloc succeeded - - //set the initial colCount to 0 - stmt->colCount = 0; - - //initialize the paramCount - stmt->paramCount = 0; - - stmt->Wrap(info.Holder()); - - info.GetReturnValue().Set(info.Holder()); -} - -/* - * Execute - */ - -NAN_METHOD(ODBCStatement::Execute) { - DEBUG_PRINTF("ODBCStatement::Execute\n"); - - Nan::HandleScope scope; - - REQ_FUN_ARG(0, cb); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - execute_work_data* data = - (execute_work_data *) calloc(1, sizeof(execute_work_data)); - - data->cb = new Nan::Callback(cb); - - data->stmt = stmt; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Execute, - (uv_after_work_cb)UV_AfterExecute); - - stmt->Ref(); - - info.GetReturnValue().Set(Nan::Undefined()); + // TODO: Actually fix this + return SQL_SUCCESS; } -void ODBCStatement::UV_Execute(uv_work_t* req) { - DEBUG_PRINTF("ODBCStatement::UV_Execute\n"); - - execute_work_data* data = (execute_work_data *)(req->data); - - SQLRETURN ret; - - ret = SQLExecute(data->stmt->m_hSTMT); - - data->result = ret; -} +/****************************************************************************** + ********************************* PREPARE ************************************ + *****************************************************************************/ + +// PrepareAsyncWorker, used by Prepare function (see below) +class PrepareAsyncWorker : public Napi::AsyncWorker { + + private: + ODBCStatement *odbcStatementObject; + QueryData *data; + + public: + PrepareAsyncWorker(ODBCStatement *odbcStatementObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcStatementObject(odbcStatementObject), + data(odbcStatementObject->data) {} + + ~PrepareAsyncWorker() {} + + void Execute() { + + DEBUG_PRINTF("ODBCStatement::PrepareAsyncWorker in Execute()\n"); + + DEBUG_PRINTF("ODBCStatement::PrepareAsyncWorker hDBC=%X hDBC=%X hSTMT=%X\n", + odbcStatementObject->hENV, + odbcStatementObject->hDBC, + data->hSTMT + ); + + data->sqlReturnCode = SQLPrepare( + data->hSTMT, // StatementHandle + data->sql, // StatementText + SQL_NTS // TextLength + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "PrepareAsyncWorker::Execute", "SQLPrepare"); + + // front-load the work of SQLNumParams and SQLDescribeParam here, so we + // can convert NAPI/JavaScript values to C values immediately in Bind + data->sqlReturnCode = SQLNumParams( + data->hSTMT, // StatementHandle + &data->parameterCount // ParameterCountPtr + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "PrepareAsyncWorker::Execute", "SQLNumParams"); + + data->parameters = new Parameter*[data->parameterCount]; + for (SQLSMALLINT i = 0; i < data->parameterCount; i++) { + data->parameters[i] = new 
Parameter(); + } -void ODBCStatement::UV_AfterExecute(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCStatement::UV_AfterExecute\n"); - - execute_work_data* data = (execute_work_data *)(req->data); - - Nan::HandleScope scope; - - //an easy reference to the statment object - ODBCStatement* self = data->stmt->self(); - - //First thing, let's check if the execution of the query returned any errors - if(data->result == SQL_ERROR) { - ODBC::CallbackSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - data->cb); - } - else { - Local info[4]; - bool* canFreeHandle = new bool(false); - - info[0] = Nan::New(self->m_hENV); - info[1] = Nan::New(self->m_hDBC); - info[2] = Nan::New(self->m_hSTMT); - info[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); + data->sqlReturnCode = ODBC::DescribeParameters(data->hSTMT, data->parameters, data->parameterCount); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "PrepareAsyncWorker::Execute", "---"); + } - info[0] = Nan::Null(); - info[1] = js_result; + void OnOK() { - Nan::TryCatch try_catch; + DEBUG_PRINTF("ODBCStatement::PrepareAsyncWorker in OnOk()\n"); + DEBUG_PRINTF("ODBCStatement::PrepareAsyncWorker hDBC=%X hDBC=%X hSTMT=%X\n", + odbcStatementObject->hENV, + odbcStatementObject->hDBC, + data->hSTMT + ); - data->cb->Call(2, info); + Napi::Env env = Env(); + Napi::HandleScope scope(env); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + Callback().Call(callbackArguments); } - } - - self->Unref(); - delete data->cb; - - free(data); - free(req); -} +}; /* - * ExecuteSync + * ODBCStatement:Prepare (Async) + * Description: Prepares an SQL string so that it can be bound with + * parameters and then executed. + * + * Parameters: + * const Napi::CallbackInfo& info: + * The information passed by Napi from the JavaScript call, including + * arguments from the JavaScript function. In JavaScript, the + * prepare() function takes two arguments. + * + * info[0]: String: the SQL string to prepare. + * info[1]: Function: callback function: + * function(error, result) + * error: An error object if there was a problem getting results, + * or null if operation was successful. + * result: The number of rows affected by the executed query. * + * Return: + * Napi::Value: + * Undefined (results returned in callback). 
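+ *
+ *        A brief usage sketch of the prepare/bind/execute flow (JavaScript,
+ *        illustrative only; the connection, table name, and parameter values
+ *        are assumptions, and error handling is omitted for brevity):
+ *
+ *          connection.createStatement((error, statement) => {
+ *            statement.prepare('INSERT INTO EXAMPLE(ID) VALUES(?)', (error) => {
+ *              statement.bind([1], (error) => {
+ *                statement.execute((error, result) => {
+ *                  statement.close(() => {});
+ *                });
+ *              });
+ *            });
+ *          });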
*/ +Napi::Value ODBCStatement::Prepare(const Napi::CallbackInfo& info) { -NAN_METHOD(ODBCStatement::ExecuteSync) { - DEBUG_PRINTF("ODBCStatement::ExecuteSync\n"); - - Nan::HandleScope scope; + DEBUG_PRINTF("ODBCStatement::Prepare\n"); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - SQLRETURN ret = SQLExecute(stmt->m_hSTMT); - - if(ret == SQL_ERROR) { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - stmt->m_hSTMT, - (char *) "[node-odbc] Error in ODBCStatement::ExecuteSync" - ); - - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::Null()); + if(!info[0].IsString() || !info[1].IsFunction()){ + Napi::TypeError::New(env, "Argument 0 must be a string , Argument 1 must be a function.").ThrowAsJavaScriptException(); + return env.Null(); } - else { - Local result[4]; - bool* canFreeHandle = new bool(false); - - result[0] = Nan::New(stmt->m_hENV); - result[1] = Nan::New(stmt->m_hDBC); - result[2] = Nan::New(stmt->m_hSTMT); - result[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); - info.GetReturnValue().Set(js_result); - } -} + Napi::String sql = info[0].ToString(); + Napi::Function callback = info[1].As(); -/* - * ExecuteNonQuery - */ - -NAN_METHOD(ODBCStatement::ExecuteNonQuery) { - DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); - - Nan::HandleScope scope; - - REQ_FUN_ARG(0, cb); + data->sql = ODBC::NapiStringToSQLTCHAR(sql); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - execute_work_data* data = - (execute_work_data *) calloc(1, sizeof(execute_work_data)); - - data->cb = new Nan::Callback(cb); - - data->stmt = stmt; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_ExecuteNonQuery, - (uv_after_work_cb)UV_AfterExecuteNonQuery); + PrepareAsyncWorker *worker = new PrepareAsyncWorker(this, callback); + worker->Queue(); - stmt->Ref(); - - info.GetReturnValue().Set(Nan::Undefined()); + return env.Undefined(); } -void ODBCStatement::UV_ExecuteNonQuery(uv_work_t* req) { - DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); - - execute_work_data* data = (execute_work_data *)(req->data); +/****************************************************************************** + *********************************** BIND ************************************* + *****************************************************************************/ - SQLRETURN ret; - - ret = SQLExecute(data->stmt->m_hSTMT); +// BindAsyncWorker, used by Bind function (see below) +class BindAsyncWorker : public Napi::AsyncWorker { - data->result = ret; -} + private: -void ODBCStatement::UV_AfterExecuteNonQuery(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); - - execute_work_data* data = (execute_work_data *)(req->data); - - Nan::HandleScope scope; - - //an easy reference to the statment object - ODBCStatement* self = data->stmt->self(); - - //First thing, let's check if the execution of the query returned any errors - if(data->result == SQL_ERROR) { - ODBC::CallbackSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - data->cb); - } - else { - SQLLEN rowCount = 0; - - SQLRETURN ret = SQLRowCount(self->m_hSTMT, &rowCount); - - if (!SQL_SUCCEEDED(ret)) { - rowCount = 0; - } - - uv_mutex_lock(&ODBC::g_odbcMutex); - SQLFreeStmt(self->m_hSTMT, SQL_CLOSE); - uv_mutex_unlock(&ODBC::g_odbcMutex); - - Local info[2]; + 
ODBCStatement *statementObject; + QueryData *data; - info[0] = Nan::Null(); - // We get a potential loss of precision here. Number isn't as big as int64. Probably fine though. - info[1] = Nan::New(rowCount); + ~BindAsyncWorker() { } - Nan::TryCatch try_catch; - - data->cb->Call(2, info); - - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); + void Execute() { + data->sqlReturnCode = ODBC::BindParameters(data->hSTMT, data->parameters, data->parameterCount); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "BindAsyncWorker::Execute", "---"); } - } - self->Unref(); - delete data->cb; - - free(data); - free(req); -} + void OnOK() { -/* - * ExecuteNonQuerySync - * - */ - -NAN_METHOD(ODBCStatement::ExecuteNonQuerySync) { - DEBUG_PRINTF("ODBCStatement::ExecuteNonQuerySync\n"); - - Nan::HandleScope scope; + DEBUG_PRINTF("\nStatement::BindAsyncWorker::OnOk"); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + Napi::Env env = Env(); + Napi::HandleScope scope(env); - SQLRETURN ret = SQLExecute(stmt->m_hSTMT); - - if(ret == SQL_ERROR) { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - stmt->m_hSTMT, - (char *) "[node-odbc] Error in ODBCStatement::ExecuteSync" - ); - - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::Null()); - } - else { - SQLLEN rowCount = 0; - - SQLRETURN ret = SQLRowCount(stmt->m_hSTMT, &rowCount); - - if (!SQL_SUCCEEDED(ret)) { - rowCount = 0; + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + Callback().Call(callbackArguments); } - - uv_mutex_lock(&ODBC::g_odbcMutex); - SQLFreeStmt(stmt->m_hSTMT, SQL_CLOSE); - uv_mutex_unlock(&ODBC::g_odbcMutex); - - info.GetReturnValue().Set(Nan::New(rowCount)); - } -} - -/* - * ExecuteDirect - * - */ - -NAN_METHOD(ODBCStatement::ExecuteDirect) { - DEBUG_PRINTF("ODBCStatement::ExecuteDirect\n"); - - Nan::HandleScope scope; - - REQ_STRO_ARG(0, sql); - REQ_FUN_ARG(1, cb); - - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - execute_direct_work_data* data = - (execute_direct_work_data *) calloc(1, sizeof(execute_direct_work_data)); - - data->cb = new Nan::Callback(cb); - -#ifdef UNICODE - data->sqlLen = sql->Length(); - data->sql = (uint16_t *) malloc((data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t)); - sql->Write((uint16_t *) data->sql); -#else - data->sqlLen = sql->Utf8Length(); - data->sql = (char *) malloc(data->sqlLen +1); - sql->WriteUtf8((char *) data->sql); -#endif - - data->stmt = stmt; - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_ExecuteDirect, - (uv_after_work_cb)UV_AfterExecuteDirect); - stmt->Ref(); + public: - info.GetReturnValue().Set(Nan::Undefined()); -} + BindAsyncWorker(ODBCStatement *statementObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + statementObject(statementObject), + data(statementObject->data) {} +}; -void ODBCStatement::UV_ExecuteDirect(uv_work_t* req) { - DEBUG_PRINTF("ODBCStatement::UV_ExecuteDirect\n"); - - execute_direct_work_data* data = (execute_direct_work_data *)(req->data); +Napi::Value ODBCStatement::Bind(const Napi::CallbackInfo& info) { - SQLRETURN ret; + DEBUG_PRINTF("ODBCStatement::Bind\n"); - ret = SQLExecDirect( - data->stmt->m_hSTMT, - (SQLTCHAR *) data->sql, - data->sqlLen); - - data->result = ret; -} + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); -void ODBCStatement::UV_AfterExecuteDirect(uv_work_t* req, int status) { 
- DEBUG_PRINTF("ODBCStatement::UV_AfterExecuteDirect\n"); - - execute_direct_work_data* data = (execute_direct_work_data *)(req->data); - - Nan::HandleScope scope; - - //an easy reference to the statment object - ODBCStatement* self = data->stmt->self(); - - //First thing, let's check if the execution of the query returned any errors - if(data->result == SQL_ERROR) { - ODBC::CallbackSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - data->cb); + if ( !info[0].IsArray() || !info[1].IsFunction() ) { + Napi::TypeError::New(env, "Function signature is: bind(array, function)").ThrowAsJavaScriptException(); + return env.Null(); } - else { - Local info[4]; - bool* canFreeHandle = new bool(false); - - info[0] = Nan::New(self->m_hENV); - info[1] = Nan::New(self->m_hDBC); - info[2] = Nan::New(self->m_hSTMT); - info[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); - info[0] = Nan::Null(); - info[1] = js_result; + Napi::Array bindArray = info[0].As(); + Napi::Function callback = info[1].As(); - Nan::TryCatch try_catch; + // if the parameter count isnt right, end right away + if (data->parameterCount != (SQLSMALLINT)bindArray.Length() || data->parameters == NULL) { + std::vector callbackArguments; - data->cb->Call(2, info); + Napi::Error error = Napi::Error::New(env, Napi::String::New(env, "[node-odbc] Error in Statement::BindAsyncWorker::Bind: The number of parameters in the prepared statement doesn't match the number of parameters passed to bind.")); + callbackArguments.push_back(error.Value()); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } + callback.Call(callbackArguments); + return env.Undefined(); } - self->Unref(); - delete data->cb; - - free(data->sql); - free(data); - free(req); -} - -/* - * ExecuteDirectSync - * - */ - -NAN_METHOD(ODBCStatement::ExecuteDirectSync) { - DEBUG_PRINTF("ODBCStatement::ExecuteDirectSync\n"); - - Nan::HandleScope scope; - -#ifdef UNICODE - REQ_WSTR_ARG(0, sql); -#else - REQ_STR_ARG(0, sql); -#endif + // converts NAPI/JavaScript values to values used by SQLBindParameter + ODBC::StoreBindValues(&bindArray, this->data->parameters); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - SQLRETURN ret = SQLExecDirect( - stmt->m_hSTMT, - (SQLTCHAR *) *sql, - sql.length()); - - if(ret == SQL_ERROR) { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - stmt->m_hSTMT, - (char *) "[node-odbc] Error in ODBCStatement::ExecuteDirectSync" - ); - - Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::Null()); - } - else { - Local result[4]; - bool* canFreeHandle = new bool(false); - - result[0] = Nan::New(stmt->m_hENV); - result[1] = Nan::New(stmt->m_hDBC); - result[2] = Nan::New(stmt->m_hSTMT); - result[3] = Nan::New(canFreeHandle); - - Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); + BindAsyncWorker *worker = new BindAsyncWorker(this, callback); + worker->Queue(); - info.GetReturnValue().Set(js_result); - } + return env.Undefined(); } -/* - * PrepareSync - * - */ - -NAN_METHOD(ODBCStatement::PrepareSync) { - DEBUG_PRINTF("ODBCStatement::PrepareSync\n"); - - Nan::HandleScope scope; +/****************************************************************************** + ********************************* EXECUTE ************************************ + *****************************************************************************/ - REQ_STRO_ARG(0, sql); +// ExecuteAsyncWorker, used by Execute function (see 
below) +class ExecuteAsyncWorker : public Napi::AsyncWorker { - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + private: + ODBCStatement *odbcStatementObject; + QueryData *data; - SQLRETURN ret; + void Execute() { -#ifdef UNICODE - int sqlLen = sql->Length() + 1; - uint16_t* sql2 = (uint16_t *) malloc(sqlLen * sizeof(uint16_t)); - sql->Write(sql2); -#else - int sqlLen = sql->Utf8Length() + 1; - char* sql2 = (char *) malloc(sqlLen); - sql->WriteUtf8(sql2); -#endif - - ret = SQLPrepare( - stmt->m_hSTMT, - (SQLTCHAR *) sql2, - sqlLen); - - if (SQL_SUCCEEDED(ret)) { - info.GetReturnValue().Set(Nan::True()); - } - else { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - stmt->m_hSTMT, - (char *) "[node-odbc] Error in ODBCStatement::PrepareSync" - ); + DEBUG_PRINTF("ODBCStatement::ExecuteAsyncWorker::Execute\n"); - Nan::ThrowError(objError); + data->sqlReturnCode = SQLExecute( + data->hSTMT // StatementHandle + ); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "ExecuteAsyncWorker::Execute", "SQLExecute"); - info.GetReturnValue().Set(Nan::False()); - } -} + data->sqlReturnCode = ODBC::RetrieveResultSet(data); + ASYNC_WORKER_CHECK_CODE_SET_ERROR_RETURN(data->sqlReturnCode, SQL_HANDLE_STMT, data->hSTMT, "ExecuteAsyncWorker::Execute", "---"); + } -/* - * Prepare - * - */ + void OnOK() { -NAN_METHOD(ODBCStatement::Prepare) { - DEBUG_PRINTF("ODBCStatement::Prepare\n"); - - Nan::HandleScope scope; + DEBUG_PRINTF("ODBCStatement::ExecuteAsyncWorker::OnOk()\n"); - REQ_STRO_ARG(0, sql); - REQ_FUN_ARG(1, cb); + Napi::Env env = Env(); + Napi::HandleScope scope(env); - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - prepare_work_data* data = - (prepare_work_data *) calloc(1, sizeof(prepare_work_data)); - - data->cb = new Nan::Callback(cb); - -#ifdef UNICODE - data->sqlLen = sql->Length(); - data->sql = (uint16_t *) malloc((data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t)); - sql->Write((uint16_t *) data->sql); -#else - data->sqlLen = sql->Utf8Length(); - data->sql = (char *) malloc(data->sqlLen +1); - sql->WriteUtf8((char *) data->sql); -#endif - - data->stmt = stmt; - - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Prepare, - (uv_after_work_cb)UV_AfterPrepare); - - stmt->Ref(); - - info.GetReturnValue().Set(Nan::Undefined()); -} + Napi::Array rows = ODBC::ProcessDataForNapi(env, data); -void ODBCStatement::UV_Prepare(uv_work_t* req) { - DEBUG_PRINTF("ODBCStatement::UV_Prepare\n"); - - prepare_work_data* data = (prepare_work_data *)(req->data); - - DEBUG_PRINTF("ODBCStatement::UV_Prepare\n"); - //DEBUG_PRINTF("ODBCStatement::UV_Prepare m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->stmt->m_hENV, - // data->stmt->m_hDBC, - // data->stmt->m_hSTMT - //); - - SQLRETURN ret; - - ret = SQLPrepare( - data->stmt->m_hSTMT, - (SQLTCHAR *) data->sql, - data->sqlLen); + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + callbackArguments.push_back(rows); - data->result = ret; -} - -void ODBCStatement::UV_AfterPrepare(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare\n"); - - prepare_work_data* data = (prepare_work_data *)(req->data); - - DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare\n"); - //DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->stmt->m_hENV, - // data->stmt->m_hDBC, - // data->stmt->m_hSTMT - //); - - Nan::HandleScope 
scope; - - //First thing, let's check if the execution of the query returned any errors - if(data->result == SQL_ERROR) { - ODBC::CallbackSQLError( - SQL_HANDLE_STMT, - data->stmt->m_hSTMT, - data->cb); - } - else { - Local info[2]; - - info[0] = Nan::Null(); - info[1] = Nan::True(); + Callback().Call(callbackArguments); + } - Nan::TryCatch try_catch; + public: + ExecuteAsyncWorker(ODBCStatement *odbcStatementObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcStatementObject(odbcStatementObject), + data(odbcStatementObject->data) {} - data->cb->Call( 2, info); + ~ExecuteAsyncWorker() {} +}; - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - } +Napi::Value ODBCStatement::Execute(const Napi::CallbackInfo& info) { - data->stmt->Unref(); - delete data->cb; - - free(data->sql); - free(data); - free(req); -} - -/* - * BindSync - * - */ + DEBUG_PRINTF("ODBCStatement::Execute\n"); -NAN_METHOD(ODBCStatement::BindSync) { - DEBUG_PRINTF("ODBCStatement::BindSync\n"); - - Nan::HandleScope scope; + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if ( !info[0]->IsArray() ) { - return Nan::ThrowTypeError("Argument 1 must be an Array"); - } + Napi::Function callback; - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - DEBUG_PRINTF("ODBCStatement::BindSync\n"); - //DEBUG_PRINTF("ODBCStatement::BindSync m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // stmt->m_hENV, - // stmt->m_hDBC, - // stmt->m_hSTMT - //); - - //if we previously had parameters, then be sure to free them - //before allocating more - if (stmt->paramCount) { - int count = stmt->paramCount; - stmt->paramCount = 0; - - Parameter prm; - - //free parameter memory - for (int i = 0; i < count; i++) { - if (prm = stmt->params[i], prm.ParameterValuePtr != NULL) { - switch (prm.ValueType) { - case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; - case SQL_C_CHAR: free(prm.ParameterValuePtr); break; - case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; - case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; - case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; - } - } - } + if (info[0].IsFunction()) { callback = info[0].As(); } + else { Napi::TypeError::New(env, "execute: first argument must be a function").ThrowAsJavaScriptException(); } - free(stmt->params); - } - - stmt->params = ODBC::GetParametersFromArray( - Local::Cast(info[0]), - &stmt->paramCount); - - SQLRETURN ret = SQL_SUCCESS; - Parameter prm; - - for (int i = 0; i < stmt->paramCount; i++) { - prm = stmt->params[i]; - - /*DEBUG_PRINTF( - "ODBCStatement::BindSync - param[%i]: c_type=%i type=%i " - "buffer_length=%i size=%i length=%i &length=%X decimals=%i value=%s\n", - i, prm.ValueType, prm.ParameterType, prm.BufferLength, prm.ColumnSize, prm.length, - &stmt->params[i].StrLen_or_IndPtr, prm.DecimalDigits, prm.ParameterValuePtr - );*/ - - ret = SQLBindParameter( - stmt->m_hSTMT, //StatementHandle - i + 1, //ParameterNumber - SQL_PARAM_INPUT, //InputOutputType - prm.ValueType, - prm.ParameterType, - prm.ColumnSize, - prm.DecimalDigits, - prm.ParameterValuePtr, - prm.BufferLength, - &stmt->params[i].StrLen_or_IndPtr); - - if (ret == SQL_ERROR) { - break; - } - } + ExecuteAsyncWorker *worker = new ExecuteAsyncWorker(this, callback); + worker->Queue(); - if (SQL_SUCCEEDED(ret)) { - info.GetReturnValue().Set(Nan::True()); - } - else { - Local objError = ODBC::GetSQLError( - SQL_HANDLE_STMT, - stmt->m_hSTMT, - (char *) "[node-odbc] Error in ODBCStatement::BindSync" - ); - - 
Nan::ThrowError(objError); - - info.GetReturnValue().Set(Nan::False()); - } + return env.Undefined(); } -/* - * Bind - * - */ +/****************************************************************************** + ********************************** CLOSE ************************************* + *****************************************************************************/ -NAN_METHOD(ODBCStatement::Bind) { - DEBUG_PRINTF("ODBCStatement::Bind\n"); - - Nan::HandleScope scope; +// CloseStatementAsyncWorker, used by Close function (see below) +class CloseStatementAsyncWorker : public Napi::AsyncWorker { - if ( !info[0]->IsArray() ) { - return Nan::ThrowError("Argument 1 must be an Array"); - } - - REQ_FUN_ARG(1, cb); + private: + ODBCStatement *odbcStatementObject; + QueryData *data; - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); - - bind_work_data* data = - (bind_work_data *) calloc(1, sizeof(bind_work_data)); - - //if we previously had parameters, then be sure to free them - //before allocating more - if (stmt->paramCount) { - int count = stmt->paramCount; - stmt->paramCount = 0; + void Execute() { + DEBUG_PRINTF("ODBCStatement::CloseAsyncWorker::Execute()\n"); + data->sqlReturnCode = odbcStatementObject->Free(); - Parameter prm; - - //free parameter memory - for (int i = 0; i < count; i++) { - if (prm = stmt->params[i], prm.ParameterValuePtr != NULL) { - switch (prm.ValueType) { - case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; - case SQL_C_CHAR: free(prm.ParameterValuePtr); break; - case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; - case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; - case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; - } + if (!SQL_SUCCEEDED(data->sqlReturnCode)) { + SetError(ODBC::GetSQLError(SQL_HANDLE_STMT, data->hSTMT, (char *) "[node-odbc] Error in Statement::CloseAsyncWorker::Execute")); + return; } } - free(stmt->params); - } - - data->stmt = stmt; - - DEBUG_PRINTF("ODBCStatement::Bind\n"); - //DEBUG_PRINTF("ODBCStatement::Bind m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->stmt->m_hENV, - // data->stmt->m_hDBC, - // data->stmt->m_hSTMT - //); - - data->cb = new Nan::Callback(cb); - - data->stmt->params = ODBC::GetParametersFromArray( - Local::Cast(info[0]), - &data->stmt->paramCount); - - work_req->data = data; - - uv_queue_work( - uv_default_loop(), - work_req, - UV_Bind, - (uv_after_work_cb)UV_AfterBind); + void OnOK() { - stmt->Ref(); + DEBUG_PRINTF("ODBCStatement::CloseStatementAsyncWorker::OnOk()\n"); - info.GetReturnValue().Set(Nan::Undefined()); -} + Napi::Env env = Env(); + Napi::HandleScope scope(env); -void ODBCStatement::UV_Bind(uv_work_t* req) { - DEBUG_PRINTF("ODBCStatement::UV_Bind\n"); - - bind_work_data* data = (bind_work_data *)(req->data); - - DEBUG_PRINTF("ODBCStatement::UV_Bind\n"); - //DEBUG_PRINTF("ODBCStatement::UV_Bind m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", - // data->stmt->m_hENV, - // data->stmt->m_hDBC, - // data->stmt->m_hSTMT - //); - - SQLRETURN ret = SQL_SUCCESS; - Parameter prm; - - for (int i = 0; i < data->stmt->paramCount; i++) { - prm = data->stmt->params[i]; - - /*DEBUG_PRINTF( - "ODBCStatement::UV_Bind - param[%i]: c_type=%i type=%i " - "buffer_length=%i size=%i length=%i &length=%X decimals=%i value=%s\n", - i, prm.ValueType, prm.ParameterType, prm.BufferLength, prm.ColumnSize, prm.length, - &data->stmt->params[i].StrLen_or_IndPtr, prm.DecimalDigits, prm.ParameterValuePtr - );*/ - - ret = 
SQLBindParameter( - data->stmt->m_hSTMT, //StatementHandle - i + 1, //ParameterNumber - SQL_PARAM_INPUT, //InputOutputType - prm.ValueType, - prm.ParameterType, - prm.ColumnSize, - prm.DecimalDigits, - prm.ParameterValuePtr, - prm.BufferLength, - &data->stmt->params[i].StrLen_or_IndPtr); - - if (ret == SQL_ERROR) { - break; + std::vector callbackArguments; + callbackArguments.push_back(env.Null()); + Callback().Call(callbackArguments); } - } - data->result = ret; -} + public: + CloseStatementAsyncWorker(ODBCStatement *odbcStatementObject, Napi::Function& callback) : Napi::AsyncWorker(callback), + odbcStatementObject(odbcStatementObject), + data(odbcStatementObject->data) {} -void ODBCStatement::UV_AfterBind(uv_work_t* req, int status) { - DEBUG_PRINTF("ODBCStatement::UV_AfterBind\n"); - - bind_work_data* data = (bind_work_data *)(req->data); - - Nan::HandleScope scope; - - //an easy reference to the statment object - ODBCStatement* self = data->stmt->self(); - - //Check if there were errors - if(data->result == SQL_ERROR) { - ODBC::CallbackSQLError( - SQL_HANDLE_STMT, - self->m_hSTMT, - data->cb); - } - else { - Local info[2]; + ~CloseStatementAsyncWorker() {} +}; - info[0] = Nan::Null(); - info[1] = Nan::True(); +Napi::Value ODBCStatement::Close(const Napi::CallbackInfo& info) { - Nan::TryCatch try_catch; + DEBUG_PRINTF("ODBCStatement::Close\n"); - data->cb->Call( 2, info); + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); - if (try_catch.HasCaught()) { - Nan::FatalException(try_catch); - } - } + Napi::Function callback = info[0].As(); - self->Unref(); - delete data->cb; - - free(data); - free(req); -} - -/* - * CloseSync - */ - -NAN_METHOD(ODBCStatement::CloseSync) { - DEBUG_PRINTF("ODBCStatement::CloseSync\n"); - - Nan::HandleScope scope; - - OPT_INT_ARG(0, closeOption, SQL_DESTROY); - - ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); - - DEBUG_PRINTF("ODBCStatement::CloseSync closeOption=%i\n", - closeOption); - - if (closeOption == SQL_DESTROY) { - stmt->Free(); - } - else { - uv_mutex_lock(&ODBC::g_odbcMutex); - - SQLFreeStmt(stmt->m_hSTMT, closeOption); - - uv_mutex_unlock(&ODBC::g_odbcMutex); - } + CloseStatementAsyncWorker *worker = new CloseStatementAsyncWorker(this, callback); + worker->Queue(); - info.GetReturnValue().Set(Nan::True()); + return env.Undefined(); } diff --git a/src/odbc_statement.h b/src/odbc_statement.h old mode 100644 new mode 100755 index 0aa5afa..56b05a8 --- a/src/odbc_statement.h +++ b/src/odbc_statement.h @@ -17,118 +17,27 @@ #ifndef _SRC_ODBC_STATEMENT_H #define _SRC_ODBC_STATEMENT_H -#include +#include +// #include -class ODBCStatement : public Nan::ObjectWrap { +class ODBCStatement : public Napi::ObjectWrap { public: - static Nan::Persistent constructor; - static void Init(v8::Handle exports); - - void Free(); - - protected: - ODBCStatement() {}; - - explicit ODBCStatement(HENV hENV, HDBC hDBC, HSTMT hSTMT): - Nan::ObjectWrap(), - m_hENV(hENV), - m_hDBC(hDBC), - m_hSTMT(hSTMT) {}; - - ~ODBCStatement(); - - //constructor -public: - static NAN_METHOD(New); - - //async methods - static NAN_METHOD(Execute); -protected: - static void UV_Execute(uv_work_t* work_req); - static void UV_AfterExecute(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(ExecuteDirect); -protected: - static void UV_ExecuteDirect(uv_work_t* work_req); - static void UV_AfterExecuteDirect(uv_work_t* work_req, int status); + static Napi::FunctionReference constructor; -public: - static NAN_METHOD(ExecuteNonQuery); -protected: - static void 
UV_ExecuteNonQuery(uv_work_t* work_req); - static void UV_AfterExecuteNonQuery(uv_work_t* work_req, int status); - -public: - static NAN_METHOD(Prepare); -protected: - static void UV_Prepare(uv_work_t* work_req); - static void UV_AfterPrepare(uv_work_t* work_req, int status); + static Napi::Object Init(Napi::Env env, Napi::Object exports); -public: - static NAN_METHOD(Bind); -protected: - static void UV_Bind(uv_work_t* work_req); - static void UV_AfterBind(uv_work_t* work_req, int status); - - //sync methods -public: - static NAN_METHOD(CloseSync); - static NAN_METHOD(ExecuteSync); - static NAN_METHOD(ExecuteDirectSync); - static NAN_METHOD(ExecuteNonQuerySync); - static NAN_METHOD(PrepareSync); - static NAN_METHOD(BindSync); -protected: + SQLHENV hENV; + SQLHDBC hDBC; + QueryData *data; - struct Fetch_Request { - Nan::Callback* callback; - ODBCStatement *objResult; - SQLRETURN result; - }; - - ODBCStatement *self(void) { return this; } + SQLRETURN Free(); - protected: - HENV m_hENV; - HDBC m_hDBC; - HSTMT m_hSTMT; - - Parameter *params; - int paramCount; - - uint16_t *buffer; - int bufferLength; - Column *columns; - short colCount; -}; - -struct execute_direct_work_data { - Nan::Callback* cb; - ODBCStatement *stmt; - int result; - void *sql; - int sqlLen; -}; - -struct execute_work_data { - Nan::Callback* cb; - ODBCStatement *stmt; - int result; -}; - -struct prepare_work_data { - Nan::Callback* cb; - ODBCStatement *stmt; - int result; - void *sql; - int sqlLen; -}; + explicit ODBCStatement(const Napi::CallbackInfo& info); + ~ODBCStatement(); -struct bind_work_data { - Nan::Callback* cb; - ODBCStatement *stmt; - int result; + Napi::Value Prepare(const Napi::CallbackInfo& info); + Napi::Value Bind(const Napi::CallbackInfo& info); + Napi::Value Execute(const Napi::CallbackInfo& info); + Napi::Value Close(const Napi::CallbackInfo& info); }; - #endif diff --git a/src/strptime.c b/src/strptime.c old mode 100644 new mode 100755 diff --git a/src/strptime.h b/src/strptime.h old mode 100644 new mode 100755 diff --git a/test/bench-prepare-bind-execute-closeSync.js b/test/bench-prepare-bind-execute-closeSync.js deleted file mode 100644 index d858fa0..0000000 --- a/test/bench-prepare-bind-execute-closeSync.js +++ /dev/null @@ -1,60 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery3(function () { - finish(); - }); -}); - -function issueQuery3(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? 
as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bind([x], function (err) { - if (err) { - console.log(err); - return finish(); - } - - //console.log(x); - - stmt.execute(cb); - }); - })(x); - } - - function cb (err, result) { - if (err) { - console.error(err); - return finish(); - } - - //console.log(result.fetchAllSync()); - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-bind-executeNonQuery.js b/test/bench-prepare-bind-executeNonQuery.js deleted file mode 100644 index f6c5d2a..0000000 --- a/test/bench-prepare-bind-executeNonQuery.js +++ /dev/null @@ -1,54 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery2(function () { - finish(); - }); -}); - -function issueQuery2(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bind([x], function (err) { - if (err) { - console.log(err); - return finish(); - } - - stmt.executeNonQuery(cb); - }); - })(x); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-bindSync-execute-closeSync.js b/test/bench-prepare-bindSync-execute-closeSync.js deleted file mode 100644 index cbf0f67..0000000 --- a/test/bench-prepare-bindSync-execute-closeSync.js +++ /dev/null @@ -1,50 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery3(function () { - finish(); - }); -}); - -function issueQuery3(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? 
as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bindSync([x]); - stmt.execute(cb); - })(x); - } - - function cb (err, result) { - if (err) { - console.error(err); - return finish(); - } - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-bindSync-executeNonQuery.js b/test/bench-prepare-bindSync-executeNonQuery.js deleted file mode 100644 index f26da82..0000000 --- a/test/bench-prepare-bindSync-executeNonQuery.js +++ /dev/null @@ -1,48 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery2(function () { - finish(); - }); -}); - -function issueQuery2(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bindSync([x]); - stmt.executeNonQuery(cb); - })(x); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-execute-closeSync.js b/test/bench-prepare-execute-closeSync.js deleted file mode 100644 index c4d1e53..0000000 --- a/test/bench-prepare-execute-closeSync.js +++ /dev/null @@ -1,51 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery3(function () { - finish(); - }); -}); - -function issueQuery3(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.execute([x], cb); - })(x); - } - - function cb (err, result) { - if (err) { - console.error(err); - return finish(); - } - - //console.log(result.fetchAllSync()); - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-executeNonQuery.js b/test/bench-prepare-executeNonQuery.js deleted file mode 100644 index 14b9320..0000000 --- a/test/bench-prepare-executeNonQuery.js +++ /dev/null @@ -1,47 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery2(function () { - finish(); - }); -}); - -function issueQuery2(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? 
as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.executeNonQuery([x], cb); - })(x); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-prepare-not.js b/test/bench-prepare-not.js deleted file mode 100644 index 8596da4..0000000 --- a/test/bench-prepare-not.js +++ /dev/null @@ -1,43 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 10000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery1(function () { - finish(); - }); -}); - -function issueQuery1(done) { - var count = 0 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - db.query("select 1 + ? as test", [1], cb); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Query", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-query-fetch-parameters.js b/test/bench-query-fetch-parameters.js deleted file mode 100644 index 746ee5e..0000000 --- a/test/bench-query-fetch-parameters.js +++ /dev/null @@ -1,44 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - function iteration() { - db.query("select ? + ?, ? 
as test", [Math.floor(Math.random() * 1000), Math.floor(Math.random() * 1000), "This is a string"], cb); - } - - iteration() - - function cb (err, result) { - if (err) { - console.error("query: ", err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return finish(); - } else { - iteration(); - } - } - - function finish() { - db.close(function () {}); - } -} diff --git a/test/bench-query-fetch.js b/test/bench-query-fetch.js deleted file mode 100644 index 241589f..0000000 --- a/test/bench-query-fetch.js +++ /dev/null @@ -1,63 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - function iteration() { - db.queryResult("select 1 + 1 as test", cb); - } - - iteration() - - function cb (err, result) { - if (err) { - console.error("queryResult: ", err); - return finish(); - } - - fetchAll(result); - } - - function fetchAll(rs) { - rs.fetch(function (err, data) { - if (err) { - console.error(err); - return finish(); - } - - //if data is null, then no more data - if (!data) { - rs.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return finish(); - } else { - iteration() - } - } - else { - fetchAll(rs); - } - }); - } - - function finish() { - db.close(function () {}); - } -} diff --git a/test/bench-query-fetchAll.js b/test/bench-query-fetchAll.js deleted file mode 100644 index 30816e8..0000000 --- a/test/bench-query-fetchAll.js +++ /dev/null @@ -1,53 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - function iteration() { - db.queryResult("select 1 + 1 as test", cb); - } - - iteration(); - - function cb (err, result) { - if (err) { - console.error(err); - return finish(); - } - - result.fetchAll(function (err, data) { - if (err) { - console.error(err); - return finish(); - } - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return finish(); - } else { - iteration(); - } - }); - } - - function finish() { - db.close(function () {}); - } -} diff --git a/test/bench-query-fetchAllSync.js b/test/bench-query-fetchAllSync.js deleted file mode 100644 index e539077..0000000 --- a/test/bench-query-fetchAllSync.js +++ /dev/null @@ -1,43 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - db.queryResult("select 1 + 1 as test", cb); - } - - function cb (err, 
result) { - if (err) { - console.error(err); - return finish(); - } - - var data = result.fetchAllSync(); - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return finish(); - } - } - - function finish() { - db.close(function () {}); - } -} \ No newline at end of file diff --git a/test/bench-query.js b/test/bench-query.js deleted file mode 100644 index c4e4c90..0000000 --- a/test/bench-query.js +++ /dev/null @@ -1,40 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - db.query("select 1 + 1 as test", cb); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return finish(); - } - } -} - -function finish() { - db.close(function () {}); -} diff --git a/test/bench-querySync-fetchArray.js b/test/bench-querySync-fetchArray.js deleted file mode 100644 index 2674dd1..0000000 --- a/test/bench-querySync-fetchArray.js +++ /dev/null @@ -1,29 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database({ fetchMode : odbc.FETCH_ARRAY }); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - var data = db.querySync("select 1 + 1 as test"); - count += 1; - } - - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - - db.close(function () {}); -} \ No newline at end of file diff --git a/test/bench-querySync-parameters.js b/test/bench-querySync-parameters.js deleted file mode 100644 index dca0f10..0000000 --- a/test/bench-querySync-parameters.js +++ /dev/null @@ -1,27 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - db.querySync("select ? + ?, ? 
as test", [Math.floor(Math.random() * 1000), Math.floor(Math.random() * 1000), "This is a string"]); - } - - var elapsed = new Date().getTime() - time; - console.log("%d queries issued in %d seconds, %d/sec", iterations, elapsed/1000, Math.floor(iterations/(elapsed/1000))); - - db.close(function () {}); -} diff --git a/test/bench-querySync.js b/test/bench-querySync.js deleted file mode 100644 index 8a8a894..0000000 --- a/test/bench-querySync.js +++ /dev/null @@ -1,29 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery(); -}); - -function issueQuery() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - var data = db.querySync("select 1 + 1 as test"); - count += 1; - } - - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - - db.close(function () { }); -} \ No newline at end of file diff --git a/test/common.js b/test/common.js deleted file mode 100644 index 552cfae..0000000 --- a/test/common.js +++ /dev/null @@ -1,57 +0,0 @@ -var odbc = require("../"); -//odbc.library = '/usr/lib/odbc/libsqlite3odbc-0.91'; -//odbc.library = '/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc'; -//odbc.library = '/opt/sqlncli-11.0.1790.0/lib64/libsqlncli-11.0'; - -exports.connectionString = "DRIVER={SQLite3};DATABASE=data/sqlite-test.db"; -exports.title = "Sqlite3"; -exports.dialect = "sqlite"; -exports.user = ""; - -if (process.argv.length === 3) { - exports.connectionString = process.argv[2]; -} - -exports.connectionObject = { - DRIVER : "{SQLITE3}", - DATABASE : "data/sqlite-test.db" -}; - -try { - exports.testConnectionStrings = require('./config.testConnectionStrings.json'); -} -catch (e) { - exports.testConnectionStrings = [{ title : exports.title, connectionString : exports.connectionString, dialect : exports.dialect }]; -} - -try { - exports.benchConnectionStrings = require('./config.benchConnectionStrings.json'); -} -catch (e) { - exports.benchConnectionStrings = [{ title : exports.title, connectionString : exports.connectionString, dialect : exports.dialect }]; -} - -if (process.argv.length === 3) { - //look through the testConnectionStrings to see if there is a title that matches - //what was requested. 
- var lookup = process.argv[2]; - - exports.testConnectionStrings.forEach(function (connectionString) { - if (connectionString && (connectionString.title == lookup || connectionString.connectionString == lookup)) { - exports.connectionString = connectionString.connectionString; - exports.dialect = connectionString.dialect; - exports.user = connectionString.user; - } - }); -} - -exports.databaseName = "test"; -exports.tableName = "NODE_ODBC_TEST_TABLE"; - -exports.dropTables = function (db, cb) { - db.query("drop table " + exports.tableName, cb); -}; - -exports.createTables = function (db, cb) { - db.query("create table " + exports.tableName + " (COLINT INTEGER, COLDATETIME DATETIME, COLTEXT TEXT)", cb); -}; diff --git a/test/config.benchConnectionStrings.json b/test/config.benchConnectionStrings.json deleted file mode 100644 index 437710d..0000000 --- a/test/config.benchConnectionStrings.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { "title" : "Sqlite3", "connectionString" : "DRIVER={SQLite3};DATABASE=data/sqlite-test.db" } - , { "title" : "MySQL-Local", "connectionString" : "DRIVER={MySQL};DATABASE=test;HOST=localhost;USER=test;" } - , { "title" : "MSSQL-FreeTDS-Remote", "connectionString" : "DRIVER={FreeTDS};SERVERNAME=sql2;DATABASE=test;UID=test;PWD=test;AutoTranslate=yes" } - , { "title" : "MSSQL-NativeCLI-Remote", "connectionString" : "DRIVER={SQL Server Native Client 11.0};SERVER=sql2;DATABASE=test;UID=test;PWD=test;" } -] diff --git a/test/config.testConnectionStrings.json b/test/config.testConnectionStrings.json deleted file mode 100644 index 1954bfa..0000000 --- a/test/config.testConnectionStrings.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { "title" : "Sqlite3", "connectionString" : "DRIVER={SQLite3};DATABASE=data/sqlite-test.db", "dialect" : "sqlite", "user": "" } - , { "title" : "MySQL-Local", "connectionString" : "DRIVER={MySQL};DATABASE=test;HOST=localhost;SOCKET=/var/run/mysqld/mysqld.sock;USER=test;", "dialect" : "mysql", "user" : "test" } - , { "title" : "MSSQL-FreeTDS-Remote", "connectionString" : "DRIVER={FreeTDS};SERVERNAME=sql2;DATABASE=test;UID=test;PWD=test;AutoTranslate=yes;TEXTSIZE=10000000", "dialect" : "mssql", "user" : "test" } - , { "title" : "MSSQL-NativeCLI-Remote", "connectionString" : "DRIVER={SQL Server Native Client 11.0};SERVER=sql2;DATABASE=test;UID=test;PWD=test;", "dialect": "mssql", "user" : "test" } -] diff --git a/test/connection/beginTransaction.js b/test/connection/beginTransaction.js new file mode 100755 index 0000000..9c076be --- /dev/null +++ b/test/connection/beginTransaction.js @@ -0,0 +1,229 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.beginTransaction([callback])...', () => { + let connection = null; + + beforeEach(() => { + connection = new Connection(`${process.env.CONNECTION_STRING}`); + }); + + afterEach(async () => { + await connection.close(); + connection = null; + }); + + describe('...with callbacks...', () => { + it('...should set .autocommit property to false.', (done) => { + assert.deepEqual(connection.autocommit, true); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + assert.deepEqual(connection.autocommit, false); + done(); + }); + }); + it('...should be idempotent if called multiple times before rollback().', (done) => { + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 
'rolledback', 20)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + connection.beginTransaction((error4) => { + assert.deepEqual(error4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.notDeepEqual(result5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5.count, -1); + assert.deepEqual(result5[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + connection.rollback((error6) => { + assert.deepEqual(error6, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error7, result7) => { + assert.deepEqual(error7, null); + assert.notDeepEqual(result7, null); + assert.deepEqual(result7.length, 0); + assert.deepEqual(result7.count, -1); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + it('...should be idempotent if called multiple times before commit().', (done) => { + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection.beginTransaction((error4) => { + assert.deepEqual(error4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.notDeepEqual(result5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5.count, -1); + assert.deepEqual(result5[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection.commit((error6) => { + assert.deepEqual(error6, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error7, result7) => { + assert.deepEqual(error7, null); + assert.notDeepEqual(result7, null); + assert.deepEqual(result7.length, 1); + assert.deepEqual(result7.count, -1); + assert.deepEqual(result7[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection.close((closeError) => { + assert.deepEqual(closeError, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); + it('...should make transactional queries visible only to the connection that the transaction was started on until commit() is called (at transactional isolation level \'read committed\').', (done) => { + const connection1 = new Connection(`${process.env.CONNECTION_STRING};CMT=1`); // set commitment level to 1 (Read committed) + const connection2 = new Connection(`${process.env.CONNECTION_STRING};CMT=1;CONCURRENTACCESSRESOLUTION=1`); // set commitment level to 1 (Read committed) + connection1.beginTransaction((error0) => { + assert.deepEqual(error0, null); + connection1.query(`INSERT INTO 
${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection1.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} FETCH1`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + connection2.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} FETCH2`, (error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.length, 0); + assert.deepEqual(result4.count, -1); + connection1.commit((error5) => { + assert.deepEqual(error5, null); + connection2.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error6, result6) => { + assert.deepEqual(error6, null); + assert.notDeepEqual(result6, null); + assert.deepEqual(result6.length, 1); + assert.deepEqual(result6.count, -1); + assert.deepEqual(result6[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection1.close((error7) => { + assert.deepEqual(error7, null); + connection2.close((error8) => { + assert.deepEqual(error8, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t hide queries that occur on other connections.', (done) => { + const connection1 = new Connection(`${process.env.CONNECTION_STRING}`); + const connection2 = new Connection(`${process.env.CONNECTION_STRING}`); + connection1.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection2.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection1.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + connection1.close((error4) => { + assert.deepEqual(error4, null); + connection2.close((error5) => { + assert.deepEqual(error5, null); + done(); + }); + }); + }); + }); + }); + }); + }); // '...with callbacks...' 
+ describe('...with promises...', () => { + it('...should set .autocommit property to false.', async () => { + assert.deepEqual(connection.autocommit, true); + await connection.beginTransaction(); + assert.deepEqual(connection.autocommit, false); + }); + it('...should be idempotent if called multiple times before rollback().', async () => { + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + await connection.beginTransaction(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + await connection.rollback(); + const result4 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.length, 0); + assert.deepEqual(result4.count, -1); + }); + it('...should be idempotent if called multiple times before commit().', async () => { + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.beginTransaction(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.commit(); + const result4 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.length, 1); + assert.deepEqual(result4.count, -1); + assert.deepEqual(result4[0], { ID: 1, NAME: 'committed', AGE: 10 }); + }); + }); // '...with promises...' +}); // '.beginTransaction([callback])...' 
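The promise-based transaction workflow exercised by the `beginTransaction()` tests above can be sketched in a few lines. This is a minimal, illustrative sketch assuming the same `CONNECTION_STRING`, `DB_SCHEMA`, and `DB_TABLE` environment variables the tests read through `dotenv`:

```javascript
// Minimal sketch of the transaction workflow the tests above assert:
// beginTransaction() turns autocommit off (and is idempotent until the
// transaction ends), commit() publishes the work, rollback() discards it.
const { Connection } = require('odbc');

async function transactionSketch() {
  const connection = new Connection(process.env.CONNECTION_STRING);
  try {
    await connection.beginTransaction(); // connection.autocommit is now false
    await connection.query(
      `INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`
    );
    // With a read-committed isolation level, other connections cannot see
    // this row until commit() is called on this connection.
    await connection.commit();
  } catch (error) {
    await connection.rollback(); // discard everything since beginTransaction()
    throw error;
  } finally {
    await connection.close();
  }
}

transactionSketch().catch(console.error);
```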
diff --git a/test/connection/callProcedure.js b/test/connection/callProcedure.js new file mode 100755 index 0000000..06b6380 --- /dev/null +++ b/test/connection/callProcedure.js @@ -0,0 +1,19 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.callProcedure(procedureName, parameters, [callback])...', () => { + describe('...with callbacks...', () => { + it('...should place correct result in and out parameter.', (done) => { + const array = [undefined]; + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.callProcedure(null, `${process.env.DB_SCHEMA}`, `${process.env.DB_STOREDPROCEDURE}`, array, (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + done(); + }); + }); + }); +}); diff --git a/test/connection/close.js b/test/connection/close.js new file mode 100755 index 0000000..101624d --- /dev/null +++ b/test/connection/close.js @@ -0,0 +1,78 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.close([callback])...', () => { + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + const CLOSE_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to connection.close({function}[optional]).', + }; + + const CLOSE_CALLBACK = () => {}; + + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + assert.throws(() => { + connection.close(1, []); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close(1, CLOSE_CALLBACK); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close({}, CLOSE_CALLBACK); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close(null); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close(null, CLOSE_CALLBACK); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close('CLOSE'); + }, CLOSE_TYPE_ERROR); + assert.throws(() => { + connection.close('CLOSE', CLOSE_CALLBACK); + }, CLOSE_TYPE_ERROR); + await connection.close(); + }); + describe('...with callbacks...', () => { + it('...should set .connected property to false.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + assert.deepEqual(connection.connected, true); + connection.close((error1) => { + assert.deepEqual(error1, null); + assert.deepEqual(connection.connected, false); + done(); + }); + }); + it('...shouldn\'t allow queries after close() is called.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.close((error1) => { + assert.deepEqual(error1, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error2, result2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + assert.deepEqual(result2); + done(); + }); + }); + }); + }); // ...with callbacks... 
+ describe('...with promises...', () => { + it('...should set .connected property to false.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + assert.deepEqual(connection.connected, true); + await connection.close(); + assert.deepEqual(connection.connected, false); + }); + it('...shouldn\'t allow queries after close() is called.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.close(); + assert.rejects(async () => { + await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + }); + }); + }); // ...with promises... +}); // '.close([callback])...' diff --git a/test/connection/columns.js b/test/connection/columns.js new file mode 100755 index 0000000..1e20f3a --- /dev/null +++ b/test/connection/columns.js @@ -0,0 +1,164 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const odbc = require('../../build/Release/odbc.node'); +const { Connection } = require('../../'); + +describe('.columns(catalog, schema, table, column, callback)...', () => { + let connection = null; + beforeEach(() => { + connection = new Connection(`${process.env.CONNECTION_STRING}`); + }); + + afterEach(async () => { + await connection.close(); + connection = null; + }); + describe('...with callbacks...', () => { + it('...should return information about all columns of a table.', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.columns(null, `${process.env.DB_SCHEMA}`, `${process.env.DB_TABLE}`, null, (error, results) => { + assert.strictEqual(error, null); + assert.strictEqual(results.length, 3); + assert.strictEqual(results.count, 3); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'TYPE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_SIZE', dataType: odbc.SQL_INTEGER }, + { name: 'BUFFER_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'DECIMAL_DIGITS', dataType: odbc.SQL_SMALLINT }, + { name: 'NUM_PREC_RADIX', dataType: odbc.SQL_SMALLINT }, + { name: 'NULLABLE', dataType: odbc.SQL_SMALLINT }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_DEF', dataType: odbc.SQL_VARCHAR }, + { name: 'SQL_DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'SQL_DATETIME_SUB', dataType: odbc.SQL_SMALLINT }, + { name: 'CHAR_OCTET_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'ORDINAL_POSITION', dataType: odbc.SQL_INTEGER }, + { name: 'IS_NULLABLE', dataType: odbc.SQL_VARCHAR }, + ]); + const idColumn = results[0]; + assert.deepEqual(idColumn.COLUMN_NAME, 'ID'); + assert.deepEqual(idColumn.DATA_TYPE, odbc.SQL_INTEGER); + assert.deepEqual(idColumn.NULLABLE, odbc.SQL_NULLABLE); + + const nameColumn = results[1]; + assert.deepEqual(nameColumn.COLUMN_NAME, 'NAME'); + assert.deepEqual(nameColumn.DATA_TYPE, odbc.SQL_VARCHAR); + assert.deepEqual(nameColumn.NULLABLE, odbc.SQL_NULLABLE); + + const ageColumn = results[2]; + assert.deepEqual(ageColumn.COLUMN_NAME, 'AGE'); + assert.deepEqual(ageColumn.DATA_TYPE, odbc.SQL_INTEGER); + assert.deepEqual(ageColumn.NULLABLE, odbc.SQL_NULLABLE); + done(); + }); + }); + it('...should return empty with bad parameters.', (done) => { + // const connection = new 
Connection(`${process.env.CONNECTION_STRING}`); + connection.columns(null, 'bad schema name', 'bad table name', null, (error, results) => { + assert.strictEqual(error, null); + assert.strictEqual(results.length, 0); + assert.strictEqual(results.count, 0); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'TYPE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_SIZE', dataType: odbc.SQL_INTEGER }, + { name: 'BUFFER_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'DECIMAL_DIGITS', dataType: odbc.SQL_SMALLINT }, + { name: 'NUM_PREC_RADIX', dataType: odbc.SQL_SMALLINT }, + { name: 'NULLABLE', dataType: odbc.SQL_SMALLINT }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_DEF', dataType: odbc.SQL_VARCHAR }, + { name: 'SQL_DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'SQL_DATETIME_SUB', dataType: odbc.SQL_SMALLINT }, + { name: 'CHAR_OCTET_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'ORDINAL_POSITION', dataType: odbc.SQL_INTEGER }, + { name: 'IS_NULLABLE', dataType: odbc.SQL_VARCHAR }, + ]); + done(); + }); + }); + }); // ...with callbacks... + describe('...with promises...', () => { + it('...should return information about all columns of a table.', async () => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const results = await connection.columns(null, `${process.env.DB_SCHEMA}`, `${process.env.DB_TABLE}`, null); + assert.strictEqual(results.length, 3); + assert.strictEqual(results.count, 3); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'TYPE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_SIZE', dataType: odbc.SQL_INTEGER }, + { name: 'BUFFER_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'DECIMAL_DIGITS', dataType: odbc.SQL_SMALLINT }, + { name: 'NUM_PREC_RADIX', dataType: odbc.SQL_SMALLINT }, + { name: 'NULLABLE', dataType: odbc.SQL_SMALLINT }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_DEF', dataType: odbc.SQL_VARCHAR }, + { name: 'SQL_DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'SQL_DATETIME_SUB', dataType: odbc.SQL_SMALLINT }, + { name: 'CHAR_OCTET_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'ORDINAL_POSITION', dataType: odbc.SQL_INTEGER }, + { name: 'IS_NULLABLE', dataType: odbc.SQL_VARCHAR }, + ]); + const idColumn = results[0]; + assert.deepEqual(idColumn.COLUMN_NAME, 'ID'); + assert.deepEqual(idColumn.DATA_TYPE, odbc.SQL_INTEGER); + assert.deepEqual(idColumn.NULLABLE, odbc.SQL_NULLABLE); + + const nameColumn = results[1]; + assert.deepEqual(nameColumn.COLUMN_NAME, 'NAME'); + assert.deepEqual(nameColumn.DATA_TYPE, odbc.SQL_VARCHAR); + assert.deepEqual(nameColumn.NULLABLE, odbc.SQL_NULLABLE); + + const ageColumn = results[2]; + assert.deepEqual(ageColumn.COLUMN_NAME, 'AGE'); + assert.deepEqual(ageColumn.DATA_TYPE, odbc.SQL_INTEGER); + assert.deepEqual(ageColumn.NULLABLE, odbc.SQL_NULLABLE); + }); + it('...should return empty with bad parameters.', async () => { + // const connection = new 
Connection(`${process.env.CONNECTION_STRING}`); + const results = await connection.columns(null, 'bad schema name', 'bad table name', null); + assert.strictEqual(results.length, 0); + assert.strictEqual(results.count, 0); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'TYPE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_SIZE', dataType: odbc.SQL_INTEGER }, + { name: 'BUFFER_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'DECIMAL_DIGITS', dataType: odbc.SQL_SMALLINT }, + { name: 'NUM_PREC_RADIX', dataType: odbc.SQL_SMALLINT }, + { name: 'NULLABLE', dataType: odbc.SQL_SMALLINT }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + { name: 'COLUMN_DEF', dataType: odbc.SQL_VARCHAR }, + { name: 'SQL_DATA_TYPE', dataType: odbc.SQL_SMALLINT }, + { name: 'SQL_DATETIME_SUB', dataType: odbc.SQL_SMALLINT }, + { name: 'CHAR_OCTET_LENGTH', dataType: odbc.SQL_INTEGER }, + { name: 'ORDINAL_POSITION', dataType: odbc.SQL_INTEGER }, + { name: 'IS_NULLABLE', dataType: odbc.SQL_VARCHAR }, + ]); + }); + }); // ...with promises... +}); diff --git a/test/connection/commit.js b/test/connection/commit.js new file mode 100755 index 0000000..8c2c82a --- /dev/null +++ b/test/connection/commit.js @@ -0,0 +1,167 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.commit([callback])...', () => { + describe('...with callbacks...', () => { + it('...should commit all queries from after beginTransaction() was called.', (done) => { + try { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + connection.commit((error4) => { + assert.deepEqual(error4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.notDeepEqual(result5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5.count, -1); + assert.deepEqual(result5[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + connection.close((error6) => { + assert.deepEqual(error6); + done(); + }); + }); + }); + }); + }); + }); + } catch (error) { + done(error); + } + }); + it('...shouldn\'t have adverse effects if called outside beginTransaction().', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + connection.commit((error2) => { + 
assert.deepEqual(error2, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + connection.commit((error4) => { + assert.deepEqual(error4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result5.count, -1); + connection.close((error6) => { + assert.deepEqual(error6, null); + done(); + }); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t commit if called after a transaction is already ended with rollback().', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.rollback((error3) => { + assert.deepEqual(error3, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.length, 0); + assert.deepEqual(result4.count, -1); + connection.commit((error5) => { + assert.deepEqual(error5, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error6, result6) => { + assert.deepEqual(error6, null); + assert.notDeepEqual(result6, null); + assert.deepEqual(result6.length, 0); + assert.deepEqual(result6.count, -1); + connection.close((error7) => { + assert.deepEqual(error7, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); // '...with callbacks...' 
+ describe('...with promises...', () => { + it('...should commit all queries from after beginTransaction() was called.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.commit(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.close(); + }); + it('...shouldn\'t have adverse effects if called outside beginTransaction().', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + await connection.commit(); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.commit(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.close(); + }); + it('...shouldn\'t commit if called after a transaction is already ended with rollback().', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + await connection.rollback(); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 0); + assert.deepEqual(result2.count, -1); + await connection.commit(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.length, 0); + assert.deepEqual(result3.count, -1); + await connection.close(); + }); + }); // '...with promises...' +}); // '.commit([callback])...'
diff --git a/test/connection/constructor.js b/test/connection/constructor.js new file mode 100755 index 0000000..a182dde --- /dev/null +++ b/test/connection/constructor.js @@ -0,0 +1,21 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('new Connection(connectionString)...', () => { + it('...should return an open Connection when passed a valid connection string.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + assert.notDeepEqual(connection, null); + assert.deepEqual(connection instanceof Connection, true); + assert.deepEqual(connection.connected, true); + assert.deepEqual(connection.autocommit, true); + await connection.close(); + }); + it('...should throw an Error when passed an invalid connection string.', async () => { + assert.throws(() => { + const connection = new Connection('abc123!@#'); // eslint-disable-line no-unused-vars + }); + }); +}); diff --git a/test/connection/query.js b/test/connection/query.js new file mode 100755 index 0000000..9fd4e42 --- /dev/null +++ b/test/connection/query.js @@ -0,0 +1,113 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.query(sql, [parameters], [callback])...', () => { + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + const QUERY_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to connection.query({string}, {array}[optional], {function}[optional]).', + }; + const QUERY_CALLBACK = () => {}; + const QUERY_STRING = `SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`; + + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + assert.throws(() => { + connection.query(); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(QUERY_CALLBACK); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(1, []); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(1, [], QUERY_CALLBACK); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(QUERY_STRING, {}); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(QUERY_STRING, {}, QUERY_CALLBACK); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(1, 1); + }, QUERY_TYPE_ERROR); + assert.throws(() => { + connection.query(1, 1, QUERY_CALLBACK); + }, QUERY_TYPE_ERROR); + await connection.close(); + }); + describe('...with callbacks...', () => { + it('...should correctly identify function signature with .query({string}, {function}).', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should correctly identify 
function signature with .query({string}, {array}, {function})', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, [1, 'committed', 10], (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + connection.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + }); // ...with callbacks... + describe('...with promises...', () => { + it('...should correctly identify function signature with .query({string}).', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.close(); + }); + it('...should correctly identify function signature with .query({string}, {array})', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, [1, 'committed', 10]); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.length, 0); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.close(); + }); + }); // ...with promises... +}); // '.query(sql, [parameters], [callback])...' 
diff --git a/test/connection/rollback.js b/test/connection/rollback.js new file mode 100755 index 0000000..ed98c83 --- /dev/null +++ b/test/connection/rollback.js @@ -0,0 +1,275 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.rollback(callback)...', () => { + describe('...with callbacks...', () => { + it('...should rollback all queries from after beginTransaction() was called.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error3, result3) => { + assert.deepEqual(error3, null); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3.count, -1); + assert.deepEqual(result3[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + connection.rollback((error4) => { + assert.deepEqual(error4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.deepEqual(result5.length, 0); + assert.deepEqual(result5.count, -1); + connection.close((error6) => { + assert.deepEqual(error6, null); + done(); + }); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t rollback commits from before beginTransaction() was called.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result2.count, -1); + connection.beginTransaction((error3) => { + assert.deepEqual(error3, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`, (error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.deepEqual(result5.length, 2); + assert.deepEqual(result5[1], { ID: 2, NAME: 'rolledback', AGE: 20 }); + assert.deepEqual(result5.count, -1); + connection.rollback((error6) => { + assert.deepEqual(error6, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error7, result7) => { + assert.deepEqual(error7, null); + assert.deepEqual(result7.length, 1); + assert.deepEqual(result7[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result7.count, -1); + connection.close((error8) => { + assert.deepEqual(error8, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t affect queries when beginTransaction() hasn\'t been called.', (done) => { + const connection = new
Connection(`${process.env.CONNECTION_STRING}`); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error1, result1) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result2.count, -1); + connection.rollback((error3) => { + assert.deepEqual(error3, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error4, result7) => { + assert.deepEqual(error4, null); + assert.deepEqual(result7.length, 1); + assert.deepEqual(result7[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result7.count, -1); + connection.close((error5) => { + assert.deepEqual(error5, null); + done(); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t rollback if called after a transaction is already ended with commit().', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.commit((error3) => { + assert.deepEqual(error3, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error4, result4) => { + assert.deepEqual(error4, null); + assert.deepEqual(result4.length, 1); + assert.deepEqual(result4[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result4.count, -1); + connection.rollback((error5) => { + assert.deepEqual(error5, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error6, result6) => { + assert.deepEqual(error6, null); + assert.deepEqual(result6.length, 1); + assert.deepEqual(result6[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result6.count, -1); + connection.close((error7) => { + assert.deepEqual(error7, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + it('...shouldn\'t rollback if called after a transaction is already ended with rollback().', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.beginTransaction((error1) => { + assert.deepEqual(error1, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error2, result2) => { + assert.deepEqual(error2, null); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + connection.rollback((error3) => { + assert.deepEqual(error3, null); + connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`, (error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + assert.deepEqual(result4.count, 1); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result5.count, -1); + connection.rollback((error6) => { + 
assert.deepEqual(error6, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error7, result7) => { + assert.deepEqual(error7, null); + assert.deepEqual(result7.length, 1); + assert.deepEqual(result7[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result7.count, -1); + connection.close((error8) => { + assert.deepEqual(error8, null); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); + }); // '...with callbacks...' + describe('...with promises...', () => { + it('...should rollback all queries from after beginTransaction() was called.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 2, NAME: 'rolledback', AGE: 20 }); + await connection.rollback(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result3.length, 0); + assert.deepEqual(result3.count, -1); + await connection.close(); + }); + it('...shouldn\'t rollback commits from before beginTransaction() was called.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.beginTransaction(); + const result3 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(2, 'rolledback', 20)`); + assert.notDeepEqual(result3, null); + assert.deepEqual(result3.count, 1); + const result4 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result4.length, 2); + assert.deepEqual(result4.count, -1); + assert.deepEqual(result4[1], { ID: 2, NAME: 'rolledback', AGE: 20 }); + await connection.rollback(); + const result5 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5.count, -1); + assert.deepEqual(result5[0], { ID: 1, NAME: 'committed', AGE: 10 }); + await connection.close(); + }); + it('...shouldn\'t affect queries when beginTransaction() hasn\'t been called.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result2.count, -1); + 
await connection.rollback(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + await connection.close(); + }); + it('...shouldn\'t rollback if called after a transaction is already ended with a commit().', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + await connection.commit(); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result2.count, -1); + await connection.rollback(); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + await connection.close(); + }); + it('...shouldn\'t rollback if called after a transaction is already ended with a rollback().', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + await connection.beginTransaction(); + const result1 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + await connection.rollback(); + const result2 = await connection.query(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'committed', 10)`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, 1); + const result3 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result3.length, 1); + assert.deepEqual(result3[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result3.count, -1); + await connection.rollback(); + const result4 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.deepEqual(result4.length, 1); + assert.deepEqual(result4[0], { ID: 1, NAME: 'committed', AGE: 10 }); + assert.deepEqual(result4.count, -1); + await connection.close(); + }); + }); // '...with promises...' +}); // '.rollback(callback)...' 
diff --git a/test/connection/tables.js b/test/connection/tables.js new file mode 100755 index 0000000..43ace39 --- /dev/null +++ b/test/connection/tables.js @@ -0,0 +1,87 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const odbc = require('../../build/Release/odbc.node'); +const { Connection } = require('../../'); + +describe('.tables(catalog, schema, table, type, callback)...', () => { + describe('...with callbacks...', () => { + it('...should return information about a table.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.tables(null, `${process.env.DB_SCHEMA}`, `${process.env.DB_TABLE}`, null, (error, results) => { + assert.strictEqual(error, null); + assert.strictEqual(results.length, 1); + assert.strictEqual(results.count, 1); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_TYPE', dataType: odbc.SQL_VARCHAR }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + ]); + const result = results[0]; + // not testing for TABLE_CAT, dependent on the system + assert.strictEqual(result.TABLE_SCHEM, `${process.env.DB_SCHEMA}`); + assert.strictEqual(result.TABLE_NAME, `${process.env.DB_TABLE}`); + assert.strictEqual(result.TABLE_TYPE, 'TABLE'); + // not testing for REMARKS, dependent on the system + done(); + }); + }); + it('...should return empty with bad parameters.', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.tables(null, 'bad schema name', 'bad table name', null, (error, results) => { + assert.strictEqual(error, null); + assert.strictEqual(results.length, 0); + assert.strictEqual(results.count, 0); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_TYPE', dataType: odbc.SQL_VARCHAR }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + ]); + done(); + }); + }); + }); // ...with callbacks... 
+ describe('...with promises...', () => { + it('...should return information about a table.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const results = await connection.tables(null, `${process.env.DB_SCHEMA}`, `${process.env.DB_TABLE}`, null); + assert.strictEqual(results.length, 1); + assert.strictEqual(results.count, 1); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_TYPE', dataType: odbc.SQL_VARCHAR }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + ]); + const result = results[0]; + // not testing for TABLE_CAT, dependent on the system + assert.strictEqual(result.TABLE_SCHEM, `${process.env.DB_SCHEMA}`); + assert.strictEqual(result.TABLE_NAME, `${process.env.DB_TABLE}`); + assert.strictEqual(result.TABLE_TYPE, 'TABLE'); + // not testing for REMARKS, dependent on the system + }); + it('...should return empty with bad parameters.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const results = await connection.tables(null, 'bad schema name', 'bad table name', null); + assert.strictEqual(results.length, 0); + assert.strictEqual(results.count, 0); + assert.deepStrictEqual(results.columns, + [ + { name: 'TABLE_CAT', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_SCHEM', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_NAME', dataType: odbc.SQL_VARCHAR }, + { name: 'TABLE_TYPE', dataType: odbc.SQL_VARCHAR }, + { name: 'REMARKS', dataType: odbc.SQL_VARCHAR }, + ]); + }); + }); // ...with promises... +}); diff --git a/test/connection/test.js b/test/connection/test.js new file mode 100755 index 0000000..a17bd04 --- /dev/null +++ b/test/connection/test.js @@ -0,0 +1,14 @@ +/* eslint-env node, mocha */ +/* eslint-disable global-require */ + +describe('Connection', () => { + require('./constructor.js'); + require('./close.js'); + require('./query.js'); + require('./beginTransaction.js'); + require('./commit.js'); + require('./rollback.js'); + require('./columns.js'); + require('./tables.js'); + require('./callProcedure.js'); +}); diff --git a/test/data/.gitignore b/test/data/.gitignore deleted file mode 100644 index f1450df..0000000 --- a/test/data/.gitignore +++ /dev/null @@ -1 +0,0 @@ -sqlite-test.db diff --git a/test/disabled/bench-insert.js b/test/disabled/bench-insert.js deleted file mode 100644 index f153864..0000000 --- a/test/disabled/bench-insert.js +++ /dev/null @@ -1,65 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - createTable(); -}); - -function createTable() { - db.query("create table bench_insert (str varchar(50))", function (err) { - if (err) { - console.error(err); - return finish(); - } - - return insertData(); - }); -} - -function dropTable() { - db.query("drop table bench_insert", function (err) { - if (err) { - console.error(err); - return finish(); - } - - return finish(); - }); -} - -function insertData() { - var count = 0 - , iterations = 10000 - , time = new Date().getTime(); - - for (var x = 0; x < iterations; x++) { - db.query("insert into bench_insert (str) values ('testing')", cb); - - } - - function cb (err) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - 
time; - - console.log("%d records inserted in %d seconds, %d/sec", iterations, elapsed/1000, iterations/(elapsed/1000)); - return dropTable(); - } - } -} - -function finish() { - db.close(function () { - console.log("connection closed"); - }); -} diff --git a/test/disabled/test-issue-13.js b/test/disabled/test-issue-13.js deleted file mode 100644 index 6743157..0000000 --- a/test/disabled/test-issue-13.js +++ /dev/null @@ -1,39 +0,0 @@ -var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err) -{ - if (err) { - console.error(err.message); - return; - } - - require('http').createServer(function (req, res) { - if (req.url == "/close") { - db.close(function () {}); - db = null; - res.end(); - return false; - } - - var query = "select 1234 union select 5345"; - - db.query(query, function(err, rows, moreResultSets) - { - if (err) { - console.error(err.message); - } - - err = null; - rows = null; - moreResultSets = null; - res.end(); - }); - }).listen(8082, "127.0.0.1"); -}); - -process.on('uncaughtException', function (err) { - console.error('uncaughtException:' + err); - console.error(err.stack); -}); diff --git a/test/disabled/test-prepare-bind-executeNonQuery.js b/test/disabled/test-prepare-bind-executeNonQuery.js deleted file mode 100644 index 4f5aa83..0000000 --- a/test/disabled/test-prepare-bind-executeNonQuery.js +++ /dev/null @@ -1,63 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , assert = require("assert") - , db = new odbc.Database() - , iterations = 100000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery2(function () { - finish(); - }); -}); - -function issueQuery2(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bind([x], function (err) { - if (err) { - console.log(err); - return finish(); - } - - stmt.executeNonQuery(function (err, result) { - cb(err, result, x); - }); - }); - })(x); - } - - function cb (err, data, x) { - if (err) { - console.error(err); - return finish(); - } - - //TODO: there's nothing to assert in this case. - //we actually need to insert data and then get - //the data back out and then assert. - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () { - console.log("connection closed"); - }); -} diff --git a/test/disabled/test-prepare-bindSync-executeNonQuery.js b/test/disabled/test-prepare-bindSync-executeNonQuery.js deleted file mode 100644 index 40aad26..0000000 --- a/test/disabled/test-prepare-bindSync-executeNonQuery.js +++ /dev/null @@ -1,50 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , iterations = 100000 - ; - -db.open(common.connectionString, function(err){ - if (err) { - console.error(err); - process.exit(1); - } - - issueQuery2(function () { - finish(); - }); -}); - -function issueQuery2(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? 
as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bindSync([x]); - stmt.executeNonQuery(cb); - })(x); - } - - function cb (err, data) { - if (err) { - console.error(err); - return finish(); - } - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.close(function () { - console.log("connection closed"); - }); -} diff --git a/test/odbc-bench.c b/test/odbc-bench.c deleted file mode 100644 index ff912e5..0000000 --- a/test/odbc-bench.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - Copyright (c) 2012, Dan VerWeire - Copyright (c) 2011, Lee Smith - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -#include -#include -#include -#include -#include -#include -#include - -#define MAX_FIELD_SIZE 1024 -#define MAX_VALUE_SIZE 1048576 - - - -int main() { - HENV m_hEnv; - HDBC m_hDBC; - HSTMT m_hStmt; - SQLRETURN ret; - SQLUSMALLINT canHaveMoreResults; - //SQLCHAR outstr[1024]; - //SQLSMALLINT outstrlen; - - if( SQL_SUCCEEDED(SQLAllocEnv( &m_hEnv )) ) { - - if( SQL_SUCCEEDED(SQLAllocHandle( SQL_HANDLE_DBC, m_hEnv, &m_hDBC )) ) { - SQLSetConnectOption( m_hDBC, SQL_LOGIN_TIMEOUT,5 ); - - ret = SQLDriverConnect( - m_hDBC, - NULL, - "DRIVER={MySQL};SERVER=localhost;USER=test;PASSWORD=;DATABASE=test;", - SQL_NTS, - NULL,//outstr, - 0,//sizeof(outstr), - NULL,//&outstrlen, - SQL_DRIVER_NOPROMPT - ); - - if( SQL_SUCCEEDED(ret) ) { - int iterations = 10000; - int i = 0; - struct timeb start; - - ftime(&start); - - for (i =0 ; i <= iterations; i ++) { - SQLAllocHandle(SQL_HANDLE_STMT, m_hDBC, &m_hStmt); - - SQLExecDirect(m_hStmt, "select 1 + 1 as test;", SQL_NTS); - - while ( SQL_SUCCEEDED(SQLFetch(m_hStmt) )) { - //printf("sql query succeeded\n"); - } - - SQLFreeHandle(SQL_HANDLE_STMT, m_hStmt); - } - - struct timeb stop; - ftime(&stop); - - double elapsed = ((stop.time * 1000 + stop.millitm) - (start.time * 1000 + start.millitm)); - - printf("%d queries issued in %f seconds, %f/sec\n", iterations, (double) elapsed / 1000, iterations/((double) elapsed / 1000)); - } - else { - printf("here3\n"); - printError("SQLDriverConnect", m_hDBC, SQL_HANDLE_DBC); - } - } - else { - printError("SQLAllocHandle - dbc", m_hEnv, SQL_HANDLE_ENV); - } - } - else { - printError("SQLAllocHandle - env", m_hEnv, SQL_HANDLE_ENV); - } - - //SQLFreeHandle(SQL_HANDLE_DBC, m_hDBC); - //SQLFreeHandle(SQL_HANDLE_ENV, m_hEnv); - - return 0; -} - -void printError(const char *fn, SQLHANDLE handle, SQLSMALLINT type) -{ - SQLINTEGER i = 0; - SQLINTEGER native; - SQLCHAR state[ 7 ]; - SQLCHAR text[256]; - SQLSMALLINT len; - SQLRETURN ret; - - printf( - "\n" - "The driver reported the following diagnostics whilst running " - "%s\n\n", - fn 
- ); - - do { - ret = SQLGetDiagRec(type, handle, ++i, state, &native, text, sizeof(text), &len ); - if (SQL_SUCCEEDED(ret)) - printf("%s:%ld:%ld:%s\n", state, (long int) i, (long int) native, text); - } - while( ret == SQL_SUCCESS ); -} diff --git a/test/pool/close.js b/test/pool/close.js new file mode 100644 index 0000000..e69de29 diff --git a/test/pool/connect.js b/test/pool/connect.js new file mode 100644 index 0000000..e69de29 diff --git a/test/pool/constructor.js b/test/pool/constructor.js new file mode 100755 index 0000000..c58a30f --- /dev/null +++ b/test/pool/constructor.js @@ -0,0 +1,11 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Pool } = require('../../'); + +describe('constructor...', () => { + it('...should return the number of open Connections as was requested.', async () => { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + }); +}); diff --git a/test/pool/init.js b/test/pool/init.js new file mode 100644 index 0000000..268e941 --- /dev/null +++ b/test/pool/init.js @@ -0,0 +1,28 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Pool } = require('../../'); + +describe('.init([callback])...', () => { + it('...with callbacks...', () => { + it('...should create the number of connections passed to the constructor.', () => { + + }); + it('...should create open connections.', () => { + + }); + it('...should not create connections with a bad connection string.', () => { + + }); + it('...should not create connections if a bad number of initial connections is created.', () => { + + }); + }); + it('...should t', async () => { + const pool = new Pool(`${process.env.CONNECTION_STRING}`); + pool.init((error1) => { + assert.deepEqual(error1, null); + }); + }); +}); diff --git a/test/pool/query.js b/test/pool/query.js new file mode 100644 index 0000000..e69de29 diff --git a/test/pool/test.js b/test/pool/test.js new file mode 100755 index 0000000..06ec052 --- /dev/null +++ b/test/pool/test.js @@ -0,0 +1,10 @@ +/* eslint-env node, mocha */ +/* eslint-disable global-require */ + +describe('Pool', () => { + require('./constructor.js'); + require('./init.js'); + require('./connect.js'); + require('./query.js'); + require('./close.js'); +}); diff --git a/test/queries/test.js b/test/queries/test.js new file mode 100755 index 0000000..666cf26 --- /dev/null +++ b/test/queries/test.js @@ -0,0 +1,37 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('Queries...', () => { + let connection = null; + + beforeEach(() => { + connection = new Connection(`${process.env.CONNECTION_STRING}`); + }); + + afterEach(async () => { + await connection.close(); + connection = null; + }); + + it('...should pass w1nk/node-odbc issue #54', async () => { + const result = await connection.query('select cast(-1 as INTEGER) as test1, cast(-2147483648 as INTEGER) as test2, cast(2147483647 as INTEGER) as test3 from sysibm.sysdummy1'); + assert.notDeepEqual(result, null); + assert.deepEqual(result.count, -1); + assert.deepEqual(result.length, 1); + assert.deepEqual(result[0].TEST1, -1); + assert.deepEqual(result[0].TEST2, -2147483648); + assert.deepEqual(result[0].TEST3, 2147483647); + }); + it('...should pass w1nk/node-odbc issue #85', async () => { + const result = await connection.query('select cast(-1 as INTEGER) as test1, cast(-2147483648 as INTEGER) as test2, cast(2147483647 as INTEGER) as 
test3 from sysibm.sysdummy1'); + assert.notDeepEqual(result, null); + assert.deepEqual(result.count, -1); + assert.deepEqual(result.length, 1); + assert.deepEqual(result[0].TEST1, -1); + assert.deepEqual(result[0].TEST2, -2147483648); + assert.deepEqual(result[0].TEST3, 2147483647); + }); +}); diff --git a/test/run-bench.js b/test/run-bench.js deleted file mode 100644 index 1e5181a..0000000 --- a/test/run-bench.js +++ /dev/null @@ -1,79 +0,0 @@ -var fs = require("fs") - , common = require('./common.js') - , spawn = require("child_process").spawn - , requestedBench = null - , files - ; - -if (process.argv.length === 3) { - requestedBench = process.argv[2]; -} - -var connectionStrings = common.benchConnectionStrings; - -//check to see if the requested test is actually a driver to benchmark -if (requestedBench) { - connectionStrings.forEach(function (connectionString) { - if (requestedBench == connectionString.title) { - connectionStrings = [connectionString]; - requestedBench = null; - } - }); -} - -doNextConnectionString(); - -function doBench(file, connectionString) { - var bench = spawn("node", ['--expose_gc',file, connectionString.connectionString]); - - process.stdout.write("Running \033[01;33m" + file.replace(/\.js$/, "") + "\033[01;0m with [\033[01;29m" + connectionString.title + "\033[01;0m] : "); - - bench.on("exit", function (code, signal) { - doNextBench(connectionString); - }); - - bench.stderr.on("data", function (data) { - process.stderr.write(data); - }); - - bench.stdout.on("data", function (data) { - process.stdout.write(data); - }); -} - -function doNextBench(connectionString) { - if (files.length) { - var benchFile = files.shift(); - - doBench(benchFile, connectionString); - } - else { - //we're done with this connection string, display results and exit accordingly - doNextConnectionString(); - } -} - -function doNextConnectionString() { - if (connectionStrings.length) { - var connectionString = connectionStrings.shift(); - - if (requestedBench) { - files = [requestedBench]; - } - else { - //re-read files - files = fs.readdirSync("./"); - - files = files.filter(function (file) { - return (/^bench-/.test(file)) ? 
true : false; - }); - - files.sort(); - } - - doNextBench(connectionString); - } - else { - console.log("Done"); - } -} diff --git a/test/run-tests.js b/test/run-tests.js deleted file mode 100644 index 4a82a9a..0000000 --- a/test/run-tests.js +++ /dev/null @@ -1,119 +0,0 @@ -var fs = require("fs") - , common = require('./common.js') - , spawn = require("child_process").spawn - , errorCount = 0 - , testCount = 0 - , testTimeout = 5000 - , requestedTest = null - , files - ; - -var filesDisabled = fs.readdirSync("./disabled"); - -if (filesDisabled.length) { - console.log("\n\033[01;31mWarning\033[01;0m : there are %s disabled tests\n", filesDisabled.length); -} - -if (process.argv.length === 3) { - requestedTest = process.argv[2]; -} - -var connectionStrings = common.testConnectionStrings; - -//check to see if the requested test is actually a driver to test -if (requestedTest) { - connectionStrings.forEach(function (connectionString) { - if (requestedTest == connectionString.title) { - connectionStrings = [connectionString]; - requestedTest = null; - } - }); -} - -doNextConnectionString(); - - -function doTest(file, connectionString) { - var test = spawn("node", ['--expose_gc',file, connectionString.connectionString]) - , timer = null - , timedOut = false; - ; - - process.stdout.write("Running test for [\033[01;29m" + connectionString.title + "\033[01;0m] : " + file.replace(/\.js$/, "")); - process.stdout.write(" ... "); - - testCount += 1; - - //TODO: process the following if some flag is set - //test.stdout.pipe(process.stdout); - //test.stderr.pipe(process.stderr); - - test.on("exit", function (code, signal) { - clearTimeout(timer); - - if (code != 0) { - errorCount += 1; - - process.stdout.write("\033[01;31mfail \033[01;0m "); - - if (timedOut) { - process.stdout.write("(Timed Out)"); - } - } - else { - process.stdout.write("\033[01;32msuccess \033[01;0m "); - } - - process.stdout.write("\n"); - - doNextTest(connectionString); - }); - - var timer = setTimeout(function () { - timedOut = true; - test.kill(); - },testTimeout); -} - -function doNextTest(connectionString) { - if (files.length) { - var testFile = files.shift(); - - doTest(testFile, connectionString); - } - else { - //we're done with this connection string, display results and exit accordingly - doNextConnectionString(); - } -} - -function doNextConnectionString() { - if (connectionStrings.length) { - var connectionString = connectionStrings.shift(); - - if (requestedTest) { - files = [requestedTest]; - } - else { - //re-read files - files = fs.readdirSync("./"); - - files = files.filter(function (file) { - return (/^test-/.test(file)) ? 
true : false; - }); - - files.sort(); - } - - doNextTest(connectionString); - } - else { - if (errorCount) { - console.log("\nResults : %s of %s tests failed.\n", errorCount, testCount); - process.exit(errorCount); - } - else { - console.log("Results : All tests were successful."); - } - } -} diff --git a/test/sql-cli.js b/test/sql-cli.js deleted file mode 100644 index 50ce068..0000000 --- a/test/sql-cli.js +++ /dev/null @@ -1,28 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err) -{ - if (err) { - console.log(err); - process.exit(1); - } - - process.stdout.write('> '); - process.stdin.resume(); - - process.stdin.on('data',function (strInput) { - db.query(strInput.toString(), function (err, rs, moreResultSets) { - if (err) { - console.log(err); - console.log(rs); - } - else { - console.log(rs); - } - - process.stdout.write('> '); - }); - }); -}); diff --git a/test/statement/bind.js b/test/statement/bind.js new file mode 100755 index 0000000..2fcf97d --- /dev/null +++ b/test/statement/bind.js @@ -0,0 +1,225 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +const connection = new Connection(`${process.env.CONNECTION_STRING}`); + +describe('.bind(parameters, [callback])...', () => { + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const statement = await connection.createStatement(); + + const BIND_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to statement.bind({array}, {function}[optional]).', + }; + const DUMMY_CALLBACK = () => {}; + const PREPARE_SQL = `INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`; + + assert.throws(() => { + statement.bind(); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(PREPARE_SQL); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(PREPARE_SQL, DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(1); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(1, DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(null); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(null, DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(undefined); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind(undefined, DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind({}); + }, BIND_TYPE_ERROR); + assert.throws(() => { + statement.bind({}, DUMMY_CALLBACK); + }, BIND_TYPE_ERROR); + + // await connection.close(); + }); + describe('...with callbacks...', () => { + it('...should bind if a valid SQL string has been prepared.', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound1', 10], (error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should bind even if the
parameter types are invalid for the prepared SQL statement', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind(['ID', 1, 'AGE'], (error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should not bind if an invalid SQL statement has been prepared (> 0 parameters).', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare('INSERT INTO dummy123.table456 VALUES(?, ?, ?)', (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + statement.bind([1, 'bound', 10], (error3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + done(); + }); + }); + }); + }); + it('...should not bind if an invalid SQL statement has been prepared (0 parameters).', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare('INSERT INTO dummy123.table456 VALUES(1, \'bound\', 10)', (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + statement.bind([], (error3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + done(); + }); + }); + }); + }); + it('...should not bind if there is an incorrect number of parameters for the prepared SQL statement', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound'], (error3) => { // only 2 parameters, while statement expects 3. + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + done(); + }); + }); + }); + }); + it('...should not bind if statement.prepare({string}, {function}[optional]) has not yet been called (> 0 parameters).', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.bind([1, 'bound', 10], (error3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + done(); + }); + }); + }); + it('...should not bind if statement.prepare({string}, {function}[optional]) has not yet been called (0 parameters).', (done) => { + // const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.bind([], (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + done(); + }); + }); + }); + }); // '...with callbacks...' 
+ describe('...with promises...', () => { + it('...should bind if a valid SQL string has been prepared.', async () => { + await assert.doesNotReject(async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind([1, 'bound1', 10]); + }); + }); + it('...should bind even if the parameter types are invalid for the prepared SQL statement', async () => { + await assert.doesNotReject(async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind(['ID', 1, 'AGE']); + }); + }); + it('...should not bind if an invalid SQL statement has been prepared (> 0 parameters).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + try { + await statement.prepare('INSERT INTO dummy123.table456 VALUES(?, ?, ?)'); + } catch (error) { + // no-op, we know it will throw an error + } + await assert.rejects(async () => { + await statement.bind([1, 'bound', 10]); + }); + }); + it('...should not bind if an invalid SQL statement has been prepared (0 parameters).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + try { + await statement.prepare('INSERT INTO dummy123.table456 VALUES()'); + } catch (error) { + // no-op, we know it will throw an error + } + await assert.rejects(async () => { + await statement.bind([]); + }); + }); + it('...should not bind if there is an incorrect number of parameters for the prepared SQL statement', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await assert.rejects(async () => { + await statement.bind([1, 'bound']); + }); + }); + it('...should not bind if statement.prepare({string}, {function}[optional]) has not yet been called (> 0 parameters).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await assert.rejects(async () => { + await statement.bind([1, 'bound', 10]); + }); + }); + it('...should not bind if statement.prepare({string}, {function}[optional]) has not yet been called (0 parameters).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await assert.rejects(async () => { + await statement.bind([]); + }); + }); + }); // '...with promises...'
+}); diff --git a/test/statement/close.js b/test/statement/close.js new file mode 100755 index 0000000..544d05b --- /dev/null +++ b/test/statement/close.js @@ -0,0 +1,295 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.close([callback])...', () => { + let connection = null; + + beforeEach(() => { + connection = new Connection(`${process.env.CONNECTION_STRING}`); + }); + + afterEach(async () => { + await connection.close(); + connection = null; + }); + + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + const statement = await connection.createStatement(); + + const EXECUTE_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to statement.close({function}[optional]).', + }; + const DUMMY_CALLBACK = () => {}; + const SQL_STRING = `INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`; + + assert.throws(() => { + statement.close(SQL_STRING); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close(SQL_STRING, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close(1); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close(1, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close(null); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close(null, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close({}); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.close({}, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + }); + describe('...with callbacks...', () => { + it('...should close a newly created statement.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.close((error2) => { + assert.deepEqual(error2, null); + done(); + }); + }); + }); + it('...should close after a statement has been prepared with parameters.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should close after a statement has been prepared without parameters.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'bound', 10)`, (error2) => { + assert.deepEqual(error2, null); + statement.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should close after a statement has been prepared and values bound.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound', 10], (error3) => { + assert.deepEqual(error3, null); + statement.close((error4) => { + assert.deepEqual(error4, null); + done(); + }); + }); + }); + }); + }); + it('...should close after
a statement has been prepared, bound, and executed successfully.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound', 10], (error3) => { + assert.deepEqual(error3, null); + statement.execute((error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + statement.close((error5) => { + assert.deepEqual(error5, null); + done(); + }); + }); + }); + }); + }); + }); + it('...should close after calling prepare with an error (bad sql prepared).', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare('abc123!@#', (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + statement.close((error4) => { + assert.deepEqual(error4, null); + done(); + }); + }); + }); + }); + it('...should close after calling execute with an error (did not bind parameters).', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.execute((error3, result3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + assert.deepEqual(result3, null); + statement.close((error4) => { + assert.deepEqual(error4, null); + done(); + }); + }); + }); + }); + }); + it('...should error if trying to prepare after closing.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.close((error2) => { + assert.deepEqual(error2, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + done(); + }); + }); + }); + }); + it('...should error if trying to bind after closing.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.close((error3) => { + assert.deepEqual(error3, null); + statement.bind([1, 'bound', 10], (error4) => { + assert.notDeepEqual(error4, null); + assert.deepEqual(error4 instanceof Error, true); + done(); + }); + }); + }); + }); + }); + it('...should error if trying to execute after closing.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound', 10], (error3) => { + assert.deepEqual(error3, null); + statement.close((error4) => { + assert.deepEqual(error4, null); + statement.execute((error5, result5) => { + assert.notDeepEqual(error5, null); + assert.deepEqual(error5 instanceof Error, true); + assert.deepEqual(result5, null); + done(); + }); + }); + }); + }); 
+ }); + }); + }); // '...with callbacks...' + describe('...with promises...', () => { + it('...should close a newly created statement.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.close(); + }); + it('...should close after a statement has been prepared with parameters.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.close(); + }); + it('...should close after a statement has been prepared without parameters.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'bound', 10)`); + await statement.close(); + }); + it('...should close after a statement has been prepared and values bound.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind([1, 'bound', 10]); + await statement.close(); + }); + it('...should close after a statement has been prepared, bound, and executed successfully.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind([1, 'bound', 10]); + const result = await statement.execute(); + assert.notDeepEqual(result, null); + await statement.close(); + }); + it('...should close after calling prepare with an error (bad sql prepared).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await assert.rejects(async () => { + await statement.prepare('abc123!@#'); + }); + await statement.close(); + }); + it('...should close after calling execute with an error (did not bind parameters).', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + let result; + await assert.rejects(async () => { + result = await statement.execute(); + }); + assert.deepEqual(result, null); + await statement.close(); + }); + it('...should error if trying to prepare after closing.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.close(); + await assert.rejects(async () => { + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + }); + }); + it('...should error if trying to bind after closing.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.close(); + await assert.rejects(async () => { + await statement.bind([1, 'bound', 10]); + }); + }); + it('...should error if trying to execute after closing.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO 
${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind([1, 'bound', 10]); + await statement.close(); + let result; + await assert.rejects(async () => { + result = await statement.execute(); + }); + assert.deepEqual(result, null); + }); + }); // '...with promises...' +}); diff --git a/test/statement/execute.js b/test/statement/execute.js new file mode 100755 index 0000000..7e2d067 --- /dev/null +++ b/test/statement/execute.js @@ -0,0 +1,205 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.execute([callback])...', () => { + let connection = null; + + beforeEach(() => { + connection = new Connection(`${process.env.CONNECTION_STRING}`); + }); + + afterEach(async () => { + await connection.close(); + connection = null; + }); + + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + const statement = await connection.createStatement(); + + const EXECUTE_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to statement.execute({function}[optional]).', + }; + const DUMMY_CALLBACK = () => {}; + const PREPARE_SQL = `INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`; + + assert.throws(() => { + statement.execute(PREPARE_SQL); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute(PREPARE_SQL, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute(1); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute(1, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute(null); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute(null, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute({}); + }, EXECUTE_TYPE_ERROR); + assert.throws(() => { + statement.execute({}, DUMMY_CALLBACK); + }, EXECUTE_TYPE_ERROR); + }); + describe('...with callbacks...', () => { + it('...should execute if a valid SQL string has been prepared and valid values bound.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind([1, 'bound', 10], (error3) => { + assert.deepEqual(error3, null); + statement.execute((error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.notDeepEqual(result5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5[0].ID, 1); + assert.deepEqual(result5[0].NAME, 'bound'); + assert.deepEqual(result5[0].AGE, 10); + done(); + }); + }); + }); + }); + }); + }); + it('...should execute if bind has not been called and the prepared statement has no parameters.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'bound', 10)`, (error2) => { + assert.deepEqual(error2, null); + statement.execute((error4, result4) => { + assert.deepEqual(error4, null); + assert.notDeepEqual(result4, null); + 
connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`, (error5, result5) => { + assert.deepEqual(error5, null); + assert.notDeepEqual(result5, null); + assert.deepEqual(result5.length, 1); + assert.deepEqual(result5[0].ID, 1); + assert.deepEqual(result5[0].NAME, 'bound'); + assert.deepEqual(result5[0].AGE, 10); + done(); + }); + }); + }); + }); + }); + it('...should not execute if prepare has not been called.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.execute((error2, result2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + assert.deepEqual(result2, null); + done(); + }); + }); + }); + it('...should not execute if bind has not been called and the prepared statement has parameters.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.execute((error3, result3) => { + assert.notDeepEqual(error3, null); + assert.deepEqual(error3 instanceof Error, true); + assert.deepEqual(result3, null); + done(); + }); + }); + }); + }); + it('...should not execute if bind values are incompatible with the fields they are binding to.', (done) => { + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + statement.bind(['ID', 10, 'AGE'], (error3) => { + assert.deepEqual(error3, null); + statement.execute((error4, result4) => { + assert.notDeepEqual(error4, null); + assert.deepEqual(error4 instanceof Error, true); + assert.deepEqual(result4, null); + done(); + }); + }); + }); + }); + }); + }); // '...with callbacks...' 
+ describe('...with promises...', () => { + it('...should execute if a valid SQL string has been prepared and valid values bound.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind([1, 'bound', 10]); + const result1 = await statement.execute(); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0].ID, 1); + assert.deepEqual(result2[0].NAME, 'bound'); + assert.deepEqual(result2[0].AGE, 10); + }); + it('...should execute if bind has not been called and the prepared statement has no parameters.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(1, 'bound', 10)`); + const result1 = await statement.execute(); + assert.notDeepEqual(result1, null); + assert.deepEqual(result1.count, 1); + const result2 = await connection.query(`SELECT * FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`); + assert.notDeepEqual(result2, null); + assert.deepEqual(result2.count, -1); + assert.deepEqual(result2.length, 1); + assert.deepEqual(result2[0].ID, 1); + assert.deepEqual(result2[0].NAME, 'bound'); + assert.deepEqual(result2[0].AGE, 10); + }); + it('...should not execute if prepare has not been called.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await assert.rejects(async () => { + await statement.execute(); + }); + }); + it('...should not execute if bind has not been called and the prepared statement has parameters.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await assert.rejects(async () => { + await statement.execute(); + }); + }); + it('...should not execute if bind values are incompatible with the fields they are binding to.', async () => { + const statement = await connection.createStatement(); + assert.notDeepEqual(statement, null); + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + await statement.bind(['ID', 10, 'AGE']); + await assert.rejects(async () => { + await statement.execute(); + }); + }); + }); // '...with promises...' 
+}); diff --git a/test/statement/prepare.js b/test/statement/prepare.js new file mode 100755 index 0000000..382430d --- /dev/null +++ b/test/statement/prepare.js @@ -0,0 +1,125 @@ +/* eslint-env node, mocha */ + +require('dotenv').config(); +const assert = require('assert'); +const { Connection } = require('../../'); + +describe('.prepare(sql, [callback])...', () => { + it('...should throw a TypeError if function signature doesn\'t match accepted signatures.', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const statement = await connection.createStatement(); + + const PREPARE_TYPE_ERROR = { + name: 'TypeError', + message: '[node-odbc]: Incorrect function signature for call to statement.prepare({string}, {function}[optional]).', + }; + const DUMMY_CALLBACK = () => {}; + + assert.throws(() => { + statement.prepare(); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(DUMMY_CALLBACK); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(1); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(1, DUMMY_CALLBACK); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(null); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(null, DUMMY_CALLBACK); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(undefined); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare(undefined, DUMMY_CALLBACK); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare({}); + }, PREPARE_TYPE_ERROR); + assert.throws(() => { + statement.prepare({}, DUMMY_CALLBACK); + }, PREPARE_TYPE_ERROR); + + await connection.close(); + }); + describe('...with callbacks...', () => { + it('...should prepare a valid SQL string', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`, (error2) => { + assert.deepEqual(error2, null); + connection.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should return an error with an invalid SQL string', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare('INSERT INTO dummy123.table456 VALUES()', (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + connection.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + it('...should return an error with a blank SQL string', (done) => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + connection.createStatement((error1, statement) => { + assert.deepEqual(error1, null); + assert.notDeepEqual(statement, null); + statement.prepare('', (error2) => { + assert.notDeepEqual(error2, null); + assert.deepEqual(error2 instanceof Error, true); + connection.close((error3) => { + assert.deepEqual(error3, null); + done(); + }); + }); + }); + }); + }); // '...with callbacks...' 
+ describe('...with promises...', () => { + it('...should prepare a valid SQL string', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const statement = await connection.createStatement(); + await assert.doesNotReject(async () => { + await statement.prepare(`INSERT INTO ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} VALUES(?, ?, ?)`); + }); + await connection.close(); + }); + it('...should return an error with an invalid SQL string', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const statement = await connection.createStatement(); + await assert.rejects(async () => { + await statement.prepare('INSERT INTO dummy123.table456 VALUES()'); + }); + await connection.close(); + }); + it('...should return an error with a blank SQL string', async () => { + const connection = new Connection(`${process.env.CONNECTION_STRING}`); + const statement = await connection.createStatement(); + await assert.rejects(async () => { + await statement.prepare(''); + }); + await connection.close(); + }); + }); // '...with promises...' +}); diff --git a/test/statement/test.js b/test/statement/test.js new file mode 100755 index 0000000..f73bbdf --- /dev/null +++ b/test/statement/test.js @@ -0,0 +1,9 @@ +/* eslint-env node, mocha */ +/* eslint-disable global-require */ + +describe('Statement', () => { + require('./prepare.js'); + require('./bind.js'); + require('./execute.js'); + require('./close.js'); +}); diff --git a/test/test-bad-connection-string.js b/test/test-bad-connection-string.js deleted file mode 100644 index 67b4e63..0000000 --- a/test/test-bad-connection-string.js +++ /dev/null @@ -1,27 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -assert.throws(function () { - db.openSync("this is wrong"); -}); - -assert.equal(db.connected, false); - -db.open("this is wrong", function(err) { - console.log(err); - - assert.deepEqual(err, { - error: '[node-odbc] SQL_ERROR', - message: '[unixODBC][Driver Manager]Data source name not found, and no default driver specified', - state: 'IM002' - , errors : [{ - message: '[unixODBC][Driver Manager]Data source name not found, and no default driver specified', - state: 'IM002' - }] - }); - - assert.equal(db.connected, false); -}); diff --git a/test/test-binding-connection-loginTimeout.js b/test/test-binding-connection-loginTimeout.js deleted file mode 100644 index e6d7752..0000000 --- a/test/test-binding-connection-loginTimeout.js +++ /dev/null @@ -1,31 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - //loginTimeout should be 5 by default as set in C++ - assert.equal(conn.loginTimeout, 5); - - //test the setter and getter - conn.loginTimeout = 1234; - assert.equal(conn.loginTimeout, 1234); - - //set the time out to something small - conn.loginTimeout = 1; - assert.equal(conn.loginTimeout, 1); - - conn.open(common.connectionString, function (err) { - //TODO: it would be nice if we could somehow - //force a timeout to occurr, but most testing is - //done locally and it's hard to get a local server - //to not accept a connection within one second... 
- - console.log(err); - conn.close(function () { - - }); - }); -}); diff --git a/test/test-binding-connection-timeOut.js b/test/test-binding-connection-timeOut.js deleted file mode 100644 index dc30d0d..0000000 --- a/test/test-binding-connection-timeOut.js +++ /dev/null @@ -1,31 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - //connectionTimeout should be 0 by default as set in C++ - assert.equal(conn.connectTimeout, 0); - - //test the setter and getter - conn.connectTimeout = 1234; - assert.equal(conn.connectTimeout, 1234); - - //set the time out to something small - conn.connectTimeout = 1; - assert.equal(conn.connectTimeout, 1); - - conn.open(common.connectionString, function (err) { - //TODO: it would be nice if we could somehow - //force a timeout to occurr, but most testing is - //done locally and it's hard to get a local server - //to not accept a connection within one second... - - console.log(err); - conn.close(function () { - - }); - }); -}); diff --git a/test/test-binding-statement-executeSync.js b/test/test-binding-statement-executeSync.js deleted file mode 100644 index 8c5dba0..0000000 --- a/test/test-binding-statement-executeSync.js +++ /dev/null @@ -1,81 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - conn.openSync(common.connectionString); - - conn.createStatement(function (err, stmt) { - var r, result, caughtError; - - //try excuting without preparing or binding. - try { - result = stmt.executeSync(); - } - catch (e) { - caughtError = e; - } - - try { - assert.ok(caughtError); - } - catch (e) { - console.log(e.message); - exitCode = 1; - } - - //try incorrectly binding a string and then executeSync - try { - r = stmt.bind("select 1 + 1 as col1"); - } - catch (e) { - caughtError = e; - } - - try { - assert.equal(caughtError.message, "Argument 1 must be an Array"); - - r = stmt.prepareSync("select 1 + ? as col1"); - assert.equal(r, true, "prepareSync did not return true"); - - r = stmt.bindSync([2]); - assert.equal(r, true, "bindSync did not return true"); - - result = stmt.executeSync(); - assert.equal(result.constructor.name, "ODBCResult"); - - r = result.fetchAllSync(); - assert.deepEqual(r, [ { col1: 3 } ]); - - r = result.closeSync(); - assert.equal(r, true, "closeSync did not return true"); - - result = stmt.executeSync(); - assert.equal(result.constructor.name, "ODBCResult"); - - r = result.fetchAllSync(); - assert.deepEqual(r, [ { col1: 3 } ]); - - console.log(r); - } - catch (e) { - console.log(e.stack); - - exitCode = 1; - } - - conn.closeSync(); - - if (exitCode) { - console.log("failed"); - } - else { - console.log("success"); - } - - process.exit(exitCode); - }); -}); diff --git a/test/test-binding-statement-rebinding.js b/test/test-binding-statement-rebinding.js deleted file mode 100644 index 51d4df8..0000000 --- a/test/test-binding-statement-rebinding.js +++ /dev/null @@ -1,52 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - conn.openSync(common.connectionString); - - conn.createStatement(function (err, stmt) { - var r, result, caughtError; - - var a = ['hello', 'world']; - - stmt.prepareSync('select ? as col1, ? 
as col2'); - - stmt.bindSync(a); - - result = stmt.executeSync(); - - console.log(result.fetchAllSync()); - result.closeSync(); - - a[0] = 'goodbye'; - a[1] = 'steven'; - stmt.bindSync(a); - - result = stmt.executeSync(); - - r = result.fetchAllSync(); - - try { - assert.deepEqual(r, [ { col1: 'goodbye', col2: 'steven' } ]); - } - catch (e) { - console.log(e.stack); - exitCode = 1; - } - - conn.closeSync(); - - if (exitCode) { - console.log("failed"); - } - else { - console.log("success"); - } - - process.exit(exitCode); - }); -}); diff --git a/test/test-binding-transaction-commit.js b/test/test-binding-transaction-commit.js deleted file mode 100644 index 029f4af..0000000 --- a/test/test-binding-transaction-commit.js +++ /dev/null @@ -1,79 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - - conn.openSync(common.connectionString); - - common.createTables(conn, function (err, data) { - test1() - - function test1() { - conn.beginTransaction(function (err) { - if (err) { - console.log("Error beginning transaction."); - console.log(err); - exitCode = 1 - } - - var result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - //rollback - conn.endTransaction(true, function (err) { - if (err) { - console.log("Error rolling back transaction"); - console.log(err); - exitCode = 2 - } - - result = conn.querySync("select * from " + common.tableName); - data = result.fetchAllSync(); - - assert.deepEqual(data, []); - - test2(); - }); - }); - } - - function test2 () { - //Start a new transaction - conn.beginTransaction(function (err) { - if (err) { - console.log("Error beginning transaction"); - console.log(err); - exitCode = 3 - } - - result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - //commit - conn.endTransaction(false, function (err) { - if (err) { - console.log("Error committing transaction"); - console.log(err); - exitCode = 3 - } - - result = conn.querySync("select * from " + common.tableName); - data = result.fetchAllSync(); - - assert.deepEqual(data, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); - - finish(); - }); - }); - } - - function finish() { - common.dropTables(conn, function (err) { - conn.closeSync(); - process.exit(exitCode); - }); - } - }); -}); diff --git a/test/test-binding-transaction-commitSync.js b/test/test-binding-transaction-commitSync.js deleted file mode 100644 index 3e004d8..0000000 --- a/test/test-binding-transaction-commitSync.js +++ /dev/null @@ -1,53 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.ODBC() - , assert = require("assert") - , exitCode = 0 - ; - -db.createConnection(function (err, conn) { - conn.openSync(common.connectionString); - - common.createTables(conn, function (err, data) { - try { - conn.beginTransactionSync(); - - var result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - conn.endTransactionSync(true); //rollback - - result = conn.querySync("select * from " + common.tableName); - - assert.deepEqual(result.fetchAllSync(), []); - } - catch (e) { - console.log("Failed when rolling back"); - console.log(e.stack); - exitCode = 1 - } - - try { - //Start a new transaction - conn.beginTransactionSync(); - - result = conn.querySync("insert into " + common.tableName + 
" (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - conn.endTransactionSync(false); //commit - - result = conn.querySync("select * from " + common.tableName); - - assert.deepEqual(result.fetchAllSync(), [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); - } - catch (e) { - console.log("Failed when committing"); - console.log(e.stack); - - exitCode = 2; - } - - common.dropTables(conn, function (err) { - conn.closeSync(); - process.exit(exitCode); - }); - }); -}); diff --git a/test/test-closed.js b/test/test-closed.js deleted file mode 100644 index 155685e..0000000 --- a/test/test-closed.js +++ /dev/null @@ -1,14 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -assert.equal(db.connected, false); - -db.query("select * from test", function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' }); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); -}); diff --git a/test/test-connection-object.js b/test/test-connection-object.js deleted file mode 100644 index d780297..0000000 --- a/test/test-connection-object.js +++ /dev/null @@ -1,13 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.open(common.connectionObject, function(err){ - assert.equal(err, null); - - db.close(function () { - assert.equal(db.connected, false); - }); -}); diff --git a/test/test-date.js b/test/test-date.js deleted file mode 100644 index d0309a2..0000000 --- a/test/test-date.js +++ /dev/null @@ -1,45 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -var sqlite = /sqlite/i.test(common.connectionString); - -db.open(common.connectionString, function(err) { - assert.equal(err, null); - assert.equal(db.connected, true); - - var dt = new Date(); - dt.setMilliseconds(0); // MySQL truncates them. - var ds = dt.toISOString().replace('Z',''); - var sql = "SELECT cast('" + ds + "' as datetime) as DT1"; - // XXX(bnoordhuis) sqlite3 has no distinct DATETIME or TIMESTAMP type. - // 'datetime' in this expression is a function, not a type. - if (sqlite) sql = "SELECT datetime('" + ds + "') as DT1"; - console.log(sql); - - db.query(sql, function (err, data) { - assert.equal(err, null); - assert.equal(data.length, 1); - - db.close(function () { - assert.equal(db.connected, false); - console.log(dt); - console.log(data); - - //test selected data after the connection - //is closed, in case the assertion fails - if (sqlite) { - assert.equal(data[0].DT1.constructor.name, "String", "DT1 is not an instance of a String object"); - assert.equal(data[0].DT1, ds.replace('T', ' ').replace(/\.\d+$/, '')); - } else { - assert.equal(data[0].DT1.constructor.name, "Date", "DT1 is not an instance of a Date object"); - // XXX(bnoordhuis) DT1 is in local time but we inserted - // a UTC date so we need to adjust it before comparing. 
- dt = new Date(dt.getTime() + 6e4 * dt.getTimezoneOffset()); - assert.equal(data[0].DT1.toISOString(), dt.toISOString()); - } - }); - }); -}); diff --git a/test/test-describe-column.js b/test/test-describe-column.js deleted file mode 100644 index 1cbe675..0000000 --- a/test/test-describe-column.js +++ /dev/null @@ -1,34 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -console.log("connected"); - -common.dropTables(db, function (err) { - if (err) console.log(err.message); - - console.log("tables dropped"); - - common.createTables(db, function (err) { - if (err) console.log(err.message); - - console.log("tables created"); - - db.describe({ - database : common.databaseName, - table : common.tableName, - column : 'COLDATETIME' - }, function (err, data) { - if (err) console.log(err.message); - - console.log(data); - - db.closeSync(); - assert.ok(data.length, "No records returned when attempting to describe the column COLDATETIME"); - }); - }); -}); diff --git a/test/test-describe-database.js b/test/test-describe-database.js deleted file mode 100644 index a715864..0000000 --- a/test/test-describe-database.js +++ /dev/null @@ -1,18 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -common.dropTables(db, function () { - common.createTables(db, function () { - db.describe({ - database : common.databaseName - }, function (err, data) { - db.closeSync(); - assert.ok(data.length, "No records returned when attempting to describe the database"); - }); - }); -}); diff --git a/test/test-describe-table.js b/test/test-describe-table.js deleted file mode 100644 index 23b8495..0000000 --- a/test/test-describe-table.js +++ /dev/null @@ -1,20 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -common.dropTables(db, function () { - common.createTables(db, function () { - - db.describe({ - database : common.databaseName - , table : common.tableName - }, function (err, data) { - db.closeSync(); - assert.ok(data.length, "No records returned when attempting to describe the tabe " + common.tableName); - }); - }); -}); diff --git a/test/test-domains-open.js b/test/test-domains-open.js deleted file mode 100644 index 042e7af..0000000 --- a/test/test-domains-open.js +++ /dev/null @@ -1,19 +0,0 @@ -var domain = require("domain"); - -var d = domain.create(); - -d.on("error", function (error) { - console.log("Error caught!", error); -}); - -d.run(function() { - var db = require("../")(); - - console.trace(); - - db.open("wrongConnectionString", function (error) { - console.trace(); - - throw new Error(); - }); -}); diff --git a/test/test-getInfoSync.js b/test/test-getInfoSync.js deleted file mode 100644 index 516d424..0000000 --- a/test/test-getInfoSync.js +++ /dev/null @@ -1,10 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - -db.openSync(common.connectionString); -console.log(common); -var userName = db.conn.getInfoSync(odbc.SQL_USER_NAME); -assert.equal(userName, common.user); - diff --git a/test/test-global-open-close.js b/test/test-global-open-close.js deleted file mode 100644 index df4aea7..0000000 --- a/test/test-global-open-close.js +++ /dev/null @@ -1,14 +0,0 @@ 
-var common = require("./common") - , odbc = require("../") - , assert = require("assert"); - -odbc.open(common.connectionString, function (err, conn) { - if (err) { - console.log(err); - } - assert.equal(err, null); - assert.equal(conn.constructor.name, 'Database'); - - conn.close(); -}); - diff --git a/test/test-instantiate-one-and-end.js b/test/test-instantiate-one-and-end.js deleted file mode 100644 index 315bd95..0000000 --- a/test/test-instantiate-one-and-end.js +++ /dev/null @@ -1,9 +0,0 @@ -var odbc = require("../") - , db = new odbc.Database() - ; - -//This test should just exit. The only reason it should stay open is if a -//connection has been established. But all we have done here is instantiate -//the object. - -console.log("done"); \ No newline at end of file diff --git a/test/test-issue-54.js b/test/test-issue-54.js deleted file mode 100644 index 4dedbad..0000000 --- a/test/test-issue-54.js +++ /dev/null @@ -1,38 +0,0 @@ -//NOTE: this does not assert anything that it should, please fix. - -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , util = require('util') - , count = 0 - ; - -var sql = -"declare @t table (x int); \ -insert @t values (1); \ -select 'You will get this message' \ -raiserror('You will never get this error!', 16, 100); \ -raiserror('Two errors in a row! WHAT?', 16, 100); \ -select 'You will never get this message, either!' as msg; \ -" - -db.open(common.connectionString, function(err) { - console.log(err || "Connected") - - if (!err) { - db.query(sql, function (err, results, more) { - console.log("q1 result: ", err, results, more) - - if (!more) { - console.log("Running second query") - - db.query("select 1 as x", function(err, results, more) { - console.log("q2 result: ", err, results, more) - - db.close(function(err) { console.log(err || "Closed") }) - }) - } - }) - } -}); \ No newline at end of file diff --git a/test/test-issue-85.js b/test/test-issue-85.js deleted file mode 100644 index 3d7f499..0000000 --- a/test/test-issue-85.js +++ /dev/null @@ -1,29 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , util = require('util') - , count = 0 - ; - -var sql = (common.dialect == 'sqlite' || common.dialect =='mysql') - ? 
'select cast(-1 as signed) as test, cast(-2147483648 as signed) as test2, cast(2147483647 as signed) as test3;' - : 'select cast(-1 as int) as test, cast(-2147483648 as int) as test2, cast(2147483647 as int) as test3;' - ; - -db.open(common.connectionString, function(err) { - console.error(err || "Connected") - - if (!err) { - db.query(sql, function (err, results, more) { - console.log(results); - - assert.equal(err, null); - assert.equal(results[0].test, -1); - assert.equal(results[0].test2, -2147483648); - assert.equal(results[0].test3, 2147483647); - - db.close(function(err) { console.log(err || "Closed") }) - }) - } -}); diff --git a/test/test-issue-get-column-value-2.js b/test/test-issue-get-column-value-2.js deleted file mode 100644 index 6fb36aa..0000000 --- a/test/test-issue-get-column-value-2.js +++ /dev/null @@ -1,45 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , util = require('util') - , count = 0 - ; - -var getSchema = function () { - var db = new odbc.Database(); - - console.log(util.format('Count %s, time %s', count, new Date())); - //console.log(db); - - db.open(common.connectionString, function(err) { - if (err) { - console.error("connection error: ", err.message); - db.close(function(){}); - return; - } - - db.describe({database: 'main', schema: 'RETAIL', table: common.tableName }, function (err, rows) { -// db.query("select * from " + common.tableName, function (err, rows) { - if (err) { - console.error("describe error: ", err.message); - db.close(function(){}); - return; - } - - db.close(function() { - console.log("Connection Closed"); - db = null; - count += 1; - if (count < 100) { - setImmediate(getSchema); - } - else { - process.exit(0); - } - }); - }); - }); -}; - -getSchema(); \ No newline at end of file diff --git a/test/test-memory-leaks-new-objects.js b/test/test-memory-leaks-new-objects.js deleted file mode 100644 index 1be00a4..0000000 --- a/test/test-memory-leaks-new-objects.js +++ /dev/null @@ -1,33 +0,0 @@ -var odbc = require("../") - , openCount = 100 - , start = process.memoryUsage().heapUsed - , x = 100 - ; - -gc(); - -start = process.memoryUsage().heapUsed; - -for (x = 0; x < openCount; x++ ) { - (function () { - var db = new odbc.Database(); - db = null; - })(); -} - -gc(); - -console.log(process.memoryUsage().heapUsed - start); - -gc(); - -for (x = 0; x < openCount; x++ ) { - (function () { - var db = new odbc.Database(); - db = null; - })(); -} - -gc(); - -console.log(process.memoryUsage().heapUsed - start); \ No newline at end of file diff --git a/test/test-multi-open-close.js b/test/test-multi-open-close.js deleted file mode 100644 index 9d6097e..0000000 --- a/test/test-multi-open-close.js +++ /dev/null @@ -1,49 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , openCallback = 0 - , closeCallback = 0 - , openCount = 100 - , connections = [] - ; - -for (var x = 0; x < openCount; x++ ) { - (function () { - var db = new odbc.Database(); - connections.push(db); - - db.open(common.connectionString, function(err) { - if (err) { - throw err; - process.exit(1); - } - - openCallback += 1; - - maybeClose(); - }); - })(); -} - -function maybeClose() { - - if (openCount == openCallback) { - doClose(); - } -} - - -function doClose() { - connections.forEach(function (db) { - db.close(function () { - closeCallback += 1; - - maybeFinish(); - }); - }); -} - -function maybeFinish() { - if (openCount == closeCallback) { - console.log('Done'); - } -} diff --git 
a/test/test-multi-open-query-close.js b/test/test-multi-open-query-close.js deleted file mode 100644 index 766e8c3..0000000 --- a/test/test-multi-open-query-close.js +++ /dev/null @@ -1,75 +0,0 @@ -var common = require("./common") -, odbc = require("../") -, openCallback = 0 -, closeCallback = 0 -, queryCallback = 0 -, openCount = 3 -, connections = [] -; - -for (var x = 0; x < openCount; x++ ) { - (function (x) { - var db = new odbc.Database(); - connections.push(db); - - db.open(common.connectionString, function(err) { - if (err) { - throw err; - process.exit(); - } - - //console.error("Open: %s %s %s", x, openCount, openCallback); - - openCallback += 1; - - maybeQuery(); - }); - })(x); -} - -function maybeQuery() { - if (openCount == openCallback) { - doQuery(); - } -} - -function doQuery() { - connections.forEach(function (db, ix) { - var seconds = connections.length - ix; - - var query = "WAITFOR DELAY '00:00:0" + seconds + "'; select " + seconds + " as result"; - - db.query(query, function (err, rows, moreResultSets) { - - //console.error("Query: %s %s %s %s", ix, openCount, queryCallback, moreResultSets, rows, err); - - queryCallback += 1; - - maybeClose(); - }); - }); -} - -function maybeClose() { - if (openCount == queryCallback) { - doClose(); - } -} - -function doClose() { - connections.forEach(function (db, ix) { - db.close(function () { - //console.log("Close: %s %s %s", ix, openCount, closeCallback); - - closeCallback += 1; - - maybeFinish(); - }); - }); -} - -function maybeFinish() { - if (openCount == closeCallback) { - console.error('done'); - } -} diff --git a/test/test-multi-openSync-closeSync.js b/test/test-multi-openSync-closeSync.js deleted file mode 100644 index d948d8d..0000000 --- a/test/test-multi-openSync-closeSync.js +++ /dev/null @@ -1,30 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , openCallback = 0 - , closeCallback = 0 - , openCount = 100 - , connections = [] - , errorCount = 0; - ; - -for (var x = 0; x < openCount; x++ ) { - var db = new odbc.Database(); - connections.push(db); - - try { - db.openSync(common.connectionString); - } - catch (e) { - console.log(common.connectionString); - console.log(e.stack); - errorCount += 1; - break; - } -} - -connections.forEach(function (db) { - db.closeSync(); -}); - -console.log('Done'); -process.exit(errorCount); \ No newline at end of file diff --git a/test/test-open-close.js b/test/test-open-close.js deleted file mode 100644 index 2ee660f..0000000 --- a/test/test-open-close.js +++ /dev/null @@ -1,29 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - -assert.equal(db.connected, false); - -db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' }); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); -}); - -db.open(common.connectionString, function(err) { - assert.equal(err, null); - assert.equal(db.connected, true); - - db.close(function () { - assert.equal(db.connected, false); - - db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' 
}); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); - }); - }); -}); diff --git a/test/test-open-connectTimeout.js b/test/test-open-connectTimeout.js deleted file mode 100644 index 75857aa..0000000 --- a/test/test-open-connectTimeout.js +++ /dev/null @@ -1,24 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , assert = require("assert"); - -//test setting connectTimeout via the constructor works -var db = new odbc.Database({ connectTimeout : 1 }) - -db.open(common.connectionString, function(err) { - assert.equal(db.conn.connectTimeout, 1); - - assert.equal(err, null); - assert.equal(db.connected, true); - - db.close(function () { - assert.equal(db.connected, false); - - db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' }); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); - }); - }); -}); diff --git a/test/test-open-dont-close.js b/test/test-open-dont-close.js deleted file mode 100644 index c1967b3..0000000 --- a/test/test-open-dont-close.js +++ /dev/null @@ -1,12 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database(); - -db.open(common.connectionString, function(err) { - console.error('db.open callback'); - console.error('node should just sit and wait'); - console.log(err); - //reference db here so it isn't garbage collected: - - console.log(db.connected); -}); diff --git a/test/test-open-loginTimeout.js b/test/test-open-loginTimeout.js deleted file mode 100644 index 0e036b6..0000000 --- a/test/test-open-loginTimeout.js +++ /dev/null @@ -1,24 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , assert = require("assert"); - -//test setting loginTimeout via the constructor works -var db = new odbc.Database({ loginTimeout : 1 }) - -db.open(common.connectionString, function(err) { - assert.equal(db.conn.loginTimeout, 1); - - assert.equal(err, null); - assert.equal(db.connected, true); - - db.close(function () { - assert.equal(db.connected, false); - - db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' }); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); - }); - }); -}); diff --git a/test/test-openSync.js b/test/test-openSync.js deleted file mode 100644 index 88f6eeb..0000000 --- a/test/test-openSync.js +++ /dev/null @@ -1,31 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - -assert.equal(db.connected, false); - -db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { - assert.deepEqual(err, { message: 'Connection not open.' 
}); - assert.deepEqual(rs, []); - assert.equal(moreResultSets, false); - assert.equal(db.connected, false); -}); - -console.log("Attempting to connect to: %s", common.connectionString); - -try { - db.openSync(common.connectionString); -} -catch(e) { - console.log(e.stack); - assert.deepEqual(e, null); -} - -try { - db.closeSync(); -} -catch(e) { - console.log(e.stack); - assert.deepEqual(e, null); -} diff --git a/test/test-param-select-with-booleans-only.js b/test/test-param-select-with-booleans-only.js deleted file mode 100644 index 487322c..0000000 --- a/test/test-param-select-with-booleans-only.js +++ /dev/null @@ -1,21 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as \"TRUECOL\", ? as \"FALSECOL\" " - , [true, false] - , function (err, data, more) { - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - TRUECOL: true, - FALSECOL: false - }]); - }); - }); -}); diff --git a/test/test-param-select-with-decimals-only.js b/test/test-param-select-with-decimals-only.js deleted file mode 100644 index 2b186db..0000000 --- a/test/test-param-select-with-decimals-only.js +++ /dev/null @@ -1,20 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as \"DECCOL1\" " - , [5.5] - , function (err, data, more) { - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - DECCOL1: 5.5 - }]); - }); - }); -}); diff --git a/test/test-param-select-with-null.js b/test/test-param-select-with-null.js deleted file mode 100644 index 5c26a02..0000000 --- a/test/test-param-select-with-null.js +++ /dev/null @@ -1,21 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as \"NULLCOL1\" " - , [null] - , function (err, data, more) { - if (err) { console.error(err) } - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - NULLCOL1: null - }]); - }); - }); -}); diff --git a/test/test-param-select-with-nulls-mixed.js b/test/test-param-select-with-nulls-mixed.js deleted file mode 100644 index 4f1026e..0000000 --- a/test/test-param-select-with-nulls-mixed.js +++ /dev/null @@ -1,23 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as \"TEXTCOL1\", ? as \"TEXTCOL2\", ? 
as \"NULLCOL1\" " - , ["something", "something", null] - , function (err, data, more) { - if (err) { console.error(err) } - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - TEXTCOL1: "something", - TEXTCOL2: "something", - NULLCOL1: null - }]); - }); - }); -}); diff --git a/test/test-param-select-with-numbers-mixed.js b/test/test-param-select-with-numbers-mixed.js deleted file mode 100644 index 8a5cb6b..0000000 --- a/test/test-param-select-with-numbers-mixed.js +++ /dev/null @@ -1,22 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as TEXTCOL, ? as TEXTCOL2, ? as INTCOL " - , ["fish", "asdf", 1] - , function (err, data, more) { - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - TEXTCOL: 'fish', - TEXTCOL2: 'asdf', - INTCOL: 1 - }]); - }); - }); -}); diff --git a/test/test-param-select-with-numbers-only.js b/test/test-param-select-with-numbers-only.js deleted file mode 100644 index ad500d5..0000000 --- a/test/test-param-select-with-numbers-only.js +++ /dev/null @@ -1,24 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as INTCOL1, ? as INTCOL2, ? as INTCOL3, ? as FLOATCOL4, ? as FLOATYINT" - , [5, 3, 1, 1.23456789012345, 12345.000] - , function (err, data, more) { - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - INTCOL1: 5, - INTCOL2: 3, - INTCOL3: 1, - FLOATCOL4 : 1.23456789012345, - FLOATYINT : 12345 - }]); - }); - }); -}); diff --git a/test/test-param-select-with-strings-only.js b/test/test-param-select-with-strings-only.js deleted file mode 100644 index 8ce19dc..0000000 --- a/test/test-param-select-with-strings-only.js +++ /dev/null @@ -1,22 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert"); - - -db.open(common.connectionString, function (err) { - assert.equal(err, null); - - db.query("select ? as TEXTCOL, ? as TEXTCOL2, ? as TEXTCOL3" - , ["fish", "asdf", "1234"] - , function (err, data, more) { - db.close(function () { - assert.equal(err, null); - assert.deepEqual(data, [{ - TEXTCOL: 'fish', - TEXTCOL2: 'asdf', - TEXTCOL3: '1234' - }]); - }); - }); -}); diff --git a/test/test-param-select-with-unicode.js b/test/test-param-select-with-unicode.js deleted file mode 100644 index c2e5b8d..0000000 --- a/test/test-param-select-with-unicode.js +++ /dev/null @@ -1,15 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.open(common.connectionString, function(err) { - db.query("select ? 
as UNICODETEXT", ['ף צ ץ ק ר ש תכ ך ל מ ם נ ן ס ע פ 電电電買买買開开開東东東車车車'], function (err, data) { - db.close(function () { - console.log(data); - assert.equal(err, null); - assert.deepEqual(data, [{ UNICODETEXT: 'ף צ ץ ק ר ש תכ ך ל מ ם נ ן ס ע פ 電电電買买買開开開東东東車车車' }]); - }); - }); -}); diff --git a/test/test-pool-close.js b/test/test-pool-close.js deleted file mode 100644 index 2a36f75..0000000 --- a/test/test-pool-close.js +++ /dev/null @@ -1,38 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , pool = new odbc.Pool() - , connectionString = common.connectionString - , connections = [] - , connectCount = 10; - -openConnectionsUsingPool(connections); - -function openConnectionsUsingPool(connections) { - for (var x = 0; x <= connectCount; x++) { - - (function (connectionIndex) { - console.error("Opening connection #", connectionIndex); - - pool.open(connectionString, function (err, connection) { - //console.error("Opened connection #", connectionIndex); - - if (err) { - console.error("error: ", err.message); - return false; - } - - connections.push(connection); - - if (connectionIndex == connectCount) { - closeConnections(connections); - } - }); - })(x); - } -} - -function closeConnections (connections) { - pool.close(function () { - console.error("pool closed"); - }); -} \ No newline at end of file diff --git a/test/test-pool-connect.js b/test/test-pool-connect.js deleted file mode 100644 index a0f0d12..0000000 --- a/test/test-pool-connect.js +++ /dev/null @@ -1,38 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , pool = new odbc.Pool() - , connectionString = common.connectionString - , connections = [] - , connectCount = 10; - -openConnectionsUsingPool(connections); - -function openConnectionsUsingPool(connections) { - for (var x = 0; x <= connectCount; x++) { - - (function (connectionIndex) { - console.error("Opening connection #", connectionIndex); - - pool.open(connectionString, function (err, connection) { - //console.error("Opened connection #", connectionIndex); - - if (err) { - console.error("error: ", err.message); - return false; - } - - connections.push(connection); - - if (connectionIndex == connectCount) { - closeConnections(connections); - } - }); - })(x); - } -} - -function closeConnections (connections) { - pool.close(function () { - console.error("pool closed"); - }); -} diff --git a/test/test-prepare-bind-execute-closeSync.js b/test/test-prepare-bind-execute-closeSync.js deleted file mode 100644 index f7c74d7..0000000 --- a/test/test-prepare-bind-execute-closeSync.js +++ /dev/null @@ -1,62 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , iterations = 1000 - ; - -db.openSync(common.connectionString); - -issueQuery3(function () { - finish(); -}); - - -function issueQuery3(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? 
as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bind([x], function (err) { - if (err) { - console.log(err); - return finish(); - } - - //console.log(x); - - stmt.execute(function (err, result) { - cb(err, result, x); - }); - }); - })(x); - } - - function cb (err, result, x) { - if (err) { - console.error(err); - return finish(); - } - - var a = result.fetchAllSync(); - - assert.deepEqual(a, [{ test : x }]); - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.closeSync(); - console.log("connection closed"); -} diff --git a/test/test-prepare-bind-execute-error.js b/test/test-prepare-bind-execute-error.js deleted file mode 100644 index e76af75..0000000 --- a/test/test-prepare-bind-execute-error.js +++ /dev/null @@ -1,49 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -issueQuery(); - -function issueQuery() { - var count = 0 - , time = new Date().getTime() - , stmt - , result - , data - ; - - assert.doesNotThrow(function () { - stmt = db.prepareSync('select cast(? as datetime) as test'); - }); - - assert.throws(function () { - result = stmt.executeSync(); - }); - - assert.doesNotThrow(function () { - stmt.bindSync([0]); - }); - - assert.doesNotThrow(function () { - result = stmt.executeSync(); - }); - - assert.doesNotThrow(function () { - data = result.fetchAllSync(); - }); - - assert.ok(data); - - finish(0); -} - -function finish(exitCode) { - db.closeSync(); - - console.log("connection closed"); - - process.exit(exitCode || 0); -} diff --git a/test/test-prepare-bind-execute-long-string.js b/test/test-prepare-bind-execute-long-string.js deleted file mode 100644 index ee7e65c..0000000 --- a/test/test-prepare-bind-execute-long-string.js +++ /dev/null @@ -1,64 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -issueQuery(100001); -issueQuery(3000); -issueQuery(4000); -issueQuery(5000); -issueQuery(8000); -finish(0); - -function issueQuery(length) { - var count = 0 - , time = new Date().getTime() - , stmt - , result - , data - , str = '' - ; - - var set = 'abcdefghijklmnopqrstuvwxyz'; - - for (var x = 0; x < length; x++) { - str += set[x % set.length]; - } - - assert.doesNotThrow(function () { - stmt = db.prepareSync('select ? 
as longString'); - }); - - assert.doesNotThrow(function () { - stmt.bindSync([str]); - }); - - assert.doesNotThrow(function () { - result = stmt.executeSync(); - }); - - assert.doesNotThrow(function () { - data = result.fetchAllSync(); - }); - - console.log('expected length: %s, returned length: %s', str.length, data[0].longString.length); - - for (var x = 0; x < str.length; x++) { - if (str[x] != data[0].longString[x]) { - console.log(x, str[x], data[0].longString[x]); - - assert.equal(str[x], data[0].longString[x]); - } - } - - assert.equal(data[0].longString, str); -} - -function finish(exitCode) { - db.closeSync(); - - console.log("connection closed"); - process.exit(exitCode || 0); -} diff --git a/test/test-prepare-bindSync-execute-closeSync.js b/test/test-prepare-bindSync-execute-closeSync.js deleted file mode 100644 index d2c74ca..0000000 --- a/test/test-prepare-bindSync-execute-closeSync.js +++ /dev/null @@ -1,46 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , assert = require("assert") - , db = new odbc.Database() - , iterations = 100 - ; - -db.openSync(common.connectionString); - -issueQuery3(function () { - finish(); -}); - -function issueQuery3(done) { - var count = 0 - , time = new Date().getTime(); - - var stmt = db.prepareSync('select ? as test'); - - for (var x = 0; x < iterations; x++) { - (function (x) { - stmt.bindSync([x]); - var result = stmt.executeSync() - cb(result, x); - - })(x); - } - - function cb (result, x) { - assert.deepEqual(result.fetchAllSync(), [ { test : x } ]); - - result.closeSync(); - - if (++count == iterations) { - var elapsed = new Date().getTime() - time; - - console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); - return done(); - } - } -} - -function finish() { - db.closeSync(); - console.log("connection closed"); -} diff --git a/test/test-prepare.js b/test/test-prepare.js deleted file mode 100644 index 5b11dd4..0000000 --- a/test/test-prepare.js +++ /dev/null @@ -1,34 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -assert.equal(db.connected, true); - -db.prepare("select ? 
as col1", function (err, stmt) { - assert.equal(err, null); - assert.equal(stmt.constructor.name, "ODBCStatement"); - - stmt.bind(["hello world"], function (err) { - assert.equal(err, null); - - stmt.execute(function (err, result) { - assert.equal(err, null); - assert.equal(result.constructor.name, "ODBCResult"); - - result.fetchAll(function (err, data) { - assert.equal(err, null); - console.log(data); - - result.closeSync(); - - db.closeSync(); - assert.deepEqual(data, [{ col1: "hello world" }]); - }); - }); - }); -}); - diff --git a/test/test-prepareSync-bad-sql.js b/test/test-prepareSync-bad-sql.js deleted file mode 100644 index eaf6cf8..0000000 --- a/test/test-prepareSync-bad-sql.js +++ /dev/null @@ -1,24 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -var stmt = db.prepareSync("asdf asdf asdf asdf sadf "); -assert.equal(stmt.constructor.name, "ODBCStatement"); - -stmt.bindSync(["hello world", 1, null]); - -stmt.execute(function (err, result) { - assert.ok(err); - - stmt.executeNonQuery(function (err, count) { - assert.ok(err); - - db.close(function () {}); - }); -}); - diff --git a/test/test-prepareSync-multiple-execution.js b/test/test-prepareSync-multiple-execution.js deleted file mode 100644 index 144c894..0000000 --- a/test/test-prepareSync-multiple-execution.js +++ /dev/null @@ -1,69 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -var count = 0; -var iterations = 10; - -db.openSync(common.connectionString); - -common.dropTables(db, function () { - common.createTables(db, function (err, data) { - if (err) { - console.log(err); - - return finish(2); - } - - var stmt = db.prepareSync("insert into " + common.tableName + " (colint, coltext) VALUES (?, ?)"); - assert.equal(stmt.constructor.name, "ODBCStatement"); - - recursive(stmt); - }); -}); - -function finish(retValue) { - console.log("finish exit value: %s", retValue); - - db.closeSync(); - process.exit(retValue || 0); -} - -function recursive (stmt) { - try { - var result = stmt.bindSync([4, 'hello world']); - assert.equal(result, true); - } - catch (e) { - console.log(e.message); - finish(3); - } - - stmt.execute(function (err, result) { - if (err) { - console.log(err.message); - - return finish(4); - } - - result.closeSync(); - count += 1; - - console.log("count %s, iterations %s", count, iterations); - - if (count <= iterations) { - setTimeout(function(){ - recursive(stmt); - },100); - } - else { - console.log(db.querySync("select * from " + common.tableName)); - - common.dropTables(db, function () { - return finish(0); - }); - } - }); -} diff --git a/test/test-prepareSync.js b/test/test-prepareSync.js deleted file mode 100644 index 8e83d54..0000000 --- a/test/test-prepareSync.js +++ /dev/null @@ -1,29 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -var stmt = db.prepareSync("select ? as col1, ? as col2, ? 
as col3"); -assert.equal(stmt.constructor.name, "ODBCStatement"); - -stmt.bindSync(["hello world", 1, null]); - -stmt.execute(function (err, result) { - assert.equal(err, null); - assert.equal(result.constructor.name, "ODBCResult"); - - result.fetchAll(function (err, data) { - assert.equal(err, null); - console.log(data); - - result.closeSync(); - - db.closeSync(); - assert.deepEqual(data, [{ col1: "hello world", col2 : 1, col3 : null }]); - }); -}); - diff --git a/test/test-query-create-table-fetchSync.js b/test/test-query-create-table-fetchSync.js deleted file mode 100644 index 7fa2ed9..0000000 --- a/test/test-query-create-table-fetchSync.js +++ /dev/null @@ -1,23 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -db.queryResult("create table " + common.tableName + " (COLINT INTEGER, COLDATETIME DATETIME, COLTEXT TEXT)", function (err, result) { - console.log(arguments); - - try { - //this should throw because there was no result to be had? - var data = result.fetchAllSync(); - console.log(data); - } - catch (e) { - console.log(e.stack); - } - - db.closeSync(); -}); - diff --git a/test/test-query-create-table.js b/test/test-query-create-table.js deleted file mode 100644 index bc3f7a2..0000000 --- a/test/test-query-create-table.js +++ /dev/null @@ -1,12 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -common.createTables(db, function (err, data, morefollowing) { - console.log(arguments); - db.closeSync(); -}); - diff --git a/test/test-query-drop-table.js b/test/test-query-drop-table.js deleted file mode 100644 index 376ea1d..0000000 --- a/test/test-query-drop-table.js +++ /dev/null @@ -1,13 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -common.dropTables(db, function (err, data) { - db.closeSync(); - assert.equal(err, null); - assert.deepEqual(data, []); -}); - diff --git a/test/test-query-insert.js b/test/test-query-insert.js deleted file mode 100644 index 0369d8e..0000000 --- a/test/test-query-insert.js +++ /dev/null @@ -1,34 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , insertCount = 0; - ; - -db.open(common.connectionString, function(err) { - common.dropTables(db, function () { - common.createTables(db, function (err) { - assert.equal(err, null); - - db.query("insert into " + common.tableName + " (COLTEXT) values ('sandwich')", insertCallback); - db.query("insert into " + common.tableName + " (COLTEXT) values ('fish')", insertCallback); - db.query("insert into " + common.tableName + " (COLTEXT) values ('scarf')", insertCallback); - - }); - }); -}); - -function insertCallback(err, data) { - assert.equal(err, null); - assert.deepEqual(data, []); - - insertCount += 1; - - if (insertCount === 3) { - common.dropTables(db, function () { - db.close(function () { - console.error("Done"); - }); - }); - } -} \ No newline at end of file diff --git a/test/test-query-select-fetch.js b/test/test-query-select-fetch.js deleted file mode 100644 index 0fc356f..0000000 --- a/test/test-query-select-fetch.js +++ /dev/null @@ -1,19 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = 
require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -db.queryResult("select 1 as COLINT, 'some test' as COLTEXT ", function (err, result) { - assert.equal(err, null); - assert.equal(result.constructor.name, "ODBCResult"); - - result.fetch(function (err, data) { - db.closeSync(); - assert.deepEqual(data, { COLINT: '1', COLTEXT: 'some test' }); - }); -}); - diff --git a/test/test-query-select-fetchAll.js b/test/test-query-select-fetchAll.js deleted file mode 100644 index 1245096..0000000 --- a/test/test-query-select-fetchAll.js +++ /dev/null @@ -1,23 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -assert.equal(db.connected, true); - -db.queryResult("select 1 as COLINT, 'some test' as COLTEXT union select 2, 'something else' ", function (err, result) { - assert.equal(err, null); - assert.equal(result.constructor.name, "ODBCResult"); - - result.fetchAll(function (err, data) { - db.closeSync(); - assert.deepEqual(data, [ - {"COLINT":1,"COLTEXT":"some test"} - ,{"COLINT":2,"COLTEXT":"something else"} - ]); - }); -}); - diff --git a/test/test-query-select-fetchAllSync.js b/test/test-query-select-fetchAllSync.js deleted file mode 100644 index 68eaa57..0000000 --- a/test/test-query-select-fetchAllSync.js +++ /dev/null @@ -1,23 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -assert.equal(db.connected, true); - -db.queryResult("select 1 as COLINT, 'some test' as COLTEXT union select 2, 'something else' ", function (err, result) { - assert.equal(err, null); - assert.equal(result.constructor.name, "ODBCResult"); - - var data = result.fetchAllSync(); - - db.closeSync(); - assert.deepEqual(data, [ - {"COLINT":1,"COLTEXT":"some test"} - ,{"COLINT":2,"COLTEXT":"something else"} - ]); -}); - diff --git a/test/test-query-select-fetchMode-array.js b/test/test-query-select-fetchMode-array.js deleted file mode 100644 index 33cb72b..0000000 --- a/test/test-query-select-fetchMode-array.js +++ /dev/null @@ -1,17 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = odbc({ fetchMode : odbc.FETCH_ARRAY }) - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -assert.equal(db.connected, true); - -db.query("select 1 as COLINT, 'some test' as COLTEXT ", function (err, data) { - assert.equal(err, null); - - db.closeSync(); - assert.deepEqual(data, [[1,"some test"]]); -}); - diff --git a/test/test-query-select-unicode.js b/test/test-query-select-unicode.js deleted file mode 100644 index 751b464..0000000 --- a/test/test-query-select-unicode.js +++ /dev/null @@ -1,15 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); - -db.query("select '☯ąčęėįšųūž☎áäàéêèóöòüßÄÖÜ€ шчябы Ⅲ ❤' as UNICODETEXT", function (err, data) { - db.closeSync(); - console.log(data); - assert.equal(err, null); - assert.deepEqual(data, [{ UNICODETEXT: '☯ąčęėįšųūž☎áäàéêèóöòüßÄÖÜ€ шчябы Ⅲ ❤' }]); -}); - diff --git a/test/test-query-select.js b/test/test-query-select.js deleted file mode 100644 index 39916d1..0000000 --- a/test/test-query-select.js +++ /dev/null @@ -1,14 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = 
require("assert") - ; - -db.openSync(common.connectionString); - -db.query("select 1 as \"COLINT\", 'some test' as \"COLTEXT\"", function (err, data) { - db.closeSync(); - assert.equal(err, null); - assert.deepEqual(data, [{ COLINT: '1', COLTEXT: 'some test' }]); -}); - diff --git a/test/test-queryResultSync-getColumnNamesSync.js b/test/test-queryResultSync-getColumnNamesSync.js deleted file mode 100644 index 67d95d9..0000000 --- a/test/test-queryResultSync-getColumnNamesSync.js +++ /dev/null @@ -1,14 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -var rs = db.queryResultSync("select 1 as SomeIntField, 'string' as someStringField"); - -assert.deepEqual(rs.getColumnNamesSync(), ['SomeIntField', 'someStringField']); - -db.closeSync(); diff --git a/test/test-queryResultSync-getRowCount.js b/test/test-queryResultSync-getRowCount.js deleted file mode 100644 index b6fa2d6..0000000 --- a/test/test-queryResultSync-getRowCount.js +++ /dev/null @@ -1,34 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -common.dropTables(db, function () { - common.createTables(db, function (err, data) { - if (err) { - console.log(err); - - return finish(2); - } - - var rs = db.queryResultSync("insert into " + common.tableName + " (colint, coltext) VALUES (100, 'hello world')"); - assert.equal(rs.constructor.name, "ODBCResult"); - - assert.equal(rs.getRowCountSync(), 1); - - common.dropTables(db, function () { - return finish(0); - }); - }); -}); - -function finish(retValue) { - console.log("finish exit value: %s", retValue); - - db.closeSync(); - process.exit(retValue || 0); -} diff --git a/test/test-querySync-select-unicode.js b/test/test-querySync-select-unicode.js deleted file mode 100644 index 21c6b66..0000000 --- a/test/test-querySync-select-unicode.js +++ /dev/null @@ -1,20 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -var data; - -try { - data = db.querySync("select 'ꜨꜢ' as UNICODETEXT"); -} -catch (e) { - console.log(e.stack); -} - -db.closeSync(); -console.log(data); -assert.deepEqual(data, [{ UNICODETEXT: 'ꜨꜢ' }]); - diff --git a/test/test-querySync-select-with-execption.js b/test/test-querySync-select-with-execption.js deleted file mode 100644 index b53cbe4..0000000 --- a/test/test-querySync-select-with-execption.js +++ /dev/null @@ -1,22 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); -var err = null; - -try { - var data = db.querySync("select invalid query"); -} -catch (e) { - console.log(e.stack); - - err = e; -} - -db.closeSync(); -assert.equal(err.error, "[node-odbc] Error in ODBCConnection::QuerySync"); - diff --git a/test/test-querySync-select.js b/test/test-querySync-select.js deleted file mode 100644 index 7f14fa3..0000000 --- a/test/test-querySync-select.js +++ /dev/null @@ -1,15 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - ; - -db.openSync(common.connectionString); -assert.equal(db.connected, true); - -var data 
= db.querySync("select 1 as \"COLINT\", 'some test' as \"COLTEXT\""); - -db.closeSync(); -assert.deepEqual(data, [{ COLINT: 1, COLTEXT: 'some test' }]); - - diff --git a/test/test-require-and-end.js b/test/test-require-and-end.js deleted file mode 100644 index 3715394..0000000 --- a/test/test-require-and-end.js +++ /dev/null @@ -1,8 +0,0 @@ -var odbc = require("../") - ; - -//This test should just exit. This tests an issue where -//the C++ ODBC::Init function was causing the event loop to -//stay alive - -console.log("done"); \ No newline at end of file diff --git a/test/test-transaction-commit-sync.js b/test/test-transaction-commit-sync.js deleted file mode 100644 index 1880956..0000000 --- a/test/test-transaction-commit-sync.js +++ /dev/null @@ -1,54 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , exitCode = 0 - ; - - -db.openSync(common.connectionString); - -common.createTables(db, function (err, data) { - try { - db.beginTransactionSync(); - - var results = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - db.rollbackTransactionSync(); - - results = db.querySync("select * from " + common.tableName); - - assert.deepEqual(results, []); - } - catch (e) { - console.log("Failed when rolling back"); - console.log(e.stack); - exitCode = 1 - } - - try { - //Start a new transaction - db.beginTransactionSync(); - - result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - db.commitTransactionSync(); //commit - - result = db.querySync("select * from " + common.tableName); - - assert.deepEqual(result, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); - } - catch (e) { - console.log("Failed when committing"); - console.log(e.stack); - - exitCode = 2; - } - - common.dropTables(db, function (err) { - db.closeSync(); - process.exit(exitCode); - }); -}); - - diff --git a/test/test-transaction-commit.js b/test/test-transaction-commit.js deleted file mode 100644 index cc4f425..0000000 --- a/test/test-transaction-commit.js +++ /dev/null @@ -1,77 +0,0 @@ -var common = require("./common") - , odbc = require("../") - , db = new odbc.Database() - , assert = require("assert") - , exitCode = 0 - ; - - -db.openSync(common.connectionString); - -common.createTables(db, function (err, data) { - test1() - - function test1() { - db.beginTransaction(function (err) { - if (err) { - console.log("Error beginning transaction."); - console.log(err); - exitCode = 1 - } - - var result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - //rollback - db.endTransaction(true, function (err) { - if (err) { - console.log("Error rolling back transaction"); - console.log(err); - exitCode = 2 - } - - data = db.querySync("select * from " + common.tableName); - - assert.deepEqual(data, []); - - test2(); - }); - }); - } - - function test2 () { - //Start a new transaction - db.beginTransaction(function (err) { - if (err) { - console.log("Error beginning transaction"); - console.log(err); - exitCode = 3 - } - - result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); - - //commit - db.endTransaction(false, function (err) { - if (err) { - console.log("Error committing transaction"); - console.log(err); - exitCode = 3 - } - - data = db.querySync("select * from " + common.tableName); - - 
assert.deepEqual(data, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]);
-
-        finish();
-      });
-    });
-  }
-
-  function finish() {
-    common.dropTables(db, function (err) {
-      db.closeSync();
-      process.exit(exitCode);
-    });
-  }
-});
-
-
diff --git a/test/test.js b/test/test.js
new file mode 100755
index 0000000..59e3c96
--- /dev/null
+++ b/test/test.js
@@ -0,0 +1,44 @@
+/* eslint-env node, mocha */
+/* eslint-disable global-require */
+
+const { Connection } = require('../');
+
+const TABLE_EXISTS_STATE = '42S01';
+
+describe('odbc', () => {
+  before(async () => {
+    const connection = new Connection(`${process.env.CONNECTION_STRING}`);
+    try {
+      await connection.query(`CREATE TABLE ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}(ID INTEGER, NAME VARCHAR(24), AGE INTEGER)`);
+    } catch (error) {
+      const errorJSON = JSON.parse(`{${error.message}}`);
+      const sqlState = errorJSON.errors[0].SQLState;
+      if (sqlState !== TABLE_EXISTS_STATE) {
+        throw (error);
+      }
+    }
+    await connection.close();
+  });
+
+  afterEach(async () => {
+    const connection = new Connection(`${process.env.CONNECTION_STRING}`);
+    try {
+      await connection.query(`DELETE FROM ${process.env.DB_SCHEMA}.${process.env.DB_TABLE} WHERE 1=1`);
+    } catch (error) {
+      // console.log(error);
+    } finally {
+      await connection.close();
+    }
+  });
+
+  after(async () => {
+    const connection = new Connection(`${process.env.CONNECTION_STRING}`);
+    await connection.query(`DROP TABLE ${process.env.DB_SCHEMA}.${process.env.DB_TABLE}`);
+    await connection.close();
+  });
+
+  require('./queries/test.js');
+  require('./connection/test.js');
+  require('./statement/test.js');
+  require('./pool/test.js');
+});
diff --git a/testenv.md b/testenv.md
new file mode 100644
index 0000000..2d6ebed
--- /dev/null
+++ b/testenv.md
@@ -0,0 +1,7 @@
+The following environment variables are required for tests to run on your system:
+
+CONNECTION_STRING=DSN=;UID=;PWD=;SCHEMA=
+DB_SCHEMA=
+DB_TABLE=ODBCTESTS
+DB_STOREDPROCEDURE=
+DB_USERNAME=
\ No newline at end of file
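
As a usage note for the new `testenv.md` above: one minimal way to supply these variables before running the new Mocha entry point in `test/test.js` is sketched below. This is only an illustration, assuming a POSIX shell, a placeholder DSN named `MYDSN` with placeholder credentials, and that the package's test script invokes the Mocha suite (otherwise invoke your test runner directly); substitute values for your own driver, schema, and user.

```bash
# Hypothetical values -- replace the DSN, credentials, schema, and procedure
# with ones defined in your own odbc.ini / database.
export CONNECTION_STRING="DSN=MYDSN;UID=myuser;PWD=mypassword;SCHEMA=MYSCHEMA"
export DB_SCHEMA="MYSCHEMA"
export DB_TABLE="ODBCTESTS"
export DB_STOREDPROCEDURE="MYPROC"
export DB_USERNAME="myuser"

# Run the suite (test/test.js creates, clears, and drops DB_SCHEMA.DB_TABLE).
npm test
```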