Started on node.js+socket.io+mongoDB on the backend for more responsiveness

This commit is contained in:
KasperRT
2015-04-09 00:18:13 +02:00
parent 076f8e821f
commit a8a705bd77
1889 changed files with 322175 additions and 68 deletions

10
server/node_modules/mongodb/.travis.yml generated vendored Executable file
View File

@@ -0,0 +1,10 @@
# Travis CI configuration for the MongoDB Node.js driver.
language: node_js
# Node.js runtimes to test against.
node_js:
- 0.10
- 0.12
# Container-based infrastructure (no sudo access required).
sudo: false
# Build matrix: one job per MongoDB server version under test.
env:
- MONGODB_VERSION=2.2.x
- MONGODB_VERSION=2.4.x
- MONGODB_VERSION=2.6.x
- MONGODB_VERSION=3.0.x

1105
server/node_modules/mongodb/HISTORY.md generated vendored Executable file

File diff suppressed because it is too large Load Diff

201
server/node_modules/mongodb/LICENSE generated vendored Executable file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

16
server/node_modules/mongodb/Makefile generated vendored Executable file
View File

@@ -0,0 +1,16 @@
NODE = node
NPM = npm
JSDOC = jsdoc
name = all

# Build the documentation site: assemble release notes from HISTORY.md,
# render the hugo site, then generate the jsdoc API reference into ./public.
generate_docs:
	# cp -R ./HISTORY.md ./docs/content/meta/release-notes.md
	cp -R ./docs/history-header.md ./docs/content/meta/release-notes.md
	more ./HISTORY.md >> ./docs/content/meta/release-notes.md
	pandoc docs/layouts/partials/welcome.md -o docs/layouts/partials/welcome.html
	hugo -s docs/ -d ../public
	$(JSDOC) -c conf.json -t docs/jsdoc-template/ -d ./public/api
	cp -R ./public/api/scripts ./public/.
	cp -R ./public/api/styles ./public/.

# generate_docs produces no file of that name, so it must be declared phony;
# `total` is kept for backward compatibility even though no such target is
# visible in this file.
.PHONY: total generate_docs

322
server/node_modules/mongodb/README.md generated vendored Executable file
View File

@@ -0,0 +1,322 @@
[![NPM](https://nodei.co/npm/mongodb.png?downloads=true&downloadRank=true)](https://nodei.co/npm/mongodb/) [![NPM](https://nodei.co/npm-dl/mongodb.png?months=6&height=3)](https://nodei.co/npm/mongodb/)
[![Build Status](https://secure.travis-ci.org/mongodb/node-mongodb-native.png)](http://travis-ci.org/mongodb/node-mongodb-native)
[![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/mongodb/node-mongodb-native?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
# Description
The MongoDB driver is the high level part of the 2.0 or higher MongoDB driver and is meant for end users.
## MongoDB Node.JS Driver
| what | where |
|---------------|------------------------------------------------|
| documentation | http://mongodb.github.io/node-mongodb-native/ |
| api-doc | http://mongodb.github.io/node-mongodb-native/ |
| source | https://github.com/mongodb/node-mongodb-native |
| mongodb | http://www.mongodb.org/ |
### Blogs of Engineers involved in the driver
- Christian Kvalheim [@christkv](https://twitter.com/christkv) <http://christiankvalheim.com>
### Bugs / Feature Requests
Think you've found a bug? Want to see a new feature in node-mongodb-native? Please open a
case in our issue management tool, JIRA:
- Create an account and login <https://jira.mongodb.org>.
- Navigate to the NODE project <https://jira.mongodb.org/browse/NODE>.
- Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it.
Bug reports in JIRA for all driver projects (i.e. NODE, PYTHON, CSHARP, JAVA) and the
Core Server (i.e. SERVER) project are **public**.
### Questions and Bug Reports
* mailing list: https://groups.google.com/forum/#!forum/node-mongodb-native
* jira: http://jira.mongodb.org/
### Change Log
http://jira.mongodb.org/browse/NODE
QuickStart
==========
The quick start guide will show you how to set up a simple application using node.js and MongoDB. Its scope covers only how to set up the driver and perform the simple CRUD operations. For more in-depth coverage we encourage reading the tutorials.
Create the package.json file
----------------------------
Let's create a directory where our application will live. In our case we will put this under our projects directory.
```
mkdir myproject
cd myproject
```
Enter the following command and answer the questions to create the initial structure for your new project
```
npm init
```
Next we need to edit the generated package.json file to add the dependency for the MongoDB driver. The package.json file below is just an example and yours will look different depending on how you answered the questions after entering `npm init`
```
{
"name": "myproject",
"version": "1.0.0",
"description": "My first project",
"main": "index.js",
"repository": {
"type": "git",
"url": "git://github.com/christkv/myfirstproject.git"
},
"dependencies": {
"mongodb": "~2.0"
},
"author": "Christian Kvalheim",
"license": "Apache 2.0",
"bugs": {
"url": "https://github.com/christkv/myfirstproject/issues"
},
"homepage": "https://github.com/christkv/myfirstproject"
}
```
Save the file and return to the shell or command prompt and use **NPM** to install all the dependencies.
```
npm install
```
You should see **NPM** download a lot of files. Once it's done you'll find all the downloaded packages under the **node_modules** directory.
Booting up a MongoDB Server
---------------------------
Let's boot up a MongoDB server instance. Download the right MongoDB version from [MongoDB](http://www.mongodb.org), open a new shell or command line and ensure the **mongod** command is in the shell or command line path. Now let's create a database directory (in our case under **/data**).
```
mongod --dbpath=/data --port 27017
```
You should see the **mongod** process start up and print some status information.
Connecting to MongoDB
---------------------
Let's create a new **app.js** file that we will use to show the basic CRUD operations using the MongoDB driver.
First let's add code to connect to the server and the database **myproject**.
```js
var MongoClient = require('mongodb').MongoClient
, assert = require('assert');
// Connection URL
var url = 'mongodb://localhost:27017/myproject';
// Use connect method to connect to the Server
MongoClient.connect(url, function(err, db) {
assert.equal(null, err);
console.log("Connected correctly to server");
db.close();
});
```
Given that you booted up the **mongod** process earlier the application should connect successfully and print **Connected correctly to server** to the console.
Let's add some code to show the different CRUD operations available.
Inserting a Document
--------------------
Let's create a function that will insert some documents for us.
```js
var insertDocuments = function(db, callback) {
// Get the documents collection
var collection = db.collection('documents');
// Insert some documents
collection.insert([
{a : 1}, {a : 2}, {a : 3}
], function(err, result) {
assert.equal(err, null);
assert.equal(3, result.result.n);
assert.equal(3, result.ops.length);
console.log("Inserted 3 documents into the document collection");
callback(result);
});
}
```
The insert command will return a results object that contains several fields that might be useful.
* **result** Contains the result document from MongoDB
* **ops** Contains the documents inserted with added **_id** fields
* **connection** Contains the connection used to perform the insert
Let's add a call to the **insertDocuments** command in the **MongoClient.connect** method callback.
```js
var MongoClient = require('mongodb').MongoClient
, assert = require('assert');
// Connection URL
var url = 'mongodb://localhost:27017/myproject';
// Use connect method to connect to the Server
MongoClient.connect(url, function(err, db) {
assert.equal(null, err);
console.log("Connected correctly to server");
insertDocuments(db, function() {
db.close();
});
});
```
We can now run the updated **app.js** file.
```
node app.js
```
You should see the following output after running the **app.js** file.
```
Connected correctly to server
Inserted 3 documents into the document collection
```
Updating a document
-------------------
Let's look at how to do a simple document update by adding a new field **b** to the document that has the field **a** set to **2**.
```js
var updateDocument = function(db, callback) {
// Get the documents collection
var collection = db.collection('documents');
// Update document where a is 2, set b equal to 1
collection.update({ a : 2 }
, { $set: { b : 1 } }, function(err, result) {
assert.equal(err, null);
assert.equal(1, result.result.n);
console.log("Updated the document with the field a equal to 2");
callback(result);
});
}
```
The method will update the first document where the field **a** is equal to **2** by adding a new field **b** to the document set to **1**. Let's update the callback function from **MongoClient.connect** to include the update method.
```js
var MongoClient = require('mongodb').MongoClient
, assert = require('assert');
// Connection URL
var url = 'mongodb://localhost:27017/myproject';
// Use connect method to connect to the Server
MongoClient.connect(url, function(err, db) {
assert.equal(null, err);
console.log("Connected correctly to server");
insertDocuments(db, function() {
updateDocument(db, function() {
db.close();
});
});
});
```
Remove a document
-----------------
Next let's remove the document where the field **a** equals **3**.
```js
var removeDocument = function(db, callback) {
// Get the documents collection
var collection = db.collection('documents');
// Insert some documents
collection.remove({ a : 3 }, function(err, result) {
assert.equal(err, null);
assert.equal(1, result.result.n);
console.log("Removed the document with the field a equal to 3");
callback(result);
});
}
```
This will remove the first document where the field **a** equals to **3**. Let's add the method to the **MongoClient.connect** callback function.
```js
var MongoClient = require('mongodb').MongoClient
, assert = require('assert');
// Connection URL
var url = 'mongodb://localhost:27017/myproject';
// Use connect method to connect to the Server
MongoClient.connect(url, function(err, db) {
assert.equal(null, err);
console.log("Connected correctly to server");
insertDocuments(db, function() {
updateDocument(db, function() {
removeDocument(db, function() {
db.close();
});
});
});
});
```
Finally let's retrieve all the documents using a simple find.
Find All Documents
------------------
We will finish up the Quickstart CRUD methods by performing a simple query that returns all the documents matching the query.
```js
var findDocuments = function(db, callback) {
// Get the documents collection
var collection = db.collection('documents');
// Find some documents
collection.find({}).toArray(function(err, docs) {
assert.equal(err, null);
assert.equal(2, docs.length);
console.log("Found the following records");
console.dir(docs);
callback(docs);
});
}
```
This query will return all the documents in the **documents** collection. Since we removed a document the total documents returned is **2**. Finally let's add the findDocument method to the **MongoClient.connect** callback.
```js
var MongoClient = require('mongodb').MongoClient
, assert = require('assert');
// Connection URL
var url = 'mongodb://localhost:27017/myproject';
// Use connect method to connect to the Server
MongoClient.connect(url, function(err, db) {
assert.equal(null, err);
console.log("Connected correctly to server");
insertDocuments(db, function() {
updateDocument(db, function() {
removeDocument(db, function() {
findDocuments(db, function() {
db.close();
});
});
});
});
});
```
This concludes the QuickStart of connecting and performing some Basic operations using the MongoDB Node.js driver. For more detailed information you can look at the tutorials covering more specific topics of interest.
## Next Steps
* [MongoDB Documentation](http://mongodb.org/)
* [Read about Schemas](http://learnmongodbthehardway.com/)
* [Star us on GitHub](https://github.com/mongodb/node-mongodb-native)

65
server/node_modules/mongodb/conf.json generated vendored Executable file
View File

@@ -0,0 +1,65 @@
{
"plugins": ["plugins/markdown", "docs/lib/jsdoc/examples_plugin.js"],
"source": {
"include": [
"test/functional/operation_example_tests.js",
"lib/admin.js",
"lib/aggregation_cursor.js",
"lib/command_cursor.js",
"lib/collection.js",
"lib/cursor.js",
"lib/db.js",
"lib/mongo_client.js",
"lib/mongos.js",
"lib/read_preference.js",
"lib/replset.js",
"lib/server.js",
"lib/bulk/common.js",
"lib/bulk/ordered.js",
"lib/bulk/unordered.js",
"lib/gridfs/grid_store.js",
"node_modules/mongodb-core/lib/connection/logger.js",
"node_modules/bson/lib/bson/binary.js",
"node_modules/bson/lib/bson/code.js",
"node_modules/bson/lib/bson/db_ref.js",
"node_modules/bson/lib/bson/double.js",
"node_modules/bson/lib/bson/long.js",
"node_modules/bson/lib/bson/objectid.js",
"node_modules/bson/lib/bson/symbol.js",
"node_modules/bson/lib/bson/timestamp.js",
"node_modules/bson/lib/bson/max_key.js",
"node_modules/bson/lib/bson/min_key.js"
]
},
"templates": {
"cleverLinks": true,
"monospaceLinks": true,
"default": {
"outputSourceFiles" : true
},
"applicationName": "Node.js MongoDB Driver API",
"disqus": true,
"googleAnalytics": "UA-29229787-1",
"openGraph": {
"title": "",
"type": "website",
"image": "",
"site_name": "",
"url": ""
},
"meta": {
"title": "",
"description": "",
"keyword": ""
},
"linenums": true
},
"markdown": {
"parser": "gfm",
"hardwrap": true,
"tags": ["examples"]
},
"examples": {
"indent": 4
}
}

39
server/node_modules/mongodb/index.js generated vendored Executable file
View File

@@ -0,0 +1,39 @@
// Public entry point of the driver package: the exported value is the
// `connect` function itself, decorated with every class and BSON type the
// package exposes, so `require('mongodb')(url, cb)` and
// `require('mongodb').MongoClient` both work.
// Core module
var core = require('mongodb-core');
// Set up the connect function
var connect = require('./lib/mongo_client').connect;
// Expose error class
connect.MongoError = core.MongoError;
// Actual driver classes exported
connect.MongoClient = require('./lib/mongo_client');
connect.Db = require('./lib/db');
connect.Collection = require('./lib/collection');
connect.Server = require('./lib/server');
connect.ReplSet = require('./lib/replset');
connect.Mongos = require('./lib/mongos');
connect.ReadPreference = require('./lib/read_preference');
connect.GridStore = require('./lib/gridfs/grid_store');
connect.Chunk = require('./lib/gridfs/chunk');
connect.Logger = core.Logger;
connect.Cursor = require('./lib/cursor');
// BSON types exported (re-exported from mongodb-core's BSON implementation)
connect.Binary = core.BSON.Binary;
connect.Code = core.BSON.Code;
connect.DBRef = core.BSON.DBRef;
connect.Double = core.BSON.Double;
connect.Long = core.BSON.Long;
connect.MinKey = core.BSON.MinKey;
connect.MaxKey = core.BSON.MaxKey;
connect.ObjectID = core.BSON.ObjectID;
// Alias: ObjectId and ObjectID refer to the same constructor.
connect.ObjectId = core.BSON.ObjectID;
connect.Symbol = core.BSON.Symbol;
connect.Timestamp = core.BSON.Timestamp;
// Add connect method so `connect.connect` is also callable
connect.connect = connect;
// Set our exports to be the connect function
module.exports = connect;

347
server/node_modules/mongodb/lib/admin.js generated vendored Executable file
View File

@@ -0,0 +1,347 @@
"use strict";
var toError = require('./utils').toError;
/**
* @fileOverview The **Admin** class is an internal class that allows convenient access to
* the admin functionality and commands for MongoDB.
*
* **ADMIN Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Use the admin database for the operation
* var adminDb = db.admin();
*
* // List all the available databases
* adminDb.listDatabases(function(err, dbs) {
* test.equal(null, err);
* test.ok(dbs.databases.length > 0);
* db.close();
* });
* });
*/
/**
 * Create a new Admin instance (INTERNAL TYPE, do not instantiate directly).
 * Keeps references to the owning db and the underlying topology in an
 * internal state bag used by every Admin method.
 * @class
 * @return {Admin} a collection instance.
 */
var Admin = function(db, topology) {
  // Support invocation without `new` by delegating to proper construction.
  if (!(this instanceof Admin)) {
    return new Admin(db, topology);
  }

  // Internal state
  this.s = { db: db, topology: topology };
}
/**
* The callback format for results
* @callback Admin~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object} result The result object if the command was executed successfully.
*/
/**
 * Execute a command against the admin database.
 * @method
 * @param {object} command The command hash
 * @param {object} [options=null] Optional settings.
 * @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.command = function(command, options, callback) {
  // Normalize optional arguments: the final argument is always the callback,
  // anything between `command` and it is the options object.
  var extra = Array.prototype.slice.call(arguments, 1);
  callback = extra.pop();
  options = extra.length ? extra.shift() : {};

  // Run the command against the admin db and forward the outcome.
  this.s.db.executeDbAdminCommand(command, options, function(err, doc) {
    if (callback == null) return null;
    return callback(err, doc);
  });
}
/**
 * Retrieve the server information for the current
 * instance of the db client.
 *
 * Alias for serverInfo: both issue the {buildinfo:1} admin command.
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.buildInfo = function(callback) {
  // Delegate straight through to serverInfo.
  this.serverInfo(callback);
}
/**
 * Retrieve the server information for the current
 * instance of the db client.
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.serverInfo = function(callback) {
  // Issue the buildinfo admin command; surface errors first, otherwise hand
  // back the raw buildinfo document.
  this.s.db.executeDbAdminCommand({buildinfo: 1}, function(err, doc) {
    if (err != null) {
      return callback(err, null);
    }
    return callback(null, doc);
  });
}
/**
 * Retrieve this db's server status.
 *
 * On success the callback receives the serverStatus document; on failure it
 * receives either the transport error or the failed command document wrapped
 * via toError, with `false` as the result.
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.serverStatus = function(callback) {
  // Removed unused `var self = this;` — it was never referenced.
  this.s.db.executeDbAdminCommand({serverStatus: 1}, function(err, doc) {
    if(err == null && doc.ok === 1) {
      callback(null, doc);
    } else {
      // Prefer the transport error; otherwise wrap the failed command doc.
      if(err) return callback(err, false);
      return callback(toError(doc), false);
    }
  });
};
/**
 * Retrieve the current profiling Level for MongoDB.
 *
 * Maps the server's numeric `was` field to a symbolic level:
 * 0 -> "off", 1 -> "slow_only", 2 -> "all".
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.profilingLevel = function(callback) {
  // Removed the dead statement `doc = doc;` and the unused `var self = this;`.
  this.s.db.executeDbAdminCommand({profile: -1}, function(err, doc) {
    if(err == null && doc.ok === 1) {
      var was = doc.was;
      if(was == 0) return callback(null, "off");
      if(was == 1) return callback(null, "slow_only");
      if(was == 2) return callback(null, "all");
      // Any other numeric value is unexpected and reported as an error.
      return callback(new Error("Error: illegal profiling level value " + was), null);
    } else {
      err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
    }
  });
};
/**
 * Ping the MongoDB server and retrieve results
 *
 * @param {object} [options] Optional settings — accepted for call-shape
 *   compatibility but never read by this implementation.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.ping = function(options, callback) {
// Only the last argument (the callback) is used; any options argument is
// silently ignored — NOTE(review): presumably intentional, confirm upstream.
var args = Array.prototype.slice.call(arguments, 0);
this.s.db.executeDbAdminCommand({ping: 1}, args.pop());
}
/**
 * Authenticate a user against the server.
 * @method
 * @param {string} username The username.
 * @param {string} [password] The password.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.authenticate = function(username, password, callback) {
  // Delegate to db.authenticate, pinning the authentication db to 'admin'.
  var forward = function(err, doc) {
    return callback(err, doc);
  };
  this.s.db.authenticate(username, password, {authdb: 'admin'}, forward);
}
/**
 * Logout user from server, fire off on all connections and remove all auth info.
 * @method
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.logout = function(callback) {
  // Delegate to db.logout against the 'admin' auth database.
  var forward = function(err, doc) {
    return callback(err, doc);
  };
  this.s.db.logout({authdb: 'admin'}, forward);
}
/**
 * Add a user to the database.
 * @method
 * @param {string} username The username.
 * @param {string} password The password.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {object} [options.customData=null] Custom data associated with the user (only Mongodb 2.6 or higher)
 * @param {object[]} [options.roles=null] Roles associated with the created user (only Mongodb 2.6 or higher)
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.addUser = function(username, password, options, callback) {
  // Peel off optional arguments: the callback is always the final argument.
  var rest = Array.prototype.slice.call(arguments, 2);
  callback = rest.pop();
  options = rest.length ? rest.shift() : {};

  // User management through this class always targets the admin database.
  options.dbName = 'admin';

  this.s.db.addUser(username, password, options, function(err, doc) {
    return callback(err, doc);
  });
}
/**
 * Remove a user from a database.
 * @method
 * @param {string} username The username.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.removeUser = function(username, options, callback) {
  // Removed unused `var self = this;` — it was never referenced.
  var args = Array.prototype.slice.call(arguments, 1);
  callback = args.pop();
  options = args.length ? args.shift() : {};
  // User removal through this class always targets the admin database.
  options.dbName = 'admin';
  this.s.db.removeUser(username, options, function(err, doc) {
    return callback(err, doc);
  });
}
/**
 * Set the current profiling level of MongoDB.
 *
 * @param {string} level The new profiling level (off, slow_only, all).
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.setProfilingLevel = function(level, callback) {
  // Removed the dead statement `doc = doc;` and the unused `var self = this;`.
  // Translate the symbolic level to the numeric value the server expects.
  var profile = 0;
  if(level == "off") {
    profile = 0;
  } else if(level == "slow_only") {
    profile = 1;
  } else if(level == "all") {
    profile = 2;
  } else {
    // Reject unknown levels before touching the server.
    return callback(new Error("Error: illegal profiling level value " + level));
  }

  // Set up the profile command
  var command = {};
  command['profile'] = profile;

  this.s.db.executeDbAdminCommand(command, function(err, doc) {
    if(err == null && doc.ok === 1)
      return callback(null, level);
    return err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
  });
};
/**
 * Retrieve the current profiling information for MongoDB
*
* @param {Admin~resultCallback} callback The command result callback.
* @return {null}
*/
Admin.prototype.profilingInfo = function(callback) {
  try {
    // Read the profiling documents straight out of admin.system.profile;
    // toArray drains the cursor and hands the documents to the callback
    this.s.topology.cursor("admin.system.profile", { find: 'system.profile', query: {}}, {}).toArray(callback);
  } catch (err) {
    // Surface synchronous cursor-construction failures through the callback
    return callback(err, null);
  }
};
/**
* Validate an existing collection
*
* @param {string} collectionName The name of the collection to validate.
* @param {object} [options=null] Optional settings.
* @param {Admin~resultCallback} callback The command result callback.
* @return {null}
*/
Admin.prototype.validateCollection = function(collectionName, options, callback) {
  // Normalize the optional arguments: the trailing argument is always the callback
  var rest = Array.prototype.slice.call(arguments, 1);
  callback = rest.pop();
  options = rest.length ? rest.shift() : {};
  // Build the validate command and copy over any user supplied options
  var command = {validate: collectionName};
  Object.keys(options).forEach(function(key) {
    if(options.hasOwnProperty(key)) {
      command[key] = options[key];
    }
  });
  // Execute the command and inspect the response for signs of corruption
  this.s.db.command(command, function(err, doc) {
    if(err != null) return callback(err, null);
    if(doc.ok === 0)
      return callback(new Error("Error with validate command"), null);
    if(doc.result != null && doc.result.constructor != String)
      return callback(new Error("Error with validation data"), null);
    if(doc.result != null && doc.result.match(/exception|corrupt/) != null)
      return callback(new Error("Error: invalid collection " + collectionName), null);
    if(doc.valid != null && !doc.valid)
      return callback(new Error("Error: invalid collection " + collectionName), null);
    // Collection validated cleanly
    return callback(null, doc);
  });
};
/**
* List the available databases
*
* @param {Admin~resultCallback} callback The command result callback.
* @return {null}
*/
Admin.prototype.listDatabases = function(callback) {
  // Run the listDatabases command against the admin database and hand
  // the raw response document to the caller
  this.s.db.executeDbAdminCommand({listDatabases:1}, {}, function(err, doc) {
    if(err != null) return callback(err, null);
    return callback(null, doc);
  });
}
/**
* Get ReplicaSet status
*
* @param {Admin~resultCallback} callback The command result callback.
* @return {null}
*/
Admin.prototype.replSetGetStatus = function(callback) {
  // Execute the replSetGetStatus admin command
  this.s.db.executeDbAdminCommand({replSetGetStatus:1}, function(err, doc) {
    // Happy path: the command executed and the server reported ok
    if(err == null && doc.ok === 1)
      return callback(null, doc);
    // Transport level failure
    if(err) return callback(err, false);
    // Command level failure: wrap the response document in an error
    return callback(toError(doc), false);
  });
};
module.exports = Admin;

397
server/node_modules/mongodb/lib/aggregation_cursor.js generated vendored Executable file
View File

@@ -0,0 +1,397 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, toError = require('./utils').toError
, getSingleProperty = require('./utils').getSingleProperty
, formattedOrderClause = require('./utils').formattedOrderClause
, handleCallback = require('./utils').handleCallback
, Logger = require('mongodb-core').Logger
, EventEmitter = require('events').EventEmitter
, ReadPreference = require('./read_preference')
, MongoError = require('mongodb-core').MongoError
, Readable = require('stream').Readable || require('readable-stream').Readable
// , CoreCursor = require('mongodb-core').Cursor
, CoreCursor = require('./cursor')
, Query = require('mongodb-core').Query
, CoreReadPreference = require('mongodb-core').ReadPreference;
/**
* @fileOverview The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB
* allowing for iteration over the results returned from the underlying query. It supports
* one by one document iteration, conversion to an array or can be iterated as a Node 0.10.X
* or higher stream
*
* **AGGREGATIONCURSOR Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Create a collection we want to drop later
* var col = db.collection('createIndexExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
*
* // Show that duplicate records got dropped
* col.aggregation({}, {cursor: {}}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* db.close();
* });
* });
* });
*/
/**
 * Namespace provided by the Node.js stream module.
* @external Readable
*/
/**
* Creates a new Aggregation Cursor instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @extends external:Readable
* @fires AggregationCursor#data
* @fires AggregationCursor#end
* @fires AggregationCursor#close
* @fires AggregationCursor#readable
* @return {AggregationCursor} an AggregationCursor instance.
*/
var AggregationCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  // Initialize the core cursor with the full argument list
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  // Set up the readable-stream side in object mode
  Readable.call(this, {objectMode: true});
  // All internal state lives on a single hidden structure
  this.s = {
      maxTimeMS: null                      // maxTimeMS for the operation
    , state: AggregationCursor.INIT        // Current cursor state
    , streamOptions: {}                    // Streaming options
    , bson: bson                           // BSON serializer handle
    , ns: ns                               // Namespace of the operation
    , cmd: cmd                             // Command being executed
    , options: options                     // Cursor options
    , topology: topology                   // Topology used to execute the command
    , topologyOptions: topologyOptions     // Options applied to the topology
  };
}
/**
* AggregationCursor stream data event, fired for each document in the cursor.
*
* @event AggregationCursor#data
* @type {object}
*/
/**
* AggregationCursor stream end event
*
* @event AggregationCursor#end
* @type {null}
*/
/**
* AggregationCursor stream close event
*
* @event AggregationCursor#close
* @type {null}
*/
/**
* AggregationCursor stream readable event
*
* @event AggregationCursor#readable
* @type {null}
*/
// // Extend the Cursor
// inherits(AggregationCursor, CoreCursor);
// Inherit from Readable
// Inherit from Readable so the cursor can be consumed as a stream
inherits(AggregationCursor, Readable);
// Mix in the CoreCursor methods; inherits() above already consumed the
// single-parent slot, so the cursor behaviour is copied method by method
for(var name in CoreCursor.prototype) {
  AggregationCursor.prototype[name] = CoreCursor.prototype[name];
}
/**
* Set the batch size for the cursor.
* @method
* @param {number} value The batchSize for the cursor.
* @throws {MongoError}
* @return {AggregationCursor}
*/
AggregationCursor.prototype.batchSize = function(value) {
  // A closed or dead cursor can no longer be reconfigured
  if(this.s.state == AggregationCursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  // Only numeric batch sizes are legal
  if(typeof value != 'number') throw new MongoError("batchSize requires an integer");
  // Mirror the value onto the aggregate command's cursor options when present
  if(this.s.cmd.cursor) this.s.cmd.cursor.batchSize = value;
  this.setCursorBatchSize(value);
  // Chainable
  return this;
}
/**
* Add a geoNear stage to the aggregation pipeline
* @method
* @param {object} document The geoNear stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.geoNear = function(document) {
  // Append a $geoNear stage to the aggregation pipeline
  var stage = {$geoNear: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a group stage to the aggregation pipeline
* @method
* @param {object} document The group stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.group = function(document) {
  // Append a $group stage to the aggregation pipeline
  var stage = {$group: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a limit stage to the aggregation pipeline
* @method
* @param {number} value The state limit value.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.limit = function(value) {
  // Append a $limit stage to the aggregation pipeline
  var stage = {$limit: value};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a match stage to the aggregation pipeline
* @method
* @param {object} document The match stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.match = function(document) {
  // Append a $match stage to the aggregation pipeline
  var stage = {$match: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a maxTimeMS stage to the aggregation pipeline
* @method
* @param {number} value The state maxTimeMS value.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.maxTimeMS = function(value) {
  // maxTimeMS on aggregate is only honoured by servers with wire
  // version above 2; silently skip it on older servers
  if(this.s.topology.lastIsMaster().minWireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  // Chainable
  return this;
}
/**
* Add a out stage to the aggregation pipeline
* @method
 * @param {string} destination The destination collection name.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.out = function(destination) {
  // Append an $out stage to the aggregation pipeline
  var stage = {$out: destination};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a project stage to the aggregation pipeline
* @method
* @param {object} document The project stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.project = function(document) {
  // Append a $project stage to the aggregation pipeline
  var stage = {$project: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a redact stage to the aggregation pipeline
* @method
* @param {object} document The redact stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.redact = function(document) {
  // Append a $redact stage to the aggregation pipeline
  var stage = {$redact: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a skip stage to the aggregation pipeline
* @method
* @param {number} value The state skip value.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.skip = function(value) {
  // Append a $skip stage to the aggregation pipeline
  var stage = {$skip: value};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a sort stage to the aggregation pipeline
* @method
* @param {object} document The sort stage document.
* @return {AggregationCursor}
*/
AggregationCursor.prototype.sort = function(document) {
  // Append a $sort stage to the aggregation pipeline
  var stage = {$sort: document};
  this.s.cmd.pipeline.push(stage);
  return this;
}
/**
* Add a unwind stage to the aggregation pipeline
* @method
 * @param {string} field The unwind field name (e.g. "$tags").
* @return {AggregationCursor}
*/
AggregationCursor.prototype.unwind = function(field) {
  // Append an $unwind stage to the aggregation pipeline
  var stage = {$unwind: field};
  this.s.cmd.pipeline.push(stage);
  return this;
}
AggregationCursor.prototype.get = AggregationCursor.prototype.toArray;
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @function AggregationCursor.prototype.next
* @param {AggregationCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Set the new batchSize of the cursor
* @function AggregationCursor.prototype.setBatchSize
* @param {number} value The new batchSize for the cursor
* @return {null}
*/
/**
* Get the batchSize of the cursor
* @function AggregationCursor.prototype.batchSize
* @param {number} value The current batchSize for the cursor
* @return {null}
*/
/**
* The callback format for results
* @callback AggregationCursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object[]} documents All the documents the satisfy the cursor.
*/
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contain partial
 * results when this cursor had been previously accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
* @method AggregationCursor.prototype.toArray
* @param {AggregationCursor~toArrayResultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* The callback format for results
* @callback AggregationCursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
 * not all of the elements will be iterated if this cursor had been previously accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit the memory.
* @method AggregationCursor.prototype.each
* @param {AggregationCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Close the cursor, sending a KillCursor command and emitting close.
* @method AggregationCursor.prototype.close
* @param {AggregationCursor~resultCallback} [callback] The result callback.
* @return {null}
*/
/**
* Is the cursor closed
* @method AggregationCursor.prototype.isClosed
* @return {boolean}
*/
/**
* Execute the explain for the cursor
* @method AggregationCursor.prototype.explain
* @param {AggregationCursor~resultCallback} [callback] The result callback.
* @return {null}
*/
/**
* Clone the cursor
* @function AggregationCursor.prototype.clone
* @return {AggregationCursor}
*/
/**
* Resets the cursor
* @function AggregationCursor.prototype.rewind
* @return {AggregationCursor}
*/
/**
* The callback format for the forEach iterator method
* @callback AggregationCursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback AggregationCursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/*
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method AggregationCursor.prototype.forEach
* @param {AggregationCursor~iteratorCallback} iterator The iteration callback.
* @param {AggregationCursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {null}
*/
// Cursor lifecycle states
AggregationCursor.INIT = 0;
AggregationCursor.OPEN = 1;
AggregationCursor.CLOSED = 2;
module.exports = AggregationCursor;

393
server/node_modules/mongodb/lib/bulk/common.js generated vendored Executable file
View File

@@ -0,0 +1,393 @@
"use strict";
var utils = require('../utils');
// Error codes matching the server's write-result error codes
var UNKNOWN_ERROR = 8;
var INVALID_BSON_ERROR = 22;
var WRITE_CONCERN_ERROR = 64;
var MULTIPLE_ERROR = 65;
// Batch operation types
var INSERT = 1;
var UPDATE = 2;
var REMOVE = 3
// Get write concern
// Resolve the effective write concern for a bulk operation
var writeConcern = function(target, col, options) {
  // True when any write concern field is set on the candidate object
  var hasConcern = function(candidate) {
    return candidate.w != null || candidate.j != null || candidate.fsync != null;
  };
  if(hasConcern(options)) {
    // An explicit write concern on the options takes precedence
    target.writeConcern = options;
  } else if(hasConcern(col.writeConcern)) {
    // Otherwise fall back to the collection level write concern
    target.writeConcern = col.writeConcern;
  }
  return target;
}
/**
* Helper function to define properties
* @ignore
*/
// Expose `value` on `self` as an enumerable, getter-only (read only) property
var defineReadOnlyProperty = function(self, name, value) {
  var descriptor = {
    enumerable: true,
    get: function() { return value; }
  };
  Object.defineProperty(self, name, descriptor);
}
/**
* Keeps the state of a unordered batch so we can rewrite the results
* correctly after command execution
* @ignore
*/
var Batch = function(batchType, originalZeroIndex) {
  // INSERT, UPDATE or REMOVE — one batch never mixes operation types
  this.batchType = batchType;
  // Position of this batch's first operation in the overall bulk operation
  this.originalZeroIndex = originalZeroIndex;
  // Index of the next operation within this batch
  this.currentIndex = 0;
  // Original bulk indexes of the queued operations
  this.originalIndexes = [];
  // The queued wire-protocol operations
  this.operations = [];
  // Running operation count and accumulated BSON size
  this.size = 0;
  this.sizeBytes = 0;
}
/**
* Wraps a legacy operation so we can correctly rewrite it's error
* @ignore
*/
var LegacyOp = function(batchType, operation, index) {
  // The wrapped wire-protocol operation
  this.operation = operation;
  // INSERT, UPDATE or REMOVE
  this.batchType = batchType;
  // Index of the operation in the original bulk operation
  this.index = index;
}
/**
* Create a new BulkWriteResult instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
* @property {boolean} ok Did bulk operation correctly execute
* @property {number} nInserted number of inserted documents
 * @property {number} nMatched number of documents matched for update
* @property {number} nUpserted Number of upserted documents
* @property {number} nModified Number of documents updated physically on disk
* @property {number} nRemoved Number of removed documents
* @return {BulkWriteResult} a BulkWriteResult instance
*/
var BulkWriteResult = function(bulkResult) {
  // Expose the summary counters as read only properties
  defineReadOnlyProperty(this, "ok", bulkResult.ok);
  defineReadOnlyProperty(this, "nInserted", bulkResult.nInserted);
  defineReadOnlyProperty(this, "nUpserted", bulkResult.nUpserted);
  defineReadOnlyProperty(this, "nMatched", bulkResult.nMatched);
  defineReadOnlyProperty(this, "nModified", bulkResult.nModified);
  defineReadOnlyProperty(this, "nRemoved", bulkResult.nRemoved);

  /**
   * Return an array of inserted ids
   *
   * @return {object[]}
   */
  this.getInsertedIds = function() {
    return bulkResult.insertedIds;
  }

  /**
   * Return an array of upserted ids
   *
   * @return {object[]}
   */
  this.getUpsertedIds = function() {
    return bulkResult.upserted;
  }

  /**
   * Return the upserted id at position x
   *
   * @param {number} index the number of the upserted id to return, returns undefined if no result for passed in index
   * @return {object}
   */
  this.getUpsertedIdAt = function(index) {
    return bulkResult.upserted[index];
  }

  /**
   * Return raw internal result
   *
   * @return {object}
   */
  this.getRawResponse = function() {
    return bulkResult;
  }

  /**
   * Returns true if the bulk operation contains a write error
   *
   * @return {boolean}
   */
  this.hasWriteErrors = function() {
    return bulkResult.writeErrors.length > 0;
  }

  /**
   * Returns the number of write errors off the bulk operation
   *
   * @return {number}
   */
  this.getWriteErrorCount = function() {
    return bulkResult.writeErrors.length;
  }

  /**
   * Returns a specific write error object, or null if the index is out of range
   *
   * @return {WriteError}
   */
  this.getWriteErrorAt = function(index) {
    if(index < bulkResult.writeErrors.length) {
      return bulkResult.writeErrors[index];
    }
    return null;
  }

  /**
   * Retrieve all write errors
   *
   * @return {object[]}
   */
  this.getWriteErrors = function() {
    return bulkResult.writeErrors;
  }

  /**
   * Retrieve lastOp if available
   *
   * @return {object}
   */
  this.getLastOp = function() {
    return bulkResult.lastOp;
  }

  /**
   * Retrieve the write concern error if any; multiple errors are combined
   * into a single WriteConcernError with an " and "-joined message
   *
   * @return {WriteConcernError}
   */
  this.getWriteConcernError = function() {
    if(bulkResult.writeConcernErrors.length == 0) {
      return null;
    } else if(bulkResult.writeConcernErrors.length == 1) {
      // Return the single error unchanged
      return bulkResult.writeConcernErrors[0];
    } else {
      // Combine the error messages
      // (bug fix: the original appended " and " only after the first
      // message, producing "a and bc" for three or more errors; the
      // separator now goes between every pair of messages)
      var errmsg = "";
      for(var i = 0; i < bulkResult.writeConcernErrors.length; i++) {
        errmsg = errmsg + bulkResult.writeConcernErrors[i].errmsg;
        if(i != bulkResult.writeConcernErrors.length - 1) {
          errmsg = errmsg + " and ";
        }
      }
      return new WriteConcernError({ errmsg : errmsg, code : WRITE_CONCERN_ERROR });
    }
  }

  // Return the raw internal result document
  this.toJSON = function() {
    return bulkResult;
  }

  // Human readable representation
  // (bug fix: the original concatenated the raw object, which always
  // rendered as "BulkWriteResult([object Object])"; serialize it instead,
  // consistent with WriteError.toString)
  this.toString = function() {
    return "BulkWriteResult(" + JSON.stringify(this.toJSON()) + ")";
  }

  // True when the server reported the bulk operation as ok
  this.isOk = function() {
    return bulkResult.ok == 1;
  }
}
/**
* Create a new WriteConcernError instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
* @property {number} code Write concern error code.
* @property {string} errmsg Write concern error message.
* @return {WriteConcernError} a WriteConcernError instance
*/
var WriteConcernError = function(err) {
  // Allow calling without `new`
  if(!(this instanceof WriteConcernError)) return new WriteConcernError(err);
  // Expose the error fields as read only properties
  defineReadOnlyProperty(this, "code", err.code);
  defineReadOnlyProperty(this, "errmsg", err.errmsg);

  // Plain-object representation of the error
  this.toJSON = function() {
    return {code: err.code, errmsg: err.errmsg};
  }

  // Human readable representation
  this.toString = function() {
    return "WriteConcernError(" + err.errmsg + ")";
  }
}
/**
* Create a new WriteError instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
 * @property {number} code Write error code.
 * @property {number} index Index of the failed operation in the original bulk operation.
 * @property {string} errmsg Write error message.
 * @return {WriteError} a WriteError instance
*/
var WriteError = function(err) {
  // Allow calling without `new`
  if(!(this instanceof WriteError)) return new WriteError(err);
  // Expose the error fields as read only properties
  defineReadOnlyProperty(this, "code", err.code);
  defineReadOnlyProperty(this, "index", err.index);
  defineReadOnlyProperty(this, "errmsg", err.errmsg);

  // The original operation that caused this error
  this.getOperation = function() {
    return err.op;
  }

  // Plain-object representation including the failing operation
  this.toJSON = function() {
    return {code: err.code, index: err.index, errmsg: err.errmsg, op: err.op};
  }

  // Human readable representation
  this.toString = function() {
    return "WriteError(" + JSON.stringify(this.toJSON()) + ")";
  }
}
/**
* Merges results into shared data structure
* @ignore
*/
// Fold one batch's command outcome into the shared bulkResult structure,
// rewriting indexes back into the caller's original bulk-operation order.
// NOTE(review): the `ordered` parameter is currently unused here.
var mergeBatchResults = function(ordered, batch, bulkResult, err, result) {
  // If we have an error set the result to be the err object
  if(err) {
    result = err;
  } else if(result && result.result) {
    // Unwrap a nested command result
    result = result.result;
  } else if(result == null) {
    // Nothing to merge
    return;
  }
  // Do we have a top level error stop processing and return
  if(result.ok == 0 && bulkResult.ok == 1) {
    bulkResult.ok = 0;
    // bulkResult.error = utils.toError(result);
    // Record the top level failure as a write error attributed to the
    // first operation in this batch
    var writeError = {
      index: 0
      , code: result.code || 0
      , errmsg: result.message
      , op: batch.operations[0]
    };
    bulkResult.writeErrors.push(new WriteError(writeError));
    return;
  } else if(result.ok == 0 && bulkResult.ok == 0) {
    // An earlier batch already recorded a top level failure
    return;
  }
  // Add lastop if available
  if(result.lastOp) {
    bulkResult.lastOp = result.lastOp;
  }
  // If we have an insert Batch type, accumulate the inserted count
  if(batch.batchType == INSERT && result.n) {
    bulkResult.nInserted = bulkResult.nInserted + result.n;
  }
  // If we have a remove Batch type, accumulate the removed count
  if(batch.batchType == REMOVE && result.n) {
    bulkResult.nRemoved = bulkResult.nRemoved + result.n;
  }
  var nUpserted = 0;
  // We have an array of upserted values, we need to rewrite the indexes
  // back into the caller's original bulk-operation order
  if(Array.isArray(result.upserted)) {
    nUpserted = result.upserted.length;
    for(var i = 0; i < result.upserted.length; i++) {
      bulkResult.upserted.push({
        index: result.upserted[i].index + batch.originalZeroIndex
        , _id: result.upserted[i]._id
      });
    }
  } else if(result.upserted) {
    // Single upserted value
    nUpserted = 1;
    bulkResult.upserted.push({
      index: batch.originalZeroIndex
      , _id: result.upserted
    });
  }
  // If we have an update Batch type, accumulate the matched/modified counts;
  // matched excludes documents that were upserted rather than found
  if(batch.batchType == UPDATE && result.n) {
    var nModified = result.nModified;
    bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
    bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);
    // nModified is only reported by newer servers; null marks it unknown
    if(typeof nModified == 'number') {
      bulkResult.nModified = bulkResult.nModified + nModified;
    } else {
      bulkResult.nModified = null;
    }
  }
  // Rewrite per-document write errors back to the original bulk indexes
  if(Array.isArray(result.writeErrors)) {
    for(var i = 0; i < result.writeErrors.length; i++) {
      var writeError = {
        index: batch.originalZeroIndex + result.writeErrors[i].index
        , code: result.writeErrors[i].code
        , errmsg: result.writeErrors[i].errmsg
        , op: batch.operations[result.writeErrors[i].index]
      };
      bulkResult.writeErrors.push(new WriteError(writeError));
    }
  }
  // Collect any write concern error reported for this batch
  if(result.writeConcernError) {
    bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
  }
}
//
// Clone the options
// Shallow-copy every own enumerable key of `options` onto a fresh object
var cloneOptions = function(options) {
  var copy = {};
  Object.keys(options).forEach(function(key) {
    copy[key] = options[key];
  });
  return copy;
}
// Exports symbols
// Result and error wrapper types
exports.BulkWriteResult = BulkWriteResult;
exports.WriteError = WriteError;
// Internal batching helpers
exports.Batch = Batch;
exports.LegacyOp = LegacyOp;
exports.mergeBatchResults = mergeBatchResults;
exports.cloneOptions = cloneOptions;
exports.writeConcern = writeConcern;
// Error codes
exports.INVALID_BSON_ERROR = INVALID_BSON_ERROR;
exports.WRITE_CONCERN_ERROR = WRITE_CONCERN_ERROR;
exports.MULTIPLE_ERROR = MULTIPLE_ERROR;
exports.UNKNOWN_ERROR = UNKNOWN_ERROR;
// Batch operation types
exports.INSERT = INSERT;
exports.UPDATE = UPDATE;
exports.REMOVE = REMOVE;

470
server/node_modules/mongodb/lib/bulk/ordered.js generated vendored Executable file
View File

@@ -0,0 +1,470 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
* Create a FindOperatorsOrdered instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @return {FindOperatorsOrdered} a FindOperatorsOrdered instance.
*/
var FindOperatorsOrdered = function(self) {
  // Share the parent OrderedBulkOperation's internal state so the
  // update/remove operators below append to the same batches
  this.s = self.s;
}
/**
* Add a single update document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.update = function(updateDocument) {
  var currentOp = this.s.currentOp;
  // Honour an upsert flag previously set via upsert()
  var upsert = typeof currentOp.upsert == 'boolean' ? currentOp.upsert : false;
  // The pending find() state has been consumed
  this.s.currentOp = null;
  // multi:true — apply the update to every matching document
  return addToOperationsList(this, common.UPDATE, {
    q: currentOp.selector,
    u: updateDocument,
    multi: true,
    upsert: upsert
  });
}
/**
* Add a single update one document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.updateOne = function(updateDocument) {
  var currentOp = this.s.currentOp;
  // Honour an upsert flag previously set via upsert()
  var upsert = typeof currentOp.upsert == 'boolean' ? currentOp.upsert : false;
  // The pending find() state has been consumed
  this.s.currentOp = null;
  // multi:false — apply the update to at most one matching document
  return addToOperationsList(this, common.UPDATE, {
    q: currentOp.selector,
    u: updateDocument,
    multi: false,
    upsert: upsert
  });
}
/**
* Add a replace one operation to the bulk operation
*
* @method
* @param {object} doc the new document to replace the existing one with
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.replaceOne = function(updateDocument) {
  // A replace is a single-document update; delegate to updateOne and
  // return its result so the call remains chainable (bug fix: the
  // original dropped the return value, returning undefined despite the
  // documented @return {OrderedBulkOperation})
  return this.updateOne(updateDocument);
}
/**
* Upsert modifier for update bulk operation
*
* @method
* @throws {MongoError}
* @return {FindOperatorsOrdered}
*/
FindOperatorsOrdered.prototype.upsert = function() {
  // Flag the pending find() operation so the following update/updateOne/
  // replaceOne is executed as an upsert
  this.s.currentOp.upsert = true;
  // Chainable
  return this;
}
/**
* Add a remove one operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.deleteOne = function() {
  var currentOp = this.s.currentOp;
  // The pending find() state has been consumed
  this.s.currentOp = null;
  // limit:1 — remove at most one matching document
  return addToOperationsList(this, common.REMOVE, {
    q: currentOp.selector,
    limit: 1
  });
}
// Backward compatibility
FindOperatorsOrdered.prototype.removeOne = FindOperatorsOrdered.prototype.deleteOne;
/**
* Add a remove operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.delete = function() {
  var currentOp = this.s.currentOp;
  // The pending find() state has been consumed
  this.s.currentOp = null;
  // limit:0 — remove every matching document
  return addToOperationsList(this, common.REMOVE, {
    q: currentOp.selector,
    limit: 0
  });
}
// Backward compatibility
FindOperatorsOrdered.prototype.remove = FindOperatorsOrdered.prototype.delete;
// Add to internal list of documents
// Queue a single wire-protocol document on the bulk operation, starting a
// new batch whenever size limits or an operation-type change require it
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);
  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  // Start a new batch when this document would overflow the operation
  // count, would overflow the byte budget (bug fix: the original added
  // currentBatchSizeBytes to itself instead of adding bsonSize, so the
  // byte limit was effectively never enforced per-document), or when the
  // operation type changes (a batch holds only one type)
  if(((_self.s.currentBatchSize + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatchSizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);
    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
    // Reset the current size trackers
    _self.s.currentBatchSize = 0;
    _self.s.currentBatchSizeBytes = 0;
  } else {
    // Update current batch size
    _self.s.currentBatchSize = _self.s.currentBatchSize + 1;
    _self.s.currentBatchSizeBytes = _self.s.currentBatchSizeBytes + bsonSize;
  }
  // Remember the client-assigned _id for inserts so it can be reported back
  if(docType == common.INSERT) {
    _self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
  }
  // We have an array of documents
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  } else {
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentBatch.operations.push(document)
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }
  // Return self
  return _self;
}
/**
* Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {number} length Get the number of operations in the bulk.
* @return {OrderedBulkOperation} a OrderedBulkOperation instance.
*/
function OrderedBulkOperation(topology, collection, options) {
  options = options == null ? {} : options;
  // TODO Bring from driver information in isMaster
  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;
  // Batch limits, taken from the server's isMaster response
  var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
  var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;
  // Topology capabilities
  var capabilities = topology.capabilities();
  // Effective write concern (explicit options win over the collection's)
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);
  // Shared result structure all batches merge their outcomes into
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };
  // Internal state, shared with the FindOperatorsOrdered wrapper
  this.s = {
      bulkResult: bulkResult
    // Current batch state
    , currentBatch: null
    , currentIndex: 0
    , currentBatchSize: 0
    , currentBatchSizeBytes: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Capabilities
    , capabilities: capabilities
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: collection.collectionName
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation (set by find(), consumed by the operators)
    , currentOp: null
    // Executed
    , executed: false
    // Collection
    , collection: collection
  };
}
// Queue a raw operation document; accepts both the legacy wire-style shape
// (with q/limit fields already present) and the CRUD-spec shape
// (filter/update/replacement), dispatching on the single top-level key
OrderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];
  // Update operations already in wire format (q present)
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    // Single-document variants get multi:false
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }
  // Crud spec update format
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    if(op[key].upsert) operation.upsert = true;
    return addToOperationsList(this, common.UPDATE, operation);
  }
  // Remove operations already in wire format (q present)
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
    // limit:1 removes a single document, limit:0 removes all matches
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }
  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }
  // Insert operations; assign a client-side _id when one is missing so it
  // can be reported back in the result
  if(op.insertOne && op.insertOne.document == null) {
    // Legacy shape: the insertOne value IS the document
    if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    // Crud spec shape: document nested under .document
    if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }
  if(op.insertMany) {
    // Queue each document individually; note this branch returns undefined
    for(var i = 0; i < op.insertMany.length; i++) {
      if(op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }
    return;
  }
  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
/**
 * Queue a single insert document on the ordered bulk operation.
 *
 * @param {object} doc the document to insert
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
OrderedBulkOperation.prototype.insert = function(document) {
  // Assign a client-side ObjectID when the caller did not supply an _id
  if(document._id == null) {
    document._id = new ObjectID();
  }
  return addToOperationsList(this, common.INSERT, document);
}
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
OrderedBulkOperation.prototype.find = function(selector) {
  if(!selector) throw toError("Bulk find operation must specify a selector");
  // Stash the selector; the returned operator object consumes it on the
  // next update/remove call
  this.s.currentOp = { selector: selector };
  return new FindOperatorsOrdered(this);
}
// Number of operations queued on the bulk so far
Object.defineProperty(OrderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});
//
// Execute the next write command in the chain. Batches run strictly in
// order; execution stops at the first write error (ordered semantics).
var executeCommands = function(self, callback) {
  // All batches drained: report the merged result
  if(self.s.batches.length == 0) {
    return callback(null, new BulkWriteResult(self.s.bulkResult));
  }
  // Ordered execution of the command
  var batch = self.s.batches.shift();
  var resultHandler = function(err, result) {
    // If we have an error
    if(err) err.ok = 0;
    // Merge the results together; a non-null merge result signals a
    // terminal condition, so return the accumulated result immediately
    var mergeResult = mergeBatchResults(true, batch, self.s.bulkResult, err, result);
    if(mergeResult != null) {
      return callback(null, new BulkWriteResult(self.s.bulkResult));
    }
    // Ordered semantics: any write error terminates the remaining batches
    if(self.s.bulkResult.writeErrors.length > 0) {
      return callback(self.s.bulkResult.writeErrors[0], new BulkWriteResult(self.s.bulkResult));
    }
    // Execute the next command in line
    executeCommands(self, callback);
  }
  var finalOptions = {ordered: true}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }
  try {
    // Dispatch the batch to the matching topology write primitive
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}
/**
 * The callback format for results
 * @callback OrderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the ordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {OrderedBulkOperation~resultCallback} callback The result callback
 * @throws {MongoError}
 * @return {null}
 */
OrderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  // Fix: toError is a factory, not a constructor — drop the spurious `new`
  // so this matches the unordered implementation
  if(this.s.executed) throw toError("batch cannot be re-executed");
  // Support execute(callback) as well as execute(writeConcern, callback)
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }
  // Flush the partially filled batch onto the execution list
  if(this.s.currentBatch) this.s.batches.push(this.s.currentBatch);
  // If we have no operations in the bulk raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }
  // Fix: mark the bulk as executed so the re-execution guard above can
  // actually fire; the original never set this flag
  this.s.executed = true;
  // Execute the commands
  return executeCommands(this, callback);
}
/**
 * Returns an ordered batch object
 * @ignore
 */
var initializeOrderedBulkOp = function(topology, collection, options) {
  return new OrderedBulkOperation(topology, collection, options);
}

module.exports = initializeOrderedBulkOp;

482
server/node_modules/mongodb/lib/bulk/unordered.js generated vendored Executable file
View File

@@ -0,0 +1,482 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
 * Create a FindOperatorsUnordered instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @property {number} length Get the number of operations in the bulk.
 * @return {FindOperatorsUnordered} a FindOperatorsUnordered instance.
 */
var FindOperatorsUnordered = function(self) {
  // Share the parent bulk operation's internal state so operations queued
  // through this operator land in the same batches
  this.s = self.s;
}
/**
 * Queue a multi-document update against the previously stored selector.
 *
 * @method
 * @param {object} doc update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.update = function(updateDocument) {
  // Honor a previously requested upsert; anything non-boolean means false
  var doUpsert = this.s.currentOp.upsert === true;
  // Build the multi-update command from the pending selector
  var updateCommand = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: true
    , upsert: doUpsert
  };
  // The pending find state has been consumed
  this.s.currentOp = null;
  // Queue the update for execution
  return addToOperationsList(this, common.UPDATE, updateCommand);
}
/**
 * Queue a single-document update against the previously stored selector.
 *
 * @method
 * @param {object} doc update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.updateOne = function(updateDocument) {
  // Honor a previously requested upsert; anything non-boolean means false
  var doUpsert = this.s.currentOp.upsert === true;
  // Build the single-document update command from the pending selector
  var updateCommand = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: false
    , upsert: doUpsert
  };
  // The pending find state has been consumed
  this.s.currentOp = null;
  // Queue the update for execution
  return addToOperationsList(this, common.UPDATE, updateCommand);
}
/**
 * Add a replace one operation to the bulk operation
 *
 * @method
 * @param {object} doc the new document to replace the existing one with
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.replaceOne = function(updateDocument) {
  // Fix: propagate updateOne's return value so the documented fluent
  // chaining works; the original dropped it and returned undefined
  return this.updateOne(updateDocument);
}
/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.upsert = function() {
  // Flag the pending find operation as an upsert; chainable
  this.s.currentOp.upsert = true;
  return this;
}
/**
 * Queue a remove of at most one document matching the stored selector.
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.removeOne = function() {
  // limit 1 removes a single matching document
  var removeCommand = { q: this.s.currentOp.selector, limit: 1 };
  // The pending find state has been consumed
  this.s.currentOp = null;
  return addToOperationsList(this, common.REMOVE, removeCommand);
}
/**
 * Queue a remove of every document matching the stored selector.
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.remove = function() {
  // limit 0 removes all matching documents
  var removeCommand = { q: this.s.currentOp.selector, limit: 0 };
  // The pending find state has been consumed
  this.s.currentOp = null;
  return addToOperationsList(this, common.REMOVE, removeCommand);
}
//
// Add an operation to the unordered operations list, coalescing it into
// an open batch of the same type or rolling over to a new batch when a
// size limit is hit.
//
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);
  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  // Holds the current batch
  _self.s.currentBatch = null;
  // Unordered bulks keep one open batch per operation type so same-typed
  // operations coalesce regardless of the order they were added in
  if(docType == common.INSERT) {
    _self.s.currentBatch = _self.s.currentInsertBatch;
  } else if(docType == common.UPDATE) {
    _self.s.currentBatch = _self.s.currentUpdateBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentBatch = _self.s.currentRemoveBatch;
  }
  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  // Roll over to a fresh batch when count, byte size, or type limits are hit
  if(((_self.s.currentBatch.size + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatch.sizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);
    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  }
  // We have an array of documents
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  } else {
    _self.s.currentBatch.operations.push(document);
    // originalIndexes records the pre-increment position of this operation
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }
  // Save back the current Batch to the right type
  if(docType == common.INSERT) {
    _self.s.currentInsertBatch = _self.s.currentBatch;
    // NOTE(review): currentIndex was already incremented above, so the
    // index recorded here is the operation's original position + 1 —
    // verify this off-by-one against the ordered implementation before
    // relying on insertedIds indexes
    _self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
  } else if(docType == common.UPDATE) {
    _self.s.currentUpdateBatch = _self.s.currentBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentRemoveBatch = _self.s.currentBatch;
  }
  // Update current batch size
  _self.s.currentBatch.size = _self.s.currentBatch.size + 1;
  _self.s.currentBatch.sizeBytes = _self.s.currentBatch.sizeBytes + bsonSize;
  // Return self
  return _self;
}
/**
 * Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @return {UnorderedBulkOperation} a UnorderedBulkOperation instance.
 */
var UnorderedBulkOperation = function(topology, collection, options) {
  options = options == null ? {} : options;
  // Contains reference to self
  var self = this;
  // Get the namespace for the write operations
  var namespace = collection.collectionName;
  // Used to mark operation as executed
  var executed = false;

  // Current item
  // var currentBatch = null;
  var currentOp = null;
  // NOTE(review): currentIndex/batches and the three current*Batch locals
  // below are shadowed by the literal initializers inside this.s and appear
  // unused — confirm before removing
  var currentIndex = 0;
  var batches = [];

  // The current Batches for the different operations
  var currentInsertBatch = null;
  var currentUpdateBatch = null;
  var currentRemoveBatch = null;

  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;

  // Get the capabilities
  var capabilities = topology.capabilities();

  // Server limits for batch splitting, taken from the isMaster response
  var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
  var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;

  // Get the write concern
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);

  // Final results
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };

  // Internal state
  this.s = {
    // Final result
      bulkResult: bulkResult
    // Current batch state
    , currentInsertBatch: null
    , currentUpdateBatch: null
    , currentRemoveBatch: null
    , currentBatch: null
    , currentIndex: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Capabilities
    , capabilities: capabilities
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: namespace
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation
    , currentOp: currentOp
    // Executed
    , executed: executed
    // Collection
    , collection: collection
  }
}
/**
 * Queue a single insert document on the unordered bulk operation.
 *
 * @param {object} doc the document to insert
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
UnorderedBulkOperation.prototype.insert = function(document) {
  // Assign a client-side ObjectID when the caller did not supply an _id
  if(document._id == null) {
    document._id = new ObjectID();
  }
  return addToOperationsList(this, common.INSERT, document);
}
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
UnorderedBulkOperation.prototype.find = function(selector) {
  if(!selector) throw toError("Bulk find operation must specify a selector");
  // Stash the selector; the returned operator object consumes it on the
  // next update/remove call
  this.s.currentOp = { selector: selector };
  return new FindOperatorsUnordered(this);
}
// Number of operations queued on the bulk so far
Object.defineProperty(UnorderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});
// Add a raw operation (wire-format or CRUD-spec shaped) to the unordered
// bulk. The single top-level key of `op` names the operation type.
UnorderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];
  // Wire-format update operations: the caller already supplied q/u directly
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    // updateOne/replaceOne affect a single document; updateMany sets multi
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }
  // Crud spec update format ({filter, update/replacement, upsert})
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    if(op[key].upsert) operation.upsert = true;
    return addToOperationsList(this, common.UPDATE, operation);
  }
  // Wire-format remove operations; limit 1 removes one match, limit 0 removes all
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }
  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }
  // Insert operations
  if(op.insertOne && op.insertOne.document == null) {
    if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }
  if(op.insertMany) {
    for(var i = 0; i < op.insertMany.length; i++) {
      // Fix: generate client-side _ids like insert() and the ordered raw()
      // do, so insertedIds can be reported for insertMany documents
      if(op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }
    return;
  }
  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
//
// Execute a single batch against the topology and fold its outcome into
// the shared bulkResult via mergeBatchResults.
var executeBatch = function(self, batch, callback) {
  var finalOptions = {ordered: false}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }
  var resultHandler = function(err, result) {
    // If we have an error
    if(err) err.ok = 0;
    // Errors are merged into bulkResult rather than propagated directly
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, result));
  }
  try {
    // Dispatch the batch to the matching topology write primitive
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}
//
// Execute all the batches concurrently (unordered semantics); the callback
// fires once the last batch completes, surfacing the first recorded write
// error, if any.
var executeBatches = function(self, callback) {
  var numberOfCommandsToExecute = self.s.batches.length;
  // Execute over all the batches
  for(var i = 0; i < self.s.batches.length; i++) {
    executeBatch(self, self.s.batches[i], function(err, result) {
      // Count down outstanding batches
      numberOfCommandsToExecute = numberOfCommandsToExecute - 1;
      // All batches done: report the merged result
      if(numberOfCommandsToExecute == 0) {
        var error = self.s.bulkResult.writeErrors.length > 0 ? self.s.bulkResult.writeErrors[0] : null;
        callback(error, new BulkWriteResult(self.s.bulkResult));
      }
    });
  }
}
/**
 * The callback format for results
 * @callback UnorderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the unordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {UnorderedBulkOperation~resultCallback} callback The result callback
 * @throws {MongoError}
 * @return {null}
 */
UnorderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  if(this.s.executed) throw toError("batch cannot be re-executed");
  // Support execute(callback) as well as execute(writeConcern, callback)
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }
  // Flush any partially filled per-type batches onto the execution list
  if(this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
  if(this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
  if(this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);
  // If we have no operations in the bulk raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }
  // Fix: mark the bulk as executed so the re-execution guard above can
  // actually fire; the original never set this flag
  this.s.executed = true;
  // Execute batches
  return executeBatches(this, function(err, result) {
    callback(err, result);
  });
}
/**
 * Returns an unordered batch object
 * @ignore
 */
var initializeUnorderedBulkOp = function(topology, collection, options) {
  return new UnorderedBulkOperation(topology, collection, options);
}

module.exports = initializeUnorderedBulkOp;

2079
server/node_modules/mongodb/lib/collection.js generated vendored Executable file

File diff suppressed because it is too large Load Diff

279
server/node_modules/mongodb/lib/command_cursor.js generated vendored Executable file
View File

@@ -0,0 +1,279 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, toError = require('./utils').toError
, getSingleProperty = require('./utils').getSingleProperty
, formattedOrderClause = require('./utils').formattedOrderClause
, handleCallback = require('./utils').handleCallback
, Logger = require('mongodb-core').Logger
, EventEmitter = require('events').EventEmitter
, ReadPreference = require('./read_preference')
, MongoError = require('mongodb-core').MongoError
, Readable = require('stream').Readable || require('readable-stream').Readable
// , CoreCursor = require('mongodb-core').Cursor
, CoreCursor = require('./cursor')
, Query = require('mongodb-core').Query
, CoreReadPreference = require('mongodb-core').ReadPreference;
/**
* @fileOverview The **CommandCursor** class is an internal class that embodies a
* generalized cursor based on a MongoDB command allowing for iteration over the
* results returned. It supports one by one document iteration, conversion to an
* array or can be iterated as a Node 0.10.X or higher stream
*
* **CommandCursor Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Create a collection we want to drop later
* var col = db.collection('listCollectionsExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
*
* // List the database collections available
* db.listCollections().toArray(function(err, items) {
* test.equal(null, err);
* db.close();
* });
* });
* });
*/
/**
* Namespace provided by the browser.
* @external Readable
*/
/**
 * Creates a new Command Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @extends external:Readable
 * @fires CommandCursor#data
 * @fires CommandCursor#end
 * @fires CommandCursor#close
 * @fires CommandCursor#readable
 * @return {CommandCursor} an CommandCursor instance.
 */
var CommandCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  // Initialize the core cursor with the same arguments
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var self = this;
  var state = CommandCursor.INIT;
  var streamOptions = {};

  // MaxTimeMS
  var maxTimeMS = null;

  // Set up the cursor as an object-mode Readable stream
  Readable.call(this, {objectMode: true});

  // Internal state
  this.s = {
    // MaxTimeMS
      maxTimeMS: maxTimeMS
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology Options
    , topologyOptions: topologyOptions
  }
}
/**
* CommandCursor stream data event, fired for each document in the cursor.
*
* @event CommandCursor#data
* @type {object}
*/
/**
* CommandCursor stream end event
*
* @event CommandCursor#end
* @type {null}
*/
/**
* CommandCursor stream close event
*
* @event CommandCursor#close
* @type {null}
*/
/**
* CommandCursor stream readable event
*
* @event CommandCursor#readable
* @type {null}
*/
// Inherit from Readable
inherits(CommandCursor, Readable);

// Methods to copy over from the core cursor prototype
var methodsToInherit = ['next', 'each', 'forEach', 'toArray'
  , 'rewind', 'bufferedCount', 'readBufferedDocuments', 'close', 'isClosed'];

// Only inherit the methods we need
for(var i = 0; i < methodsToInherit.length; i++) {
  CommandCursor.prototype[methodsToInherit[i]] = CoreCursor.prototype[methodsToInherit[i]];
}
/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {CommandCursor}
 */
CommandCursor.prototype.batchSize = function(value) {
  // A closed or dead cursor can no longer be configured
  if(this.s.state == CommandCursor.CLOSED || this.isDead()) {
    throw new MongoError("Cursor is closed");
  }
  if(typeof value != 'number') {
    throw new MongoError("batchSize requires an integer");
  }
  // Mirror the value onto the wire command when it carries a cursor document
  if(this.s.cmd.cursor) {
    this.s.cmd.cursor.batchSize = value;
  }
  this.setCursorBatchSize(value);
  return this;
}
/**
 * Add a maxTimeMS stage to the aggregation pipeline
 * @method
 * @param {number} value The state maxTimeMS value.
 * @return {CommandCursor}
 */
CommandCursor.prototype.maxTimeMS = function(value) {
  // Only set maxTimeMS on servers whose wire protocol supports it
  var wireVersion = this.s.topology.lastIsMaster().minWireVersion;
  if(wireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  return this;
}

// Legacy alias for toArray
CommandCursor.prototype.get = CommandCursor.prototype.toArray;
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @function CommandCursor.prototype.next
* @param {CommandCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Set the new batchSize of the cursor
* @function CommandCursor.prototype.setBatchSize
* @param {number} value The new batchSize for the cursor
* @return {null}
*/
/**
* Get the batchSize of the cursor
* @function CommandCursor.prototype.batchSize
* @param {number} value The current batchSize for the cursor
* @return {null}
*/
/**
* The callback format for results
* @callback CommandCursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
 * @param {object[]} documents All the documents that satisfy the cursor.
*/
/**
 * Returns an array of documents. The caller is responsible for making sure that there
 * is enough memory to store the results. Note that the array only contains partial
 * results when this cursor had been previously accessed.
* @method CommandCursor.prototype.toArray
* @param {CommandCursor~toArrayResultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* The callback format for results
* @callback CommandCursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
 * not all of the elements will be iterated if this cursor had been previously accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit the memory.
* @method CommandCursor.prototype.each
* @param {CommandCursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Close the cursor, sending a KillCursor command and emitting close.
* @method CommandCursor.prototype.close
* @param {CommandCursor~resultCallback} [callback] The result callback.
* @return {null}
*/
/**
* Is the cursor closed
* @method CommandCursor.prototype.isClosed
* @return {boolean}
*/
/**
* Clone the cursor
* @function CommandCursor.prototype.clone
* @return {CommandCursor}
*/
/**
* Resets the cursor
* @function CommandCursor.prototype.rewind
* @return {CommandCursor}
*/
/**
* The callback format for the forEach iterator method
* @callback CommandCursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback CommandCursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/*
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method CommandCursor.prototype.forEach
* @param {CommandCursor~iteratorCallback} iterator The iteration callback.
* @param {CommandCursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {null}
*/
// Cursor lifecycle states
CommandCursor.INIT = 0;
CommandCursor.OPEN = 1;
CommandCursor.CLOSED = 2;

module.exports = CommandCursor;

836
server/node_modules/mongodb/lib/cursor.js generated vendored Executable file
View File

@@ -0,0 +1,836 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, toError = require('./utils').toError
, getSingleProperty = require('./utils').getSingleProperty
, formattedOrderClause = require('./utils').formattedOrderClause
, handleCallback = require('./utils').handleCallback
, Logger = require('mongodb-core').Logger
, EventEmitter = require('events').EventEmitter
, ReadPreference = require('./read_preference')
, MongoError = require('mongodb-core').MongoError
, Readable = require('stream').Readable || require('readable-stream').Readable
, CoreCursor = require('mongodb-core').Cursor
, Query = require('mongodb-core').Query
, CoreReadPreference = require('mongodb-core').ReadPreference;
/**
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
* allowing for iteration over the results returned from the underlying query. It supports
* one by one document iteration, conversion to an array or can be iterated as a Node 0.10.X
* or higher stream
*
* **CURSORS Cannot directly be instantiated**
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Create a collection we want to drop later
* var col = db.collection('createIndexExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
*
* // Show that duplicate records got dropped
* col.find({}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* db.close();
* });
* });
* });
*/
/**
* Namespace provided by the mongodb-core and node.js
* @external CoreCursor
* @external Readable
*/
// Flags allowed for cursor
var flags = ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'];

/**
 * Creates a new Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @extends external:CoreCursor
 * @extends external:Readable
 * @property {string} sortValue Cursor query sort setting.
 * @property {boolean} timeout Is Cursor able to time out.
 * @property {ReadPreference} readPreference Get cursor ReadPreference.
 * @fires Cursor#data
 * @fires Cursor#end
 * @fires Cursor#close
 * @fires Cursor#readable
 * @return {Cursor} a Cursor instance.
 */
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  // Initialize the core cursor with the same arguments
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var self = this;
  var state = Cursor.INIT;
  var streamOptions = {};

  // Tailable cursor options
  var numberOfRetries = options.numberOfRetries || 5;
  var tailableRetryInterval = options.tailableRetryInterval || 500;
  var currentNumberOfRetries = numberOfRetries;

  // MaxTimeMS
  var maxTimeMS = null;

  // Set up the cursor as an object-mode Readable stream
  Readable.call(this, {objectMode: true});

  // Internal cursor state
  this.s = {
    // MaxTimeMS
      maxTimeMS: null
    // Tailable cursor options
    , numberOfRetries: numberOfRetries
    , tailableRetryInterval: tailableRetryInterval
    , currentNumberOfRetries: currentNumberOfRetries
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology options
    , topologyOptions: topologyOptions
  }

  // Legacy fields kept for backward compatibility with the 1.x driver API
  this.timeout = self.s.options.noCursorTimeout == true;
  this.sortValue = self.s.cmd.sort;
  this.readPreference = self.s.options.readPreference;
}
/**
* Cursor stream data event, fired for each document in the cursor.
*
* @event Cursor#data
* @type {object}
*/
/**
* Cursor stream end event
*
* @event Cursor#end
* @type {null}
*/
/**
* Cursor stream close event
*
* @event Cursor#close
* @type {null}
*/
/**
* Cursor stream readable event
*
* @event Cursor#readable
* @type {null}
*/
// Inherit from Readable
inherits(Cursor, Readable);

// Copy every core cursor method onto the driver-level Cursor prototype
for(var name in CoreCursor.prototype) {
  Cursor.prototype[name] = CoreCursor.prototype[name];
}
/**
 * Set the cursor query
 * @method
 * @param {object} filter The filter object used for the cursor.
 * @return {Cursor}
 */
Cursor.prototype.filter = function(filter) {
  // Disallow changes once the cursor has been opened or torn down
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) {
    throw new MongoError("Cursor is closed");
  }
  this.s.cmd.query = filter;
  return this;
}
/**
 * Add a cursor flag to the cursor
 * @method
 * @param {string} flag The flag to set, must be one of following ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'].
 * @param {boolean} value The flag boolean value.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.addCursorFlag = function(flag, value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  // Fix: the original format strings used a bare "%", so util.format never
  // interpolated the flag name into either error message
  if(flags.indexOf(flag) == -1) throw new MongoError(f("flag %s not a supported flag %s", flag, flags));
  if(typeof value != 'boolean') throw new MongoError(f("flag %s must be a boolean value", flag));
  this.s.cmd[flag] = value;
  return this;
}
/**
 * Add a query modifier to the cursor query
 * @method
 * @param {string} name The query modifier (must start with $, such as $orderby etc)
 * @param {boolean} value The flag boolean value.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.addQueryModifier = function(name, value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  // Fix: the original omitted the `name` argument, so util.format left a
  // literal "%s" in the error message
  if(name[0] != '$') throw new MongoError(f("%s is not a valid query modifier", name));
  // Strip of the $
  var field = name.substr(1);
  // Set on the command
  this.s.cmd[field] = value;
  // Deal with the special case for sort
  if(field == 'orderby') this.s.cmd.sort = this.s.cmd[field];
  return this;
}
/**
 * Add a comment to the cursor query allowing for tracking the comment in the log.
 * @method
 * @param {string} value The comment attached to this query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.comment = function(value) {
  var s = this.s;
  if(s.state == Cursor.CLOSED || s.state == Cursor.OPEN || this.isDead()) {
    throw new MongoError("Cursor is closed");
  }
  s.cmd.comment = value;
  return this;
}

/**
 * Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
 * @method
 * @param {number} value Number of milliseconds to wait before aborting the query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.maxTimeMS = function(value) {
  if(typeof value != 'number') throw new MongoError("maxTimeMS must be a number");
  var s = this.s;
  if(s.state == Cursor.CLOSED || s.state == Cursor.OPEN || this.isDead()) {
    throw new MongoError("Cursor is closed");
  }
  // Record the value both on the cursor state and on the wire command
  s.maxTimeMS = value;
  s.cmd.maxTimeMS = value;
  return this;
}

// Lower-cased alias kept for backward compatibility
Cursor.prototype.maxTimeMs = Cursor.prototype.maxTimeMS;

/**
 * Sets a field projection for the query.
 * @method
 * @param {object} value The field projection object.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.project = function(value) {
  var s = this.s;
  if(s.state == Cursor.CLOSED || s.state == Cursor.OPEN || this.isDead()) {
    throw new MongoError("Cursor is closed");
  }
  s.cmd.fields = value;
  return this;
}
/**
 * Sets the sort order of the cursor query.
 * @method
 * @param {(string|array|object)} keyOrList The key or keys set for the sort.
 * @param {number} [direction] The direction of the sorting (1 or -1).
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.sort = function(keyOrList, direction) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support sorting");
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  var order = keyOrList;
  // A single key plus an explicit direction is normalized to [[key, direction]]
  if(direction != null) {
    order = [[keyOrList, direction]];
  }
  this.s.cmd.sort = order;
  this.sortValue = order;
  return this;
}

/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.batchSize = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support limit");
  // NOTE: unlike limit/skip, batchSize may still be changed on an OPEN cursor
  if(this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("batchSize requires an integer");
  // Record on the wire command and push the value down into the core cursor
  this.s.cmd.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}

/**
 * Set the limit for the cursor.
 * @method
 * @param {number} value The limit for the cursor query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.limit = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support limit");
  if(this.s.state == Cursor.OPEN || this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("limit requires an integer");
  this.s.cmd.limit = value;
  // this.cursorLimit = value;
  this.setCursorLimit(value);
  return this;
}

/**
 * Set the skip for the cursor.
 * @method
 * @param {number} value The skip for the cursor query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.skip = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support skip");
  if(this.s.state == Cursor.OPEN || this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("skip requires an integer");
  this.s.cmd.skip = value;
  this.setCursorSkip(value);
  return this;
}
/**
* The callback format for results
* @callback Cursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null)} result The result object if the command was executed successfully.
*/
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @function external:CoreCursor#next
* @param {Cursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
/**
* Set the new batchSize of the cursor
* @function Cursor.prototype.setBatchSize
* @param {number} value The new batchSize for the cursor
* @return {null}
*/
/**
* Get the batchSize of the cursor
* @function Cursor.prototype.batchSize
* @param {number} value The current batchSize for the cursor
* @return {null}
*/
/**
* Set the new skip value of the cursor
* @function Cursor.prototype.setCursorSkip
* @param {number} value The new skip for the cursor
* @return {null}
*/
/**
* Get the skip value of the cursor
* @function Cursor.prototype.cursorSkip
* @param {number} value The current skip value for the cursor
* @return {null}
*/
/**
* Set the new limit value of the cursor
* @function Cursor.prototype.setCursorLimit
* @param {number} value The new limit for the cursor
* @return {null}
*/
/**
* Get the limit value of the cursor
* @function Cursor.prototype.cursorLimit
* @param {number} value The current limit value for the cursor
* @return {null}
*/
/**
* Clone the cursor
* @function external:CoreCursor#clone
* @return {Cursor}
*/
/**
* Resets the cursor
* @function external:CoreCursor#rewind
* @return {null}
*/
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @method
* @param {Cursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @deprecated
* @return {null}
*/
Cursor.prototype.nextObject = function(callback) {
  var self = this;
  if(this.s.state == Cursor.CLOSED || self.isDead()) return handleCallback(callback, new MongoError("Cursor is closed"));
  // On first use, normalize whatever sort specification was supplied
  if(this.s.state == Cursor.INIT && this.s.cmd.sort) {
    try {
      this.s.cmd.sort = formattedOrderClause(this.s.cmd.sort);
    } catch(err) {
      return handleCallback(callback, err);
    }
  }
  // Get the next object
  self.next(function(err, doc) {
    // Tailable-cursor errors are retried until the retry budget is exhausted
    if(err && err.tailable && self.s.currentNumberOfRetries == 0) return callback(err);
    if(err && err.tailable && self.s.currentNumberOfRetries > 0) {
      self.s.currentNumberOfRetries = self.s.currentNumberOfRetries - 1;
      // Back off for the configured interval before retrying
      return setTimeout(function() {
        self.nextObject(callback);
      }, self.s.tailableRetryInterval);
    }
    self.s.state = Cursor.OPEN;
    if(err) return handleCallback(callback, err);
    handleCallback(callback, null, doc);
  });
}
// Trampoline emptying the number of retrieved items
// without incurring a nextTick operation
var loop = function(self, callback) {
  // No more items we are done
  if(self.bufferedCount() == 0) return;
  // Get the next document
  self.next(callback);
  // Loop: returning the function itself lets the caller keep pumping it
  // in a while-loop without growing the call stack
  return loop;
}
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
* not all of the elements will be iterated if this cursor had been previouly accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit the memory.
* @method
* @deprecated
* @param {Cursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
Cursor.prototype.each = function(callback) {
  // Always iterate from a pristine cursor: rewind and mark as INIT
  this.rewind();
  this.s.state = Cursor.INIT;
  // Hand off to the shared iteration loop
  _each(this, callback);
};
// Run the each loop: deliver every document to `callback`, one at a time
var _each = function(self, callback) {
  if(!callback) throw new MongoError('callback is mandatory');
  // Stop quietly if the cursor has already issued its final notification
  if(self.isNotified()) return;
  if(self.s.state == Cursor.CLOSED || self.isDead()) {
    return handleCallback(callback, new MongoError("Cursor is closed"), null);
  }
  if(self.s.state == Cursor.INIT) self.s.state = Cursor.OPEN;
  // Define function to avoid global scope escape
  var fn = null;
  // Trampoline all the entries
  if(self.bufferedCount() > 0) {
    // Drain every already-buffered document without recursion, then re-enter
    while(fn = loop(self, callback)) fn(self, callback);
    _each(self, callback);
  } else {
    self.next(function(err, item) {
      if(err) return handleCallback(callback, err);
      if(item == null) {
        // Exhausted: close the cursor and signal completion with a null doc
        self.s.state = Cursor.CLOSED;
        return handleCallback(callback, null, null);
      }
      // A callback returning false requests early termination of the loop
      if(handleCallback(callback, null, item) == false) return;
      _each(self, callback);
    })
  }
}
/**
* The callback format for the forEach iterator method
* @callback Cursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback Cursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/**
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method
* @param {Cursor~iteratorCallback} iterator The iteration callback.
* @param {Cursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {null}
*/
Cursor.prototype.forEach = function(iterator, callback) {
  this.each(function(err, doc){
    if(err) {
      callback(err);
      return false;
    }
    if(doc != null) {
      iterator(doc);
      return true;
    }
    // A null doc signals exhaustion; fire the end callback exactly once
    if(doc == null && callback) {
      var done = callback;
      callback = null;
      done(null);
      return false;
    }
  });
}
/**
 * Set the ReadPreference for the cursor.
 * @method
 * @param {(string|ReadPreference)} readPreference The new read preference for the cursor.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.setReadPreference = function(r) {
  if(this.s.state != Cursor.INIT) throw new MongoError('cannot change cursor readPreference after cursor has been accessed');
  // Normalize either a ReadPreference instance or a bare mode string into
  // the core driver's representation
  this.s.options.readPreference = r instanceof ReadPreference
    ? new CoreReadPreference(r.mode, r.tags)
    : new CoreReadPreference(r);
  return this;
}
/**
* The callback format for results
* @callback Cursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object[]} documents All the documents the satisfy the cursor.
*/
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contain partial
* results when this cursor had been previouly accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
* @method
* @param {Cursor~toArrayResultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
Cursor.prototype.toArray = function(callback) {
  var self = this;
  if(!callback) throw new MongoError('callback is mandatory');
  if(self.s.options.tailable) return handleCallback(callback, new MongoError("Tailable cursor cannot be converted to array"), null);
  var docs = [];
  // Always collect from the beginning of the result set
  this.rewind();
  self.s.state = Cursor.INIT;
  // Pull documents one at a time, draining the internal buffer as we go
  var pull = function() {
    self.next(function(err, doc) {
      if(err) return handleCallback(callback, err);
      if(doc == null) {
        // Exhausted: close and hand back everything collected
        self.s.state = Cursor.CLOSED;
        return handleCallback(callback, null, docs);
      }
      docs.push(doc);
      // Grab whatever is already buffered in a single read
      if(self.bufferedCount() > 0) {
        docs = docs.concat(self.readBufferedDocuments(self.bufferedCount()));
      }
      pull();
    })
  }
  pull();
}
/**
* The callback format for results
* @callback Cursor~countResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {number} count The count of documents.
*/
/**
* Get the count of documents for this cursor
* @method
* @param {boolean} applySkipLimit Should the count command apply limit and skip settings on the cursor or in the passed in options.
* @param {object} [options=null] Optional settings.
* @param {number} [options.skip=null] The number of documents to skip.
* @param {number} [options.limit=null] The maximum amounts to count before aborting.
* @param {number} [options.maxTimeMS=null] Number of miliseconds to wait before aborting the query.
* @param {string} [options.hint=null] An index name hint for the query.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {Cursor~countResultCallback} callback The result callback.
* @return {null}
*/
Cursor.prototype.count = function(applySkipLimit, opts, callback) {
  var self = this;
  if(typeof opts == 'function') callback = opts, opts = {};
  opts = opts || {};
  // count is only valid on a cursor produced by a find
  // BUG FIX: previously fell through after invoking the callback
  if(self.s.cmd.query == null) return callback(new MongoError("count can only be used with find command"));
  if(typeof applySkipLimit == 'function') {
    callback = applySkipLimit;
    applySkipLimit = true;
  }
  // BUG FIX: a second `var opts = {}` here used to discard every caller
  // supplied option (skip, limit, maxTimeMS, hint, readPreference) that the
  // JSDoc promises; merge the cursor's skip/limit into the existing opts
  if(applySkipLimit) {
    if(typeof this.cursorSkip() == 'number') opts.skip = this.cursorSkip();
    if(typeof this.cursorLimit() == 'number') opts.limit = this.cursorLimit();
  }
  // Build the count command against this cursor's collection and query
  var delimiter = self.s.ns.indexOf('.');
  var command = {
    'count': self.s.ns.substr(delimiter+1), 'query': self.s.cmd.query
  }
  // If maxTimeMS set (BUG FIX: previously tested an undeclared `maxTimeMS`
  // variable, so the value stored by Cursor#maxTimeMS was never applied)
  if(typeof self.s.maxTimeMS == 'number') {
    command.maxTimeMS = self.s.maxTimeMS;
  } else if(typeof opts.maxTimeMS == 'number') {
    command.maxTimeMS = opts.maxTimeMS;
  }
  // Get a server
  var server = self.s.topology.getServer(opts);
  // Get a connection
  var connection = self.s.topology.getConnection(opts);
  // Get the callbacks
  var callbacks = server.getCallbacks();
  // Merge in any options
  if(opts.skip) command.skip = opts.skip;
  if(opts.limit) command.limit = opts.limit;
  if(self.s.options.hint) command.hint = self.s.options.hint;
  // Build Query object
  var query = new Query(self.s.bson, f("%s.$cmd", self.s.ns.substr(0, delimiter)), command, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false
  });
  // Set up callback fired when the server answers this request id
  callbacks.register(query.requestId, function(err, result) {
    if(err) return handleCallback(callback, err);
    if(result.documents.length == 1
      && (result.documents[0].errmsg
      || result.documents[0].err
      || result.documents[0]['$err'])) return callback(MongoError.create(result.documents[0]));
    handleCallback(callback, null, result.documents[0].n);
  });
  // Write the initial command out
  connection.write(query.toBin());
};
/**
 * Close the cursor, sending a KillCursor command and emitting close.
 * @method
 * @param {Cursor~resultCallback} [callback] The result callback.
 * @return {null}
 */
Cursor.prototype.close = function(callback) {
  this.s.state = Cursor.CLOSED;
  // Kill the cursor (terminates the server-side cursor via the core layer)
  this.kill();
  // Emit the close event for the cursor
  this.emit('close');
  // Callback if provided
  if(callback) return handleCallback(callback, null, this);
}

/**
 * Is the cursor closed
 * @method
 * @return {boolean}
 */
Cursor.prototype.isClosed = function() {
  return this.isDead();
}

/**
 * Tear the cursor down as a stream: pause, close, then surface the error.
 * @method
 * @param {MongoError} [err] Optional error emitted after closing.
 */
Cursor.prototype.destroy = function(err) {
  this.pause();
  this.close();
  if(err) this.emit('error', err);
}

/**
 * Return a modified Readable stream including a possible transform method.
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {function} [options.transform=null] A transformation method applied to each document emitted by the stream.
 * @return {Cursor}
 */
Cursor.prototype.stream = function(options) {
  // Options are consumed later by _read; the cursor itself is the stream
  this.s.streamOptions = options || {};
  return this;
}

/**
 * Execute the explain for the cursor
 * @method
 * @param {Cursor~resultCallback} [callback] The result callback.
 * @return {null}
 */
Cursor.prototype.explain = function(callback) {
  // Flag the command as an explain and fetch the single explain document
  this.s.cmd.explain = true;
  this.next(callback);
}
// Readable-stream implementation: pull one document per _read request
Cursor.prototype._read = function(n) {
  var self = this;
  if(self.s.state == Cursor.CLOSED || self.isDead()) {
    // Pushing null signals end-of-stream to the Readable machinery
    return self.push(null);
  }
  // Get the next item
  self.nextObject(function(err, result) {
    if(err) {
      if(!self.isDead()) self.close();
      // Only emit 'error' when a listener exists, to avoid an uncaught throw
      if(self.listeners('error') && self.listeners('error').length > 0) {
        self.emit('error', err);
      }
      // Emit end event
      return self.emit('end');
    }
    // If we provided a transformation method, push the transformed document
    if(typeof self.s.streamOptions.transform == 'function' && result != null) {
      return self.push(self.s.streamOptions.transform(result));
    }
    // Return the result (null here also ends the stream)
    self.push(result);
  });
}
Object.defineProperty(Cursor.prototype, 'namespace', {
  enumerable: true,
  get: function() {
    if (!this || !this.s) return null;
    // TODO: refactor this logic into core
    // Split "database.collection" on the first dot
    var ns = this.s.ns || '';
    var dot = ns.indexOf('.');
    if (dot < 0) {
      // No dot: treat the whole namespace as the database name
      return { database: this.s.ns, collection: '' };
    }
    return { database: ns.substr(0, dot), collection: ns.substr(dot + 1) };
  }
});
/**
* The read() method pulls some data out of the internal buffer and returns it. If there is no data available, then it will return null.
* @function external:Readable#read
* @param {number} size Optional argument to specify how much data to read.
* @return {(String | Buffer | null)}
*/
/**
* Call this function to cause the stream to return strings of the specified encoding instead of Buffer objects.
* @function external:Readable#setEncoding
* @param {string} encoding The encoding to use.
* @return {null}
*/
/**
* This method will cause the readable stream to resume emitting data events.
* @function external:Readable#resume
* @return {null}
*/
/**
* This method will cause a stream in flowing-mode to stop emitting data events. Any data that becomes available will remain in the internal buffer.
* @function external:Readable#pause
* @return {null}
*/
/**
* This method pulls all the data out of a readable stream, and writes it to the supplied destination, automatically managing the flow so that the destination is not overwhelmed by a fast readable stream.
* @function external:Readable#pipe
* @param {Writable} destination The destination for writing data
* @param {object} [options] Pipe options
* @return {null}
*/
/**
* This method will remove the hooks set up for a previous pipe() call.
* @function external:Readable#unpipe
* @param {Writable} [destination] The destination for writing data
* @return {null}
*/
/**
* This is useful in certain cases where a stream is being consumed by a parser, which needs to "un-consume" some data that it has optimistically pulled out of the source, so that the stream can be passed on to some other party.
* @function external:Readable#unshift
* @param {(Buffer|string)} chunk Chunk of data to unshift onto the read queue.
* @return {null}
*/
/**
* Versions of Node prior to v0.10 had streams that did not implement the entire Streams API as it is today. (See "Compatibility" below for more information.)
* @function external:Readable#wrap
* @param {Stream} stream An "old style" readable stream.
* @return {null}
*/
// Cursor lifecycle states
Cursor.INIT = 0;     // created, not yet iterated
Cursor.OPEN = 1;     // at least one document has been requested
Cursor.CLOSED = 2;   // exhausted or explicitly closed
Cursor.GET_MORE = 3; // fetching an additional batch
module.exports = Cursor;

1352
server/node_modules/mongodb/lib/db.js generated vendored Executable file

File diff suppressed because it is too large Load Diff

236
server/node_modules/mongodb/lib/gridfs/chunk.js generated vendored Executable file
View File

@@ -0,0 +1,236 @@
"use strict";
var Binary = require('mongodb-core').BSON.Binary,
ObjectID = require('mongodb-core').BSON.ObjectID;
/**
 * Class for representing a single chunk in GridFS.
 *
 * @class
 *
 * @param file {GridStore} The {@link GridStore} object holding this chunk.
 * @param mongoObject {object} The mongo object representation of this chunk.
 * @param writeConcern {object} [writeConcern] Write concern applied when saving (defaults to {w:1}).
 *
 * @throws Error when the type of data field for {@link mongoObject} is not
 *     supported. Currently supported types for data field are instances of
 *     {@link String}, {@link Array}, {@link Buffer} and {@link Binary}
 *     from the bson module
 *
 * @see Chunk#buildMongoObject
 */
var Chunk = function(file, mongoObject, writeConcern) {
  // BUG FIX: the no-`new` path previously dropped the writeConcern argument
  if(!(this instanceof Chunk)) return new Chunk(file, mongoObject, writeConcern);
  this.file = file;
  var self = this;
  var mongoObjectFinal = mongoObject == null ? {} : mongoObject;
  this.writeConcern = writeConcern || {w:1};
  this.objectId = mongoObjectFinal._id == null ? new ObjectID() : mongoObjectFinal._id;
  this.chunkNumber = mongoObjectFinal.n == null ? 0 : mongoObjectFinal.n;
  this.data = new Binary();
  if(mongoObjectFinal.data == null) {
    // No data supplied: keep the empty Binary created above
  } else if(typeof mongoObjectFinal.data == "string") {
    var buffer = new Buffer(mongoObjectFinal.data.length);
    buffer.write(mongoObjectFinal.data, 'binary', 0);
    this.data = new Binary(buffer);
  } else if(Array.isArray(mongoObjectFinal.data)) {
    var buffer = new Buffer(mongoObjectFinal.data.length);
    buffer.write(mongoObjectFinal.data.join(''), 'binary', 0);
    this.data = new Binary(buffer);
  } else if(mongoObjectFinal.data._bsontype === 'Binary') {
    this.data = mongoObjectFinal.data;
  } else if(Buffer.isBuffer(mongoObjectFinal.data)) {
    // BUG FIX: a Buffer was previously accepted but silently discarded,
    // leaving this.data as an empty Binary; wrap it like the other cases
    this.data = new Binary(mongoObjectFinal.data);
  } else {
    throw Error("Illegal chunk format");
  }
  // Update position: the read/write head starts at the beginning
  this.internalPosition = 0;
};
/**
 * Writes data to this chunk and advances the read/write head.
 *
 * @param data {string} the data to write
 * @param callback {function(*, GridStore)} Called after writing; receives
 *     null and a reference to this chunk. When omitted, the chunk itself is
 *     returned instead.
 */
Chunk.prototype.write = function(data, callback) {
  this.data.write(data, this.internalPosition);
  // Move the head to the new end of the data
  this.internalPosition = this.data.length();
  if(callback != null) return callback(null, this);
  return this;
};

/**
 * Reads data and advances the read/write head.
 *
 * @param length {number} The length of data to read.
 *
 * @return {string} The data read if the given length will not exceed the end
 *     of the chunk. Returns an empty String otherwise.
 */
Chunk.prototype.read = function(length) {
  // A missing or zero length means "read everything up to the end"
  if(length == null || length == 0) length = this.length();
  if(this.length() - this.internalPosition + 1 >= length) {
    var data = this.data.read(this.internalPosition, length);
    this.internalPosition += length;
    return data;
  }
  return '';
};
/**
 * Reads `length` bytes as a Buffer-like slice and advances the read head.
 * Returns null when fewer than `length` bytes remain in the chunk.
 */
Chunk.prototype.readSlice = function(length) {
  if ((this.length() - this.internalPosition) >= length) {
    var data = null;
    if (this.data.buffer != null) { //Pure BSON
      data = this.data.buffer.slice(this.internalPosition, this.internalPosition + length);
    } else { //Native BSON
      data = new Buffer(length);
      // NOTE(review): readInto appears to return the number of bytes copied,
      // which then drives the head advance below — confirm against the bson lib
      length = this.data.readInto(data, this.internalPosition);
    }
    this.internalPosition = this.internalPosition + length;
    return data;
  } else {
    return null;
  }
};
/**
 * Checks if the read/write head is at the end.
 *
 * @return {boolean} Whether the read/write head has reached the end of this
 *     chunk.
 */
Chunk.prototype.eof = function() {
  // At the end exactly when the head has consumed every byte
  return this.internalPosition == this.length();
};

/**
 * Reads one character from the data of this chunk and advances the read/write
 * head.
 *
 * @return {string} a single character of data, or the empty string when the
 *     head is already at the end of the chunk.
 */
Chunk.prototype.getc = function() {
  return this.read(1);
};

/**
 * Clears the contents of the data in this chunk and resets the read/write head
 * to the initial position.
 */
Chunk.prototype.rewind = function() {
  this.internalPosition = 0;
  this.data = new Binary();
};
/**
 * Saves this chunk to the database. Also overwrites existing entries having the
 * same id as this chunk.
 *
 * @param options {object} [options] Optional write options merged over the
 *     chunk's write concern; may be omitted.
 * @param callback {function(*, GridStore)} This will be called after executing
 *     this method. The first parameter will contain null and the second one
 *     will contain a reference to this object.
 */
Chunk.prototype.save = function(options, callback) {
  var self = this;
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }
  self.file.chunkCollection(function(err, collection) {
    if(err) return callback(err);
    // Merge the passed in options with the chunk's write concern
    var writeOptions = {};
    for(var name in options) writeOptions[name] = options[name];
    for(var name in self.writeConcern) writeOptions[name] = self.writeConcern[name];
    // Remove any existing chunk with this id before inserting the new data
    collection.remove({'_id':self.objectId}, writeOptions, function(err, result) {
      if(err) return callback(err);
      if(self.data.length() > 0) {
        self.buildMongoObject(function(mongoObject) {
          // NOTE(review): dead code removed — a local `options` object
          // ({forceServerObjectId:true} plus the write concern) was built here
          // but never passed anywhere; insert continues to receive writeOptions
          collection.insert(mongoObject, writeOptions, function(err, collection) {
            callback(err, self);
          });
        });
      } else {
        // Nothing to persist for an empty chunk
        callback(null, self);
      }
    });
  });
};
/**
 * Creates a mongoDB object representation of this chunk.
 *
 * @param callback {function(Object)} Receives the built object, shaped as:
 *
 *        <pre><code>
 *        {
 *          '_id' : , // {number} id for this chunk
 *          'files_id' : , // {number} foreign key to the file collection
 *          'n' : , // {number} chunk number
 *          'data' : , // {bson#Binary} the chunk data itself
 *        }
 *        </code></pre>
 *
 * @see <a href="http://www.mongodb.org/display/DOCS/GridFS+Specification#GridFSSpecification-{{chunks}}">MongoDB GridFS Chunk Object Structure</a>
 */
Chunk.prototype.buildMongoObject = function(callback) {
  var doc = {
    'files_id': this.file.fileId,
    'n': this.chunkNumber,
    'data': this.data
  };
  // Carry over an explicitly assigned ObjectId when present
  if(this.objectId != null) doc._id = this.objectId;
  callback(doc);
};
/**
 * @return {number} the length of the data held by this chunk
 */
Chunk.prototype.length = function() {
  return this.data.length();
};
/**
 * The position of the read/write head
 * @name position
 * @lends Chunk#
 * @field
 */
Object.defineProperty(Chunk.prototype, "position", { enumerable: true
  , get: function () {
      return this.internalPosition;
    }
  , set: function(value) {
      this.internalPosition = value;
    }
});
/**
 * The default chunk size: 255 * 1024 bytes (255 KiB)
 * @constant
 */
Chunk.DEFAULT_CHUNK_SIZE = 1024 * 255;
module.exports = Chunk;

1582
server/node_modules/mongodb/lib/gridfs/grid_store.js generated vendored Executable file

File diff suppressed because it is too large Load Diff

413
server/node_modules/mongodb/lib/mongo_client.js generated vendored Executable file
View File

@@ -0,0 +1,413 @@
"use strict";
var parse = require('./url_parser')
, Server = require('./server')
, Mongos = require('./mongos')
, ReplSet = require('./replset')
, ReadPreference = require('./read_preference')
, Db = require('./db');
/**
* @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB.
*
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new MongoClient instance
* @class
* @return {MongoClient} a MongoClient instance.
*/
function MongoClient() {
  /**
   * The callback format for results
   * @callback MongoClient~connectCallback
   * @param {MongoError} error An error instance representing the error during the execution.
   * @param {Db} db The connected database.
   */
  /**
   * Connect to MongoDB using a url as documented at
   *
   *  docs.mongodb.org/manual/reference/connection-string/
   *
   * Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
   *
   * @method
   * @param {string} url The connection URI string
   * @param {object} [options=null] Optional settings.
   * @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
   * @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
   * @param {object} [options.server=null] A hash of options to set on the server objects, see **Server** constructor**
   * @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet** constructor**
   * @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos** constructor**
   * @param {MongoClient~connectCallback} callback The command result callback
   * @return {null}
   */
  // Instance method simply delegates to the static MongoClient.connect
  this.connect = MongoClient.connect;
}
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @static
* @param {string} url The connection URI string
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
* @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
* @param {object} [options.server=null] A hash of options to set on the server objects, see **Server** constructor**
* @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet** constructor**
* @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos** constructor**
* @param {MongoClient~connectCallback} callback The command result callback
* @return {null}
*/
MongoClient.connect = function(url, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] == 'function' ? args.pop() : null;
options = args.length ? args.shift() : null;
options = options || {};
// Set default empty server options
var serverOptions = options.server || {};
var mongosOptions = options.mongos || {};
var replSetServersOptions = options.replSet || options.replSetServers || {};
var dbOptions = options.db || {};
// If callback is null throw an exception
if(callback == null)
throw new Error("no callback function provided");
// Parse the string
var object = parse(url, options);
// Merge in any options for db in options object
if(dbOptions) {
for(var name in dbOptions) object.db_options[name] = dbOptions[name];
}
// Added the url to the options
object.db_options.url = url;
// Merge in any options for server in options object
if(serverOptions) {
for(var name in serverOptions) object.server_options[name] = serverOptions[name];
}
// Merge in any replicaset server options
if(replSetServersOptions) {
for(var name in replSetServersOptions) object.rs_options[name] = replSetServersOptions[name];
}
if(replSetServersOptions.ssl
|| replSetServersOptions.sslValidate
|| replSetServersOptions.sslCA
|| replSetServersOptions.sslCert
|| replSetServersOptions.sslKey
|| replSetServersOptions.sslPass) {
object.server_options.ssl = replSetServersOptions.ssl;
object.server_options.sslValidate = replSetServersOptions.sslValidate;
object.server_options.sslCA = replSetServersOptions.sslCA;
object.server_options.sslCert = replSetServersOptions.sslCert;
object.server_options.sslKey = replSetServersOptions.sslKey;
object.server_options.sslPass = replSetServersOptions.sslPass;
}
// Merge in any replicaset server options
if(mongosOptions) {
for(var name in mongosOptions) object.mongos_options[name] = mongosOptions[name];
}
if(typeof object.server_options.poolSize == 'number') {
if(!object.mongos_options.poolSize) object.mongos_options.poolSize = object.server_options.poolSize;
if(!object.rs_options.poolSize) object.rs_options.poolSize = object.server_options.poolSize;
}
if(mongosOptions.ssl
|| mongosOptions.sslValidate
|| mongosOptions.sslCA
|| mongosOptions.sslCert
|| mongosOptions.sslKey
|| mongosOptions.sslPass) {
object.server_options.ssl = mongosOptions.ssl;
object.server_options.sslValidate = mongosOptions.sslValidate;
object.server_options.sslCA = mongosOptions.sslCA;
object.server_options.sslCert = mongosOptions.sslCert;
object.server_options.sslKey = mongosOptions.sslKey;
object.server_options.sslPass = mongosOptions.sslPass;
}
// We need to ensure that the list of servers are only either direct members or mongos
// they cannot be a mix of monogs and mongod's
var totalNumberOfServers = object.servers.length;
var totalNumberOfMongosServers = 0;
var totalNumberOfMongodServers = 0;
var serverConfig = null;
var errorServers = {};
// Failure modes
if(object.servers.length == 0) throw new Error("connection string must contain at least one seed host");
// If we have no db setting for the native parser try to set the c++ one first
object.db_options.native_parser = _setNativeParser(object.db_options);
// If no auto_reconnect is set, set it to true as default for single servers
if(typeof object.server_options.auto_reconnect != 'boolean') {
object.server_options.auto_reconnect = true;
}
// If we have more than a server, it could be replicaset or mongos list
// need to verify that it's one or the other and fail if it's a mix
// Connect to all servers and run ismaster
for(var i = 0; i < object.servers.length; i++) {
// Set up socket options
var providedSocketOptions = object.server_options.socketOptions || {};
var _server_options = {
poolSize:1
, socketOptions: {
connectTimeoutMS: providedSocketOptions.connectTimeoutMS || 30000
, socketTimeoutMS: providedSocketOptions.socketTimeoutMS || 30000
}
, auto_reconnect:false};
// Ensure we have ssl setup for the servers
if(object.server_options.ssl) {
_server_options.ssl = object.server_options.ssl;
_server_options.sslValidate = object.server_options.sslValidate;
_server_options.sslCA = object.server_options.sslCA;
_server_options.sslCert = object.server_options.sslCert;
_server_options.sslKey = object.server_options.sslKey;
_server_options.sslPass = object.server_options.sslPass;
} else if(object.rs_options.ssl) {
_server_options.ssl = object.rs_options.ssl;
_server_options.sslValidate = object.rs_options.sslValidate;
_server_options.sslCA = object.rs_options.sslCA;
_server_options.sslCert = object.rs_options.sslCert;
_server_options.sslKey = object.rs_options.sslKey;
_server_options.sslPass = object.rs_options.sslPass;
}
// Error
var error = null;
// Set up the Server object
var _server = object.servers[i].domain_socket
? new Server(object.servers[i].domain_socket, _server_options)
: new Server(object.servers[i].host, object.servers[i].port, _server_options);
var setName;
var connectFunction = function(__server) {
// Attempt connect
new Db(object.dbName, __server, {w:1, native_parser:false}).open(function(err, db) {
// Update number of servers
totalNumberOfServers = totalNumberOfServers - 1;
// If no error do the correct checks
if(!err) {
// Close the connection
db.close();
var isMasterDoc = db.serverConfig.isMasterDoc;
// Check what type of server we have
if(isMasterDoc.setName) {
totalNumberOfMongodServers++;
setName = isMasterDoc.setName;
}
if(isMasterDoc.msg && isMasterDoc.msg == "isdbgrid") totalNumberOfMongosServers++;
} else {
error = err;
errorServers[__server.host + ":" + __server.port] = __server;
}
if(totalNumberOfServers == 0) {
// Error out
if(totalNumberOfMongodServers == 0 && totalNumberOfMongosServers == 0 && error) {
return callback(error, null);
}
// If we have a mix of mongod and mongos, throw an error
if(totalNumberOfMongosServers > 0 && totalNumberOfMongodServers > 0) {
if(db) db.close();
return process.nextTick(function() {
try {
callback(new Error("cannot combine a list of replicaset seeds and mongos seeds"));
} catch (err) {
throw err
}
})
}
if(totalNumberOfMongodServers == 0
&& totalNumberOfMongosServers == 0
&& object.servers.length == 1) {
var obj = object.servers[0];
serverConfig = obj.domain_socket ?
new Server(obj.domain_socket, object.server_options)
: new Server(obj.host, obj.port, object.server_options);
} else if(totalNumberOfMongodServers > 0 || totalNumberOfMongosServers > 0) {
var finalServers = object.servers
.filter(function(serverObj) {
return errorServers[serverObj.host + ":" + serverObj.port] == null;
})
.map(function(serverObj) {
return new Server(serverObj.host, serverObj.port, object.server_options);
});
// Clean out any error servers
errorServers = {};
// Set up the final configuration
if(totalNumberOfMongodServers > 0) {
try {
if (totalNumberOfMongodServers == 1) {
object.rs_options.replicaSet = object.rs_options.replicaSet || setName;
}
serverConfig = new ReplSet(finalServers, object.rs_options);
} catch(err) {
return callback(err, null);
}
} else {
serverConfig = new Mongos(finalServers, object.mongos_options);
}
}
if(serverConfig == null) {
return process.nextTick(function() {
try {
callback(new Error("Could not locate any valid servers in initial seed list"));
} catch (err) {
if(db) db.close();
throw err
}
});
}
// Ensure no firing of open event before we are ready
serverConfig.emitOpen = false;
// Set up all options etc and connect to the database
_finishConnecting(serverConfig, object, options, callback)
}
});
}
// Wrap the context of the call
connectFunction(_server);
}
}
// Decide whether the native (C++) BSON parser should be used.
// An explicit boolean in db_options always wins; otherwise we probe for the
// optional native parser and fall back to the pure-JS implementation.
var _setNativeParser = function(db_options) {
  var explicit = db_options.native_parser;
  if(typeof explicit == 'boolean') return explicit;
  // No preference given: probe for the optional native BSON addon
  var available = false;
  try {
    require('mongodb-core').BSON.BSONNative.BSON;
    available = true;
  } catch(err) {
    // Native addon not built/installed — use the JS parser
  }
  return available;
}
// Final stage of MongoClient.connect: normalize read preferences, open the
// Db against the chosen topology, optionally authenticate, and hand the
// opened Db to the callback. Callback exceptions are rethrown asynchronously
// so they are not swallowed by the driver's internal callback chain.
var _finishConnecting = function(serverConfig, object, options, callback) {
  // Normalize the read preference: both `readPreference` and the legacy
  // `read_preference` spellings are accepted as strings
  if(typeof object.db_options.readPreference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.readPreference);
  } else if(typeof object.db_options.read_preference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.read_preference);
  }
  // Attach read preference tags, again accepting both spellings
  if(object.db_options.readPreference && object.db_options.readPreferenceTags) {
    object.db_options.readPreference.tags = object.db_options.readPreferenceTags;
  } else if(object.db_options.readPreference && object.db_options.read_preference_tags) {
    object.db_options.readPreference.tags = object.db_options.read_preference_tags;
  }
  // Get the requested socketTimeoutMS (0 = no timeout)
  var socketTimeoutMS = object.server_options.socketOptions.socketTimeoutMS || 0;
  // If we have a replset, the replicaset-specific socket timeout wins
  if(serverConfig instanceof ReplSet) {
    socketTimeoutMS = object.rs_options.socketOptions.socketTimeoutMS || socketTimeoutMS;
  }
  // During the connection handshake force the socket timeout to match the
  // connect timeout (default 30s); the requested value is restored once open
  serverConfig.connectTimeoutMS = serverConfig.connectTimeoutMS || 30000;
  serverConfig.socketTimeoutMS = serverConfig.connectTimeoutMS;
  // Set up the db options
  var db = new Db(object.dbName, serverConfig, object.db_options);
  // Open the db
  db.open(function(err, db){
    if(err) {
      return process.nextTick(function() {
        try {
          callback(err, null);
        } catch (err) {
          if(db) db.close();
          throw err
        }
      });
    }
    // Handshake done: restore the requested socket timeout
    serverConfig.socketTimeoutMS = socketTimeoutMS || 0;
    // Authenticate if credentials were supplied in the connection string
    if(err == null && object.auth){
      // What db to authenticate against (authSource overrides the target db)
      var authentication_db = db;
      if(object.db_options && object.db_options.authSource) {
        authentication_db = db.db(object.db_options.authSource);
      }
      // Build the authentication options; deliberately shadows the outer
      // `options` parameter, which is not used past this point
      var options = {};
      if(object.db_options.authMechanism) options.authMechanism = object.db_options.authMechanism;
      if(object.db_options.gssapiServiceName) options.gssapiServiceName = object.db_options.gssapiServiceName;
      // Authenticate
      authentication_db.authenticate(object.auth.user, object.auth.password, options, function(err, success){
        if(success){
          process.nextTick(function() {
            try {
              callback(null, db);
            } catch (err) {
              if(db) db.close();
              throw err
            }
          });
        } else {
          if(db) db.close();
          process.nextTick(function() {
            try {
              // BUGFIX: was `object.auth[0]`, which is always undefined since
              // `auth` holds {user, password} (see the authenticate call
              // above), yielding "Could not authenticate user undefined"
              callback(err ? err : new Error('Could not authenticate user ' + object.auth.user), null);
            } catch (err) {
              if(db) db.close();
              throw err
            }
          });
        }
      });
    } else {
      process.nextTick(function() {
        try {
          callback(err, db);
        } catch (err) {
          if(db) db.close();
          throw err
        }
      })
    }
  });
}
module.exports = MongoClient

454
server/node_modules/mongodb/lib/mongos.js generated vendored Executable file
View File

@@ -0,0 +1,454 @@
"use strict";
var EventEmitter = require('events').EventEmitter
  , inherits = require('util').inherits
  , f = require('util').format
  , ServerCapabilities = require('./topology_base').ServerCapabilities
  , MongoCR = require('mongodb-core').MongoCR
  , MongoError = require('mongodb-core').MongoError
  , CMongos = require('mongodb-core').Mongos
  , Cursor = require('./cursor')
  , Server = require('./server')
  , Store = require('./topology_base').Store
  , shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is
* used to construct connections.
*
* **Mongos Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* Mongos = require('mongodb').Mongos,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using Mongos
* var server = new Server('localhost', 27017);
* var db = new Db('test', new Mongos([server]));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new Mongos instance
* @class
* @deprecated
* @param {Server[]} servers A seedlist of servers participating in the replicaset.
* @param {object} [options=null] Optional settings.
 * @param {boolean} [options.ha=true] Turn on high availability monitoring.
* @param {number} [options.haInterval=5000] Time between each replicaset status check.
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @fires Mongos#connect
* @fires Mongos#ha
* @fires Mongos#joined
* @fires Mongos#left
* @fires Mongos#fullsetup
* @fires Mongos#open
* @fires Mongos#close
* @fires Mongos#error
* @fires Mongos#timeout
* @fires Mongos#parseError
* @return {Mongos} a Mongos instance.
*/
var Mongos = function(servers, options) {
  // Allow invocation without `new`
  if(!(this instanceof Mongos)) return new Mongos(servers, options);
  options = options || {};
  var self = this;
  // Ensure all the seed list instances are Server objects
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw new MongoError("all seed list instances must be of the Server type");
    }
  }
  // Store option defaults (store buffers operations while disconnected)
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }
  // Shared global store
  var store = options.store || new Store(self, storeOptions);
  // Set up event emitter
  EventEmitter.call(this);
  // Debug tag
  var tag = options.tag;
  // Build seed list of {host, port} pairs for the core topology
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });
  // Final options handed to mongodb-core (translated from legacy names)
  var finalOptions = shallowClone(options);
  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;
  // Add the store
  finalOptions.disconnectHandler = store;
  // Socket options passed down (MS-suffixed legacy spellings)
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }
    if(options.socketOptions.socketTimeoutMS)
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
  }
  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }
  // Map keep alive setting; a numeric keepAlive both enables keep-alive and
  // sets the initial delay
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }
  // Connection timeout (non-MS spelling takes precedence when present)
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }
  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }
  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }
  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }
  // Add the non connection store
  finalOptions.disconnectHandler = store;
  // Create the core Mongos topology
  var mongos = new CMongos(seedlist, finalOptions)
  // Server capabilities (computed lazily in capabilities())
  var sCapabilities = null;
  // Add auth providers
  mongos.addAuthProvider('mongocr', new MongoCR());
  // Internal state
  this.s = {
    // Core mongos topology instance
    mongos: mongos
    // Server capabilities
    , sCapabilities: sCapabilities
    // Debug turned on
    , debug: debug
    // Store option defaults
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Actual store of callbacks
    , store: store
    // Options
    , options: options
  }
  // Last ismaster document received from the proxies
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return self.s.mongos.lastIsMaster(); }
  });
  // Number of currently connected proxies
  Object.defineProperty(this, 'numberOfConnectedServers', {
    enumerable:true, get: function() { return self.s.mongos.connectedServers().length; }
  });
  // BSON parser used by the topology
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.mongos.bson;
    }
  });
  // High availability check interval
  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return self.s.mongos.haInterval; }
  });
}
/**
* @ignore
*/
inherits(Mongos, EventEmitter);
// Connect
// Connect the Mongos proxy topology and invoke the callback once a proxy is
// available, or with an error on timeout/error/close during the connect phase.
Mongos.prototype.connect = function(db, _options, callback) {
  var self = this;
  // Support connect(db, callback)
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;
  // Update bufferMaxEntries from the owning db so the store buffers correctly
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;
  // Error handler used only while the initial connect is in flight
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove all event handlers
      // NOTE(review): this removes `connectErrorHandler` (the factory) from
      // `self`, but the connect-phase listeners were registered on
      // `self.s.mongos` and are the closures the factory returned — so
      // nothing is actually detached here; confirm against upstream fixes.
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        self.removeListener(e, connectErrorHandler);
      });
      self.s.mongos.removeListener('connect', connectErrorHandler);
      // Try to callback; rethrow asynchronously if the callback itself throws
      try {
        callback(err);
      } catch(err) {
        process.nextTick(function() { throw err; })
      }
    }
  }
  // Post-connect handler: re-emit topology events on this wrapper
  // (except 'error', which would throw if nobody listens)
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }
  // On reconnect, replay any operations buffered while disconnected
  var reconnectHandler = function(err) {
    self.emit('reconnect');
    self.s.store.execute();
  }
  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over from the connect phase
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.mongos.removeAllListeners(e);
    });
    // Set up post-connect listeners
    self.s.mongos.once('timeout', errorHandler('timeout'));
    self.s.mongos.once('error', errorHandler('error'));
    self.s.mongos.once('close', errorHandler('close'));
    // Relay an event from the core topology onto this wrapper
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }
    // Set up serverConfig listeners
    self.s.mongos.on('joined', relay('joined'));
    self.s.mongos.on('left', relay('left'));
    self.s.mongos.on('fullsetup', relay('fullsetup'));
    // Emit open event
    self.emit('open', null, self);
    // Return correctly; rethrow asynchronously if the callback itself throws
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }
  // Set up connect-phase listeners
  self.s.mongos.once('timeout', connectErrorHandler('timeout'));
  self.s.mongos.once('error', connectErrorHandler('error'));
  self.s.mongos.once('close', connectErrorHandler('close'));
  self.s.mongos.once('connect', connectHandler);
  // Reconnect server
  self.s.mongos.on('reconnect', reconnectHandler);
  // Start connection
  self.s.mongos.connect(_options);
}
// Return the BSON parser type used by the underlying topology
Mongos.prototype.parserType = function() {
  var topology = this.s.mongos;
  return topology.parserType();
}
// Server capabilities
// Lazily build and cache the ServerCapabilities from the last ismaster response
Mongos.prototype.capabilities = function() {
  if(this.s.sCapabilities == null) {
    this.s.sCapabilities = new ServerCapabilities(this.s.mongos.lastIsMaster());
  }
  return this.s.sCapabilities;
}
// Command
// Execute a database command, delegating directly to the core topology
Mongos.prototype.command = function(ns, cmd, options, callback) {
  this.s.mongos.command(ns, cmd, options, callback);
}
// Insert
// Insert one or more documents, forwarding error and result to the callback
Mongos.prototype.insert = function(ns, ops, options, callback) {
  this.s.mongos.insert(ns, ops, options, function(err, result) {
    callback(err, result)
  });
}
// Update
// Update documents, delegating directly to the core topology
Mongos.prototype.update = function(ns, ops, options, callback) {
  this.s.mongos.update(ns, ops, options, callback);
}
// Remove
// Remove documents, delegating directly to the core topology
Mongos.prototype.remove = function(ns, ops, options, callback) {
  this.s.mongos.remove(ns, ops, options, callback);
}
// IsConnected
// Returns true when the underlying topology reports a usable connection
Mongos.prototype.isConnected = function() {
  return this.s.mongos.isConnected();
}
// Insert
// Create a cursor over ns for the given command.
// NOTE(review): mutates the caller-supplied options object by attaching the
// disconnect-handling store — confirm callers do not reuse the object.
Mongos.prototype.cursor = function(ns, cmd, options) {
  options.disconnectHandler = this.s.store;
  return this.s.mongos.cursor(ns, cmd, options);
}
// Switch the BSON parser implementation on the underlying topology
Mongos.prototype.setBSONParserType = function(type) {
  return this.s.mongos.setBSONParserType(type);
}
// Return the last ismaster document received by the topology
Mongos.prototype.lastIsMaster = function() {
  return this.s.mongos.lastIsMaster();
}
// Destroy the topology; when forceClosed is true also flush any operations
// buffered in the store while disconnected
Mongos.prototype.close = function(forceClosed) {
  var state = this.s;
  state.mongos.destroy();
  // We need to wash out all stored processes on a forced close
  var flushStore = forceClosed == true;
  if(flushStore) {
    state.storeOptions.force = forceClosed;
    state.store.flush();
  }
}
// Authenticate against the topology, passing all arguments straight through
Mongos.prototype.auth = function() {
  var topology = this.s.mongos;
  topology.auth.apply(topology, Array.prototype.slice.call(arguments, 0));
}
/**
* All raw connections
* @method
* @return {array}
*/
Mongos.prototype.connections = function() {
  // Delegate to the core topology's raw connection list
  return this.s.mongos.connections();
}
/**
* A mongos connect event, used to verify that the connection is up and running
*
* @event Mongos#connect
* @type {Mongos}
*/
/**
* The mongos high availability event
*
* @event Mongos#ha
* @type {function}
* @param {string} type The stage in the high availability event (start|end)
* @param {boolean} data.norepeat This is a repeating high availability process or a single execution only
* @param {number} data.id The id for this high availability request
* @param {object} data.state An object containing the information about the current replicaset
*/
/**
* A server member left the mongos set
*
* @event Mongos#left
* @type {function}
* @param {string} type The type of member that left (primary|secondary|arbiter)
* @param {Server} server The server object that left
*/
/**
* A server member joined the mongos set
*
* @event Mongos#joined
* @type {function}
* @param {string} type The type of member that joined (primary|secondary|arbiter)
* @param {Server} server The server object that joined
*/
/**
* Mongos fullsetup event, emitted when all proxies in the topology have been connected to.
*
* @event Mongos#fullsetup
* @type {Mongos}
*/
/**
* Mongos open event, emitted when mongos can start processing commands.
*
* @event Mongos#open
* @type {Mongos}
*/
/**
* Mongos close event
*
* @event Mongos#close
* @type {object}
*/
/**
* Mongos error event, emitted if there is an error listener.
*
* @event Mongos#error
* @type {MongoError}
*/
/**
* Mongos timeout event
*
* @event Mongos#timeout
* @type {object}
*/
/**
* Mongos parseError event
*
* @event Mongos#parseError
* @type {object}
*/
module.exports = Mongos;

104
server/node_modules/mongodb/lib/read_preference.js generated vendored Executable file
View File

@@ -0,0 +1,104 @@
"use strict";
/**
* @fileOverview The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
* used to construct connections.
*
* @example
* var Db = require('mongodb').Db,
* ReplSet = require('mongodb').ReplSet,
* Server = require('mongodb').Server,
* ReadPreference = require('mongodb').ReadPreference,
* test = require('assert');
* // Connect using ReplSet
* var server = new Server('localhost', 27017);
* var db = new Db('test', new ReplSet([server]));
* db.open(function(err, db) {
* test.equal(null, err);
* // Perform a read
* var cursor = db.collection('t').find({});
* cursor.setReadPreference(ReadPreference.PRIMARY);
* cursor.toArray(function(err, docs) {
* test.equal(null, err);
* db.close();
* });
* });
*/
/**
* Creates a new ReadPreference instance
*
* Read Preferences
* - **ReadPreference.PRIMARY**, Read from primary only. All operations produce an error (throw an exception where applicable) if primary is unavailable. Cannot be combined with tags (This is the default.).
* - **ReadPreference.PRIMARY_PREFERRED**, Read from primary if available, otherwise a secondary.
* - **ReadPreference.SECONDARY**, Read from secondary if available, otherwise error.
* - **ReadPreference.SECONDARY_PREFERRED**, Read from a secondary if available, otherwise read from the primary.
* - **ReadPreference.NEAREST**, All modes read from among the nearest candidates, but unlike other modes, NEAREST will include both the primary and all secondaries in the random selection.
*
* @class
* @param {string} mode The ReadPreference mode as listed above.
* @param {object} tags An object representing read preference tags.
* @property {string} mode The ReadPreference mode.
* @property {object} tags The ReadPreference tags.
* @return {ReadPreference} a ReadPreference instance.
*/
var ReadPreference = function(mode, tags) {
  // Allow invocation without `new`
  if(!(this instanceof ReadPreference)) {
    return new ReadPreference(mode, tags);
  }
  this._type = 'ReadPreference';
  this.mode = mode;
  this.tags = tags;
}

/**
 * Check whether a read preference mode is legal.
 * Boolean and null values are also accepted (legacy callers).
 *
 * @method
 * @param {string} _mode The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.isValid = function(_mode) {
  // Legacy values accepted alongside the five named modes
  if(_mode == true || _mode == false || _mode == null) return true;
  return _mode == ReadPreference.PRIMARY
    || _mode == ReadPreference.PRIMARY_PREFERRED
    || _mode == ReadPreference.SECONDARY
    || _mode == ReadPreference.SECONDARY_PREFERRED
    || _mode == ReadPreference.NEAREST;
}

/**
 * Check whether this instance's mode — or an explicitly supplied mode
 * string — is legal.
 *
 * @method
 * @param {string} mode Optional mode to validate instead of this.mode.
 * @return {boolean}
 */
ReadPreference.prototype.isValid = function(mode) {
  var candidate = typeof mode == 'string' ? mode : this.mode;
  return ReadPreference.isValid(candidate);
}

/**
 * Serialize to a plain object: {mode} plus tags when tags are present.
 * @ignore
 */
ReadPreference.prototype.toObject = function() {
  var result = {mode: this.mode};
  if(this.tags != null) result['tags'] = this.tags;
  return result;
}

/**
 * The five server read preference modes.
 * @ignore
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest'

/**
 * @ignore
 */
module.exports = ReadPreference;

527
server/node_modules/mongodb/lib/replset.js generated vendored Executable file
View File

@@ -0,0 +1,527 @@
"use strict";
var EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, f = require('util').format
, Server = require('./server')
, Mongos = require('./mongos')
, Cursor = require('./cursor')
, ReadPreference = require('./read_preference')
, MongoCR = require('mongodb-core').MongoCR
, MongoError = require('mongodb-core').MongoError
, ServerCapabilities = require('./topology_base').ServerCapabilities
, Store = require('./topology_base').Store
, CServer = require('mongodb-core').Server
, CReplSet = require('mongodb-core').ReplSet
, CoreReadPreference = require('mongodb-core').ReadPreference
, shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **ReplSet** class is a class that represents a Replicaset topology and is
* used to construct connections.
*
* **ReplSet Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* ReplSet = require('mongodb').ReplSet,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using ReplSet
* var server = new Server('localhost', 27017);
* var db = new Db('test', new ReplSet([server]));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new ReplSet instance
* @class
* @deprecated
* @param {Server[]} servers A seedlist of servers participating in the replicaset.
* @param {object} [options=null] Optional settings.
 * @param {boolean} [options.ha=true] Turn on high availability monitoring.
* @param {number} [options.haInterval=5000] Time between each replicaset status check.
* @param {string} options.replicaSet The name of the replicaset to connect to.
* @param {number} [options.secondaryAcceptableLatencyMS=15] Sets the range of servers to pick when using NEAREST (lowest ping ms + the latency fence, ex: range of 1 to (1 + 15) ms)
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @fires ReplSet#connect
* @fires ReplSet#ha
* @fires ReplSet#joined
* @fires ReplSet#left
* @fires ReplSet#fullsetup
* @fires ReplSet#open
* @fires ReplSet#close
* @fires ReplSet#error
* @fires ReplSet#timeout
* @fires ReplSet#parseError
* @return {ReplSet} a ReplSet instance.
*/
var ReplSet = function(servers, options) {
  // Allow invocation without `new`
  if(!(this instanceof ReplSet)) return new ReplSet(servers, options);
  options = options || {};
  var self = this;
  // Ensure all the seed list instances are Server objects
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw new MongoError("all seed list instances must be of the Server type");
    }
  }
  // Store option defaults (store buffers operations while disconnected)
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }
  // Shared global store
  var store = options.store || new Store(self, storeOptions);
  // Set up event emitter
  EventEmitter.call(this);
  // Debug tag
  var tag = options.tag;
  // Build seed list of {host, port} pairs for the core topology
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });
  // Final options handed to mongodb-core (translated from legacy names)
  var finalOptions = shallowClone(options);
  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;
  // Add the store
  finalOptions.disconnectHandler = store;
  // Socket options passed down (MS-suffixed legacy spellings)
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }
    if(options.socketOptions.socketTimeoutMS) {
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }
  }
  // Replicaset name: `replicaSet` preferred, `rs_name` is the legacy spelling
  var replicaSet = options.replicaSet || options.rs_name;
  // Set up options
  finalOptions.setName = replicaSet;
  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }
  // Map keep alive setting; a numeric keepAlive both enables keep-alive and
  // sets the initial delay
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }
  // Connection timeout (non-MS spelling takes precedence when present)
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }
  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }
  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }
  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }
  // Allow connecting when only secondaries are reachable
  if(options.connectWithNoPrimary == true) {
    finalOptions.secondaryOnlyConnectionAllowed = true;
  }
  // Add the non connection store
  finalOptions.disconnectHandler = store;
  // Translate the ssl options into the names mongodb-core expects
  if(options.sslCA) finalOptions.ca = options.sslCA;
  if(typeof options.sslValidate == 'boolean') finalOptions.rejectUnauthorized = options.sslValidate;
  if(options.sslKey) finalOptions.key = options.sslKey;
  if(options.sslCert) finalOptions.cert = options.sslCert;
  if(options.sslPass) finalOptions.passphrase = options.sslPass;
  // Create the core ReplSet topology
  var replset = new CReplSet(seedlist, finalOptions)
  // Server capabilities (computed lazily in capabilities())
  var sCapabilities = null;
  // Add auth providers
  replset.addAuthProvider('mongocr', new MongoCR());
  // On reconnect, replay any operations buffered while disconnected
  replset.on('reconnect', function() {
    self.emit('reconnect');
    store.execute();
  });
  // Internal state
  this.s = {
    // Core replicaset topology instance
    replset: replset
    // Server capabilities
    , sCapabilities: null
    // Debug tag
    , tag: options.tag
    // Store options
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Store
    , store: store
    // Options
    , options: options
  }
  // Debug: expose the raw core topology for inspection
  if(debug) {
    // Last ismaster
    Object.defineProperty(this, 'replset', {
      enumerable:true, get: function() { return replset; }
    });
  }
  // Last ismaster document received from the set
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return replset.lastIsMaster(); }
  });
  // BSON parser used by the topology
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return replset.bson;
    }
  });
  // High availability check interval
  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return replset.haInterval; }
  });
}
/**
* @ignore
*/
inherits(ReplSet, EventEmitter);
// Normalize options.readPreference into a core ReadPreference instance.
// Strings become a mode-only preference; driver-level ReadPreference objects
// are converted preserving mode and tags; anything else is left untouched.
var translateReadPreference = function(options) {
  var pref = options.readPreference;
  if(typeof pref == 'string') {
    options.readPreference = new CoreReadPreference(pref);
  } else if(pref instanceof ReadPreference) {
    options.readPreference = new CoreReadPreference(pref.mode, pref.tags);
  }
  return options;
}
// Report which BSON parser type the core replset topology is using.
ReplSet.prototype.parserType = function() {
  var topology = this.s.replset;
  return topology.parserType();
}
// Connect method
//
// Establishes the replicaset connection, wiring up connect-phase error
// handlers that are replaced by steady-state relays once 'connect' fires.
// Fix: the original passed the connectErrorHandler FACTORY to
// removeListener, which never matched the actual registered listeners and
// therefore leaked them; we now keep references in connectHandlers (the
// same pattern Server.prototype.connect uses) and remove those.
ReplSet.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Steady-state handler: re-emit everything except 'error' (which would
  // throw if no listener is attached on this emitter)
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the connect-phase handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.replset.removeAllListeners(e);
    });

    // Set up steady-state listeners
    self.s.replset.once('timeout', errorHandler('timeout'));
    self.s.replset.once('error', errorHandler('error'));
    self.s.replset.once('close', errorHandler('close'));

    // relay the event
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }

    // Replset events relay, including the member's last ismaster document
    var replsetRelay = function(event) {
      return function(t, server) {
        self.emit(event, t, server.lastIsMaster(), server);
      }
    }

    // Relay ha events, splitting 'start'/'end' into dedicated events
    var relayHa = function(t, state) {
      self.emit('ha', t, state);

      if(t == 'start') {
        self.emit('ha_connect', t, state);
      } else if(t == 'end') {
        self.emit('ha_ismaster', t, state);
      }
    }

    // Set up serverConfig listeners
    self.s.replset.on('joined', replsetRelay('joined'));
    self.s.replset.on('left', relay('left'));
    self.s.replset.on('ping', relay('ping'));
    self.s.replset.on('ha', relayHa);

    self.s.replset.on('fullsetup', function(topology) {
      self.emit('fullsetup', null, self);
    });

    self.s.replset.on('all', function(topology) {
      self.emit('all', null, self);
    });

    // Emit open event
    self.emit('open', null, self);

    // Return correctly; a throwing callback is re-thrown on the next tick
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }

  // Error handler used only while the initial connect is outstanding
  var connectErrorHandler = function(event) {
    return function(err) {
      ['timeout', 'error', 'close'].forEach(function(e) {
        // Remove the exact listener instances registered below; the
        // original passed the factory function here, removing nothing.
        self.s.replset.removeListener(e, connectHandlers[e]);
      });

      // Also detach the success handler (was wrongly connectErrorHandler)
      self.s.replset.removeListener('connect', connectHandler);

      // Destroy the replset
      self.s.replset.destroy();

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        if(!self.s.replset.isConnected())
          process.nextTick(function() { throw err; })
      }
    }
  }

  // Keep references to the connect-phase listeners so they can be removed
  var connectHandlers = {
    timeout: connectErrorHandler('timeout'),
    error: connectErrorHandler('error'),
    close: connectErrorHandler('close')
  };

  // Set up listeners
  self.s.replset.once('timeout', connectHandlers.timeout);
  self.s.replset.once('error', connectHandlers.error);
  self.s.replset.once('close', connectHandlers.close);
  self.s.replset.once('connect', connectHandler);

  // Start connection
  self.s.replset.connect(_options);
}
// Server capabilities: lazily built from the last ismaster document and cached
ReplSet.prototype.capabilities = function() {
  if(this.s.sCapabilities == null) {
    this.s.sCapabilities = new ServerCapabilities(this.s.replset.lastIsMaster());
  }
  return this.s.sCapabilities;
}
// Command: normalize the read preference, then dispatch to the core topology
ReplSet.prototype.command = function(ns, cmd, options, callback) {
  var opts = translateReadPreference(options);
  this.s.replset.command(ns, cmd, opts, callback);
}
// Insert: pass the write straight through to the core replset topology
ReplSet.prototype.insert = function(ns, ops, options, callback) {
  var topology = this.s.replset;
  topology.insert(ns, ops, options, callback);
}

// Update
ReplSet.prototype.update = function(ns, ops, options, callback) {
  var topology = this.s.replset;
  topology.update(ns, ops, options, callback);
}

// Remove
ReplSet.prototype.remove = function(ns, ops, options, callback) {
  var topology = this.s.replset;
  topology.remove(ns, ops, options, callback);
}
// IsConnected: delegate the connectivity check to the core topology
ReplSet.prototype.isConnected = function() {
  var topology = this.s.replset;
  return topology.isConnected();
}

// Switch the BSON parser implementation on the core topology
ReplSet.prototype.setBSONParserType = function(type) {
  var topology = this.s.replset;
  return topology.setBSONParserType(type);
}
// Cursor: normalize the read preference and route buffered ops via the store
ReplSet.prototype.cursor = function(ns, cmd, options) {
  var opts = translateReadPreference(options);
  opts.disconnectHandler = this.s.store;
  return this.s.replset.cursor(ns, cmd, opts);
}
// Expose the most recent ismaster result held by the core topology
ReplSet.prototype.lastIsMaster = function() {
  var topology = this.s.replset;
  return topology.lastIsMaster();
}
// Close the replicaset topology; on a forced close also fail every
// buffered operation instead of keeping it queued.
ReplSet.prototype.close = function(forceClosed) {
  var self = this;
  self.s.replset.destroy();

  if(forceClosed == true) {
    self.s.storeOptions.force = forceClosed;
    self.s.store.flush();
  }

  // Drop our own listeners for topology lifecycle events
  ['timeout', 'error', 'close', 'joined', 'left'].forEach(function(e) {
    self.removeAllListeners(e);
  });
}
// Forward all authentication arguments untouched to the core topology
ReplSet.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.replset.auth.apply(this.s.replset, args);
}

/**
 * All raw connections
 * @method
 * @return {array}
 */
ReplSet.prototype.connections = function() {
  var topology = this.s.replset;
  return topology.connections();
}
/**
* A replset connect event, used to verify that the connection is up and running
*
* @event ReplSet#connect
* @type {ReplSet}
*/
/**
* The replset high availability event
*
* @event ReplSet#ha
* @type {function}
* @param {string} type The stage in the high availability event (start|end)
* @param {boolean} data.norepeat This is a repeating high availability process or a single execution only
* @param {number} data.id The id for this high availability request
* @param {object} data.state An object containing the information about the current replicaset
*/
/**
* A server member left the replicaset
*
* @event ReplSet#left
* @type {function}
* @param {string} type The type of member that left (primary|secondary|arbiter)
* @param {Server} server The server object that left
*/
/**
* A server member joined the replicaset
*
* @event ReplSet#joined
* @type {function}
* @param {string} type The type of member that joined (primary|secondary|arbiter)
* @param {Server} server The server object that joined
*/
/**
* ReplSet open event, emitted when replicaset can start processing commands.
*
* @event ReplSet#open
* @type {Replset}
*/
/**
* ReplSet fullsetup event, emitted when all servers in the topology have been connected to.
*
* @event ReplSet#fullsetup
* @type {Replset}
*/
/**
* ReplSet close event
*
* @event ReplSet#close
* @type {object}
*/
/**
* ReplSet error event, emitted if there is an error listener.
*
* @event ReplSet#error
* @type {MongoError}
*/
/**
* ReplSet timeout event
*
* @event ReplSet#timeout
* @type {object}
*/
/**
* ReplSet parseError event
*
* @event ReplSet#parseError
* @type {object}
*/
module.exports = ReplSet;

408
server/node_modules/mongodb/lib/server.js generated vendored Executable file
View File

@@ -0,0 +1,408 @@
"use strict";
var EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, CServer = require('mongodb-core').Server
, Cursor = require('./cursor')
, f = require('util').format
, ServerCapabilities = require('./topology_base').ServerCapabilities
, Store = require('./topology_base').Store
, MongoError = require('mongodb-core').MongoError
, shallowClone = require('./utils').shallowClone;
/**
* @fileOverview The **Server** class is a class that represents a single server topology and is
* used to construct connections.
*
* **Server Should not be used, use MongoClient.connect**
* @example
* var Db = require('mongodb').Db,
* Server = require('mongodb').Server,
* test = require('assert');
* // Connect using single Server
 * var db = new Db('test', new Server('localhost', 27017));
* db.open(function(err, db) {
* // Get an additional db
* db.close();
* });
*/
/**
* Creates a new Server instance
* @class
* @deprecated
* @param {string} host The host for the server, can be either an IP4, IP6 or domain socket style host.
* @param {number} [port] The server port if IP4.
* @param {object} [options=null] Optional settings.
* @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
* @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
* @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
* @param {object} [options.socketOptions=null] Socket options
* @param {boolean} [options.socketOptions.autoReconnect=false] Reconnect on error.
* @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
* @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
* @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
* @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @fires Server#connect
* @fires Server#close
* @fires Server#error
* @fires Server#timeout
* @fires Server#parseError
* @fires Server#reconnect
* @return {Server} a Server instance.
*/
// Server constructor (see the JSDoc block above for the option list).
// Fix: removed the unused local `var sCapabilities = null;` — the cached
// capabilities live in this.s.sCapabilities, the local was dead.
var Server = function(host, port, options) {
  options = options || {};
  if(!(this instanceof Server)) return new Server(host, port, options);
  EventEmitter.call(this);
  var self = this;

  // Defaults for the disconnected-operation store
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store of operations buffered while disconnected
  var store = options.store || new Store(self, storeOptions);

  // Detect if we have a unix domain socket (host contains a '/'); in that
  // case the second positional argument may actually be the options object
  if(host.indexOf('\/') != -1) {
    if(port != null && typeof port == 'object') {
      options = port;
      port = null;
    }
  } else if(port == null) {
    throw new MongoError('port must be specified');
  }

  // Clone options so we can decorate them without mutating the caller's copy
  var clonedOptions = shallowClone(options);
  clonedOptions.host = host;
  clonedOptions.port = port;

  // Reconnect settings (autoReconnect wins over legacy auto_reconnect)
  var reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  reconnect = typeof options.autoReconnect == 'boolean' ? options.autoReconnect : reconnect;
  var emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  var poolSize = typeof options.poolSize == 'number' ? options.poolSize : 5;

  // Translate socketOptions into the names mongodb-core expects
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      clonedOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }

    if(options.socketOptions.socketTimeoutMS) {
      clonedOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }

    if(typeof options.socketOptions.keepAlive == 'number') {
      clonedOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
      clonedOptions.keepAlive = true;
    }

    if(typeof options.socketOptions.noDelay == 'boolean') {
      clonedOptions.noDelay = options.socketOptions.noDelay;
    }
  }

  // Add the cursor factory function
  clonedOptions.cursorFactory = Cursor;
  clonedOptions.reconnect = reconnect;
  clonedOptions.emitError = emitError;
  clonedOptions.size = poolSize;

  // Translate the ssl option names to the ones used by mongodb-core
  if(clonedOptions.sslCA) clonedOptions.ca = clonedOptions.sslCA;
  if(typeof clonedOptions.sslValidate == 'boolean') clonedOptions.rejectUnauthorized = clonedOptions.sslValidate;
  if(clonedOptions.sslKey) clonedOptions.key = clonedOptions.sslKey;
  if(clonedOptions.sslCert) clonedOptions.cert = clonedOptions.sslCert;
  if(clonedOptions.sslPass) clonedOptions.passphrase = clonedOptions.sslPass;

  // Buffer operations through the store while there is no connection
  clonedOptions.disconnectHandler = store;

  // Create an instance of a server instance from mongodb-core
  var server = new CServer(clonedOptions);

  // Define the internal properties
  this.s = {
    // Core server topology
    server: server
    // Server capabilities (lazily populated by capabilities())
    , sCapabilities: null
    // Cloned options
    , clonedOptions: clonedOptions
    // Reconnect
    , reconnect: reconnect
    // Emit error
    , emitError: emitError
    // Pool size
    , poolSize: poolSize
    // Store Options
    , storeOptions: storeOptions
    // Store
    , store: store
    // Host
    , host: host
    // Port
    , port: port
    // Options
    , options: options
  }

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.server.bson;
    }
  });

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() {
      return self.s.server.lastIsMaster();
    }
  });

  // Current number of pooled connections
  Object.defineProperty(this, 'poolSize', {
    enumerable:true, get: function() { return self.s.server.connections().length; }
  });

  Object.defineProperty(this, 'autoReconnect', {
    enumerable:true, get: function() { return self.s.reconnect; }
  });

  Object.defineProperty(this, 'host', {
    enumerable:true, get: function() { return self.s.host; }
  });

  Object.defineProperty(this, 'port', {
    enumerable:true, get: function() { return self.s.port; }
  });
}
// Server emits topology lifecycle events
inherits(Server, EventEmitter);

// Report which BSON parser type the core server topology is using
Server.prototype.parserType = function() {
  var topology = this.s.server;
  return topology.parserType();
}
// Connect
//
// Establishes the single-server connection, installing connect-phase error
// handlers that are swapped for steady-state ones once 'connect' fires.
// Fix: the original removed the 'connect' listener by passing the
// connectErrorHandler factory, which never matched the registered listener
// (connectHandler) and therefore leaked it; we now remove connectHandler.
Server.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Error handler used only while the initial connect is outstanding
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove the connect-phase event handlers by their real references
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        self.s.server.removeListener(e, connectHandlers[e]);
      });

      // Detach the success handler as well (was wrongly connectErrorHandler)
      self.s.server.removeListener('connect', connectHandler);

      // Try to callback; a throwing callback is re-thrown on the next tick
      try {
        callback(err);
      } catch(err) {
        process.nextTick(function() { throw err; })
      }
    }
  }

  // Steady-state handler: re-emit everything except 'error'
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // On reconnect, replay any operations buffered while disconnected
  var reconnectHandler = function(err) {
    self.emit('reconnect', self);
    self.s.store.execute();
  }

  // Destroy called on topology, perform cleanup
  var destroyHandler = function() {
    self.s.store.flush();
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the connect-phase handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.server.removeAllListeners(e);
    });

    // Set up steady-state listeners
    self.s.server.once('timeout', errorHandler('timeout'));
    self.s.server.once('error', errorHandler('error'));
    self.s.server.once('close', errorHandler('close'));

    // Only called on destroy
    self.s.server.once('destroy', destroyHandler);

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }

  // Keep references to the connect-phase listeners so they can be removed
  var connectHandlers = {
    timeout: connectErrorHandler('timeout'),
    error: connectErrorHandler('error'),
    close: connectErrorHandler('close')
  };

  // Add the event handlers
  self.s.server.once('timeout', connectHandlers.timeout);
  self.s.server.once('error', connectHandlers.error);
  self.s.server.once('close', connectHandlers.close);
  self.s.server.once('connect', connectHandler);

  // Reconnect server
  self.s.server.on('reconnect', reconnectHandler);

  // Start connection
  self.s.server.connect(_options);
}
// Server capabilities: lazily built from the last ismaster document and cached
Server.prototype.capabilities = function() {
  if(this.s.sCapabilities == null) {
    this.s.sCapabilities = new ServerCapabilities(this.s.server.lastIsMaster());
  }
  return this.s.sCapabilities;
}
// Command: pass straight through to the core server topology
Server.prototype.command = function(ns, cmd, options, callback) {
  var topology = this.s.server;
  topology.command(ns, cmd, options, callback);
}

// Insert
Server.prototype.insert = function(ns, ops, options, callback) {
  var topology = this.s.server;
  topology.insert(ns, ops, options, callback);
}

// Update
Server.prototype.update = function(ns, ops, options, callback) {
  var topology = this.s.server;
  topology.update(ns, ops, options, callback);
}

// Remove
Server.prototype.remove = function(ns, ops, options, callback) {
  var topology = this.s.server;
  topology.remove(ns, ops, options, callback);
}
// IsConnected: delegate the connectivity check to the core topology
Server.prototype.isConnected = function() {
  var topology = this.s.server;
  return topology.isConnected();
}

// Cursor: route buffered cursor operations through the disconnect store
Server.prototype.cursor = function(ns, cmd, options) {
  options.disconnectHandler = this.s.store;
  return this.s.server.cursor(ns, cmd, options);
}

// Switch the BSON parser implementation on the core topology
Server.prototype.setBSONParserType = function(type) {
  var topology = this.s.server;
  return topology.setBSONParserType(type);
}

// Expose the most recent ismaster result held by the core topology
Server.prototype.lastIsMaster = function() {
  var topology = this.s.server;
  return topology.lastIsMaster();
}
// Close the topology; on a forced close also fail every buffered operation
Server.prototype.close = function(forceClosed) {
  this.s.server.destroy();

  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }
}

// Forward all authentication arguments untouched to the core topology
Server.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.server.auth.apply(this.s.server, args);
}

/**
 * All raw connections
 * @method
 * @return {array}
 */
Server.prototype.connections = function() {
  var topology = this.s.server;
  return topology.connections();
}
/**
* Server connect event
*
* @event Server#connect
* @type {object}
*/
/**
* Server close event
*
* @event Server#close
* @type {object}
*/
/**
* Server reconnect event
*
* @event Server#reconnect
* @type {object}
*/
/**
* Server error event
*
* @event Server#error
* @type {MongoError}
*/
/**
* Server timeout event
*
* @event Server#timeout
* @type {object}
*/
/**
* Server parseError event
*
* @event Server#parseError
* @type {object}
*/
module.exports = Server;

140
server/node_modules/mongodb/lib/topology_base.js generated vendored Executable file
View File

@@ -0,0 +1,140 @@
"use strict";
var MongoError = require('mongodb-core').MongoError
, f = require('util').format;
// The store of ops buffered while the topology has no usable connection
var Store = function(topology, storeOptions) {
  var self = this;
  // Default to unlimited buffering and no forced-shutdown flag
  storeOptions = storeOptions || {force:false, bufferMaxEntries: -1}

  // Internal state
  this.s = {
    storedOps: []
    , storeOptions: storeOptions
    , topology: topology
  }

  // Number of operations currently buffered
  Object.defineProperty(this, 'length', {
    enumerable:true, get: function() { return self.s.storedOps.length; }
  });
}
// Buffer a topology operation, or fail it immediately when the store is
// force-closed, buffering is disabled (bufferMaxEntries == 0), or the
// configured buffer limit has been exceeded (which also drains the queue).
Store.prototype.add = function(opType, ns, ops, options, callback) {
  var opts = this.s.storeOptions;
  if(opts.force) return callback(new MongoError("db closed by application"));
  if(opts.bufferMaxEntries == 0) return callback(new MongoError(f("no connection available for operation and number of stored operation > %s", opts.bufferMaxEntries)));

  // Over the cap: fail every queued operation before returning
  if(opts.bufferMaxEntries > 0 && this.s.storedOps.length > opts.bufferMaxEntries) {
    while(this.s.storedOps.length > 0) {
      var op = this.s.storedOps.shift();
      op.c(new MongoError(f("no connection available for operation and number of stored operation > %s", opts.bufferMaxEntries)));
    }
    return;
  }

  this.s.storedOps.push({t: opType, n: ns, o: ops, op: options, c: callback})
}
// Buffer a method call on an object (used for cursor operations), with the
// same force/limit failure behavior as Store.prototype.add.
Store.prototype.addObjectAndMethod = function(opType, object, method, params, callback) {
  var opts = this.s.storeOptions;
  if(opts.force) return callback(new MongoError("db closed by application"));
  if(opts.bufferMaxEntries == 0) return callback(new MongoError(f("no connection available for operation and number of stored operation > %s", opts.bufferMaxEntries)));

  // Over the cap: fail every queued operation before returning
  if(opts.bufferMaxEntries > 0 && this.s.storedOps.length > opts.bufferMaxEntries) {
    while(this.s.storedOps.length > 0) {
      var op = this.s.storedOps.shift();
      op.c(new MongoError(f("no connection available for operation and number of stored operation > %s", opts.bufferMaxEntries)));
    }
    return;
  }

  this.s.storedOps.push({t: opType, m: method, o: object, p: params, c: callback})
}
// Fail every buffered operation with a "no connection" error
Store.prototype.flush = function() {
  while(this.s.storedOps.length > 0) {
    var op = this.s.storedOps.shift();
    op.c(new MongoError(f("no connection available for operation")));
  }
}
// Replay all buffered operations against the topology
Store.prototype.execute = function() {
  // Swap out the queue first so operations re-buffered during replay are
  // not replayed again in this pass
  var ops = this.s.storedOps;
  this.s.storedOps = [];

  while(ops.length > 0) {
    var op = ops.shift();
    if(op.t == 'cursor') {
      // Cursor entries replay a method call on the stored object
      op.o[op.m].apply(op.o, op.p);
    } else {
      // Everything else replays through the topology method of the same name
      this.s.topology[op.t](op.n, op.o, op.op, op.c);
    }
  }
}
// Return the raw array of buffered operations
Store.prototype.all = function() {
  return this.s.storedOps;
}
// Server capabilities derived from an ismaster document's wire versions
var ServerCapabilities = function(ismaster) {
  // Define a read-only, enumerable property on the given object
  var defineReadOnly = function(object, name, value) {
    Object.defineProperty(object, name, {
      enumerable: true
      , get: function () { return value; }
    });
  }

  // Capability flags
  var aggregationCursor = false;
  var writeCommands = false;
  var textSearch = false;
  var authCommands = false;
  var listCollections = false;
  var listIndexes = false;
  var maxNumberOfDocsInBatch = ismaster.maxWriteBatchSize || 1000;

  // NOTE: these comparisons deliberately run BEFORE the null-defaulting
  // below, preserving the original evaluation order (a missing
  // minWireVersion compares false here, so textSearch stays false).
  if(ismaster.minWireVersion >= 0) {
    textSearch = true;
  }

  if(ismaster.maxWireVersion >= 1) {
    aggregationCursor = true;
    authCommands = true;
  }

  if(ismaster.maxWireVersion >= 2) {
    writeCommands = true;
  }

  if(ismaster.maxWireVersion >= 3) {
    listCollections = true;
    listIndexes = true;
  }

  // If no min or max wire version was reported, normalize to 0
  if(ismaster.minWireVersion == null) {
    ismaster.minWireVersion = 0;
  }

  if(ismaster.maxWireVersion == null) {
    ismaster.maxWireVersion = 0;
  }

  // Map up read only parameters
  defineReadOnly(this, "hasAggregationCursor", aggregationCursor);
  defineReadOnly(this, "hasWriteCommands", writeCommands);
  defineReadOnly(this, "hasTextSearch", textSearch);
  defineReadOnly(this, "hasAuthCommands", authCommands);
  defineReadOnly(this, "hasListCollectionsCommand", listCollections);
  defineReadOnly(this, "hasListIndexesCommand", listIndexes);
  defineReadOnly(this, "minWireVersion", ismaster.minWireVersion);
  defineReadOnly(this, "maxWireVersion", ismaster.maxWireVersion);
  defineReadOnly(this, "maxNumberOfDocsInBatch", maxNumberOfDocsInBatch);
}
// Public exports: the disconnected-op Store and the capabilities wrapper
exports.Store = Store;
exports.ServerCapabilities = ServerCapabilities;

281
server/node_modules/mongodb/lib/url_parser.js generated vendored Executable file
View File

@@ -0,0 +1,281 @@
"use strict";
var ReadPreference = require('./read_preference');
module.exports = function(url, options) {
// Ensure we have a default options object if none set
options = options || {};
// Variables
var connection_part = '';
var auth_part = '';
var query_string_part = '';
var dbName = 'admin';
// Must start with mongodb
if(url.indexOf("mongodb://") != 0)
throw Error("URL must be in the format mongodb://user:pass@host:port/dbname");
// If we have a ? mark cut the query elements off
if(url.indexOf("?") != -1) {
query_string_part = url.substr(url.indexOf("?") + 1);
connection_part = url.substring("mongodb://".length, url.indexOf("?"))
} else {
connection_part = url.substring("mongodb://".length);
}
// Check if we have auth params
if(connection_part.indexOf("@") != -1) {
auth_part = connection_part.split("@")[0];
connection_part = connection_part.split("@")[1];
}
// Check if the connection string has a db
if(connection_part.indexOf(".sock") != -1) {
if(connection_part.indexOf(".sock/") != -1) {
dbName = connection_part.split(".sock/")[1];
connection_part = connection_part.split("/", connection_part.indexOf(".sock") + ".sock".length);
}
} else if(connection_part.indexOf("/") != -1) {
dbName = connection_part.split("/")[1];
connection_part = connection_part.split("/")[0];
}
// Result object
var object = {};
// Pick apart the authentication part of the string
var authPart = auth_part || '';
var auth = authPart.split(':', 2);
// Decode the URI components
auth[0] = decodeURIComponent(auth[0]);
if(auth[1]){
auth[1] = decodeURIComponent(auth[1]);
}
// Add auth to final object if we have 2 elements
if(auth.length == 2) object.auth = {user: auth[0], password: auth[1]};
// Variables used for temporary storage
var hostPart;
var urlOptions;
var servers;
var serverOptions = {socketOptions: {}};
var dbOptions = {read_preference_tags: []};
var replSetServersOptions = {socketOptions: {}};
// Add server options to final object
object.server_options = serverOptions;
object.db_options = dbOptions;
object.rs_options = replSetServersOptions;
object.mongos_options = {};
// Let's check if we are using a domain socket
if(url.match(/\.sock/)) {
// Split out the socket part
var domainSocket = url.substring(
url.indexOf("mongodb://") + "mongodb://".length
, url.lastIndexOf(".sock") + ".sock".length);
// Clean out any auth stuff if any
if(domainSocket.indexOf("@") != -1) domainSocket = domainSocket.split("@")[1];
servers = [{domain_socket: domainSocket}];
} else {
// Split up the db
hostPart = connection_part;
// Parse all server results
servers = hostPart.split(',').map(function(h) {
var _host, _port, ipv6match;
//check if it matches [IPv6]:port, where the port number is optional
if ((ipv6match = /\[([^\]]+)\](?:\:(.+))?/.exec(h))) {
_host = ipv6match[1];
_port = parseInt(ipv6match[2], 10) || 27017;
} else {
//otherwise assume it's IPv4, or plain hostname
var hostPort = h.split(':', 2);
_host = hostPort[0] || 'localhost';
_port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
// Check for localhost?safe=true style case
if(_host.indexOf("?") != -1) _host = _host.split(/\?/)[0];
}
// Return the mapped object
return {host: _host, port: _port};
});
}
// Get the db name
object.dbName = dbName || 'admin';
// Split up all the options
urlOptions = (query_string_part || '').split(/[&;]/);
// Ugh, we have to figure out which options go to which constructor manually.
urlOptions.forEach(function(opt) {
if(!opt) return;
var splitOpt = opt.split('='), name = splitOpt[0], value = splitOpt[1];
// Options implementations
switch(name) {
case 'slaveOk':
case 'slave_ok':
serverOptions.slave_ok = (value == 'true');
dbOptions.slaveOk = (value == 'true');
break;
case 'maxPoolSize':
case 'poolSize':
serverOptions.poolSize = parseInt(value, 10);
replSetServersOptions.poolSize = parseInt(value, 10);
break;
case 'autoReconnect':
case 'auto_reconnect':
serverOptions.auto_reconnect = (value == 'true');
break;
case 'minPoolSize':
throw new Error("minPoolSize not supported");
case 'maxIdleTimeMS':
throw new Error("maxIdleTimeMS not supported");
case 'waitQueueMultiple':
throw new Error("waitQueueMultiple not supported");
case 'waitQueueTimeoutMS':
throw new Error("waitQueueTimeoutMS not supported");
case 'uuidRepresentation':
throw new Error("uuidRepresentation not supported");
case 'ssl':
if(value == 'prefer') {
serverOptions.ssl = value;
replSetServersOptions.ssl = value;
break;
}
serverOptions.ssl = (value == 'true');
replSetServersOptions.ssl = (value == 'true');
break;
case 'replicaSet':
case 'rs_name':
replSetServersOptions.rs_name = value;
break;
case 'reconnectWait':
replSetServersOptions.reconnectWait = parseInt(value, 10);
break;
case 'retries':
replSetServersOptions.retries = parseInt(value, 10);
break;
case 'readSecondary':
case 'read_secondary':
replSetServersOptions.read_secondary = (value == 'true');
break;
case 'fsync':
dbOptions.fsync = (value == 'true');
break;
case 'journal':
dbOptions.j = (value == 'true');
break;
case 'safe':
dbOptions.safe = (value == 'true');
break;
case 'nativeParser':
case 'native_parser':
dbOptions.native_parser = (value == 'true');
break;
case 'connectTimeoutMS':
serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
break;
case 'socketTimeoutMS':
serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
break;
case 'w':
dbOptions.w = parseInt(value, 10);
if(isNaN(dbOptions.w)) dbOptions.w = value;
break;
case 'authSource':
dbOptions.authSource = value;
break;
case 'gssapiServiceName':
dbOptions.gssapiServiceName = value;
break;
case 'authMechanism':
if(value == 'GSSAPI') {
// If no password provided decode only the principal
if(object.auth == null) {
var urlDecodeAuthPart = decodeURIComponent(authPart);
if(urlDecodeAuthPart.indexOf("@") == -1) throw new Error("GSSAPI requires a provided principal");
object.auth = {user: urlDecodeAuthPart, password: null};
} else {
object.auth.user = decodeURIComponent(object.auth.user);
}
} else if(value == 'MONGODB-X509') {
object.auth = {user: decodeURIComponent(authPart)};
}
// Only support GSSAPI or MONGODB-CR for now
if(value != 'GSSAPI'
&& value != 'MONGODB-X509'
&& value != 'MONGODB-CR'
&& value != 'SCRAM-SHA-1'
&& value != 'PLAIN')
throw new Error("only GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR is supported by authMechanism");
// Authentication mechanism
dbOptions.authMechanism = value;
break;
case 'authMechanismProperties':
// Split up into key, value pairs
var values = value.split(',');
var o = {};
// For each value split into key, value
values.forEach(function(x) {
var v = x.split(':');
o[v[0]] = v[1];
});
// Set all authMechanismProperties
dbOptions.authMechanismProperties = o;
// Set the service name value
if(typeof o.SERVICE_NAME == 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
break;
case 'wtimeoutMS':
dbOptions.wtimeout = parseInt(value, 10);
break;
case 'readPreference':
if(!ReadPreference.isValid(value)) throw new Error("readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest");
dbOptions.read_preference = value;
break;
case 'readPreferenceTags':
// Decode the value
value = decodeURIComponent(value);
// Contains the tag object
var tagObject = {};
if(value == null || value == '') {
dbOptions.read_preference_tags.push(tagObject);
break;
}
// Split up the tags
var tags = value.split(/\,/);
for(var i = 0; i < tags.length; i++) {
var parts = tags[i].trim().split(/\:/);
tagObject[parts[0]] = parts[1];
}
// Set the preferences tags
dbOptions.read_preference_tags.push(tagObject);
break;
default:
break;
}
});
// No tags: should be null (not [])
if(dbOptions.read_preference_tags.length === 0) {
dbOptions.read_preference_tags = null;
}
// Validate if there are an invalid write concern combinations
if((dbOptions.w == -1 || dbOptions.w == 0) && (
dbOptions.journal == true
|| dbOptions.fsync == true
|| dbOptions.safe == true)) throw new Error("w set to -1 or 0 cannot be combined with safe/w/journal/fsync")
// If no read preference set it to primary
if(!dbOptions.read_preference) dbOptions.read_preference = 'primary';
// Add servers to result
object.servers = servers;
// Returned parsed object
return object;
}

233
server/node_modules/mongodb/lib/utils.js generated vendored Executable file
View File

@@ -0,0 +1,233 @@
"use strict";
var MongoError = require('mongodb-core').MongoError
// Flat copy of obj: enumerable keys (own and inherited) are copied,
// values are shared — no deep clone.
var shallowClone = function(obj) {
  var copy = {};
  for(var key in obj) {
    copy[key] = obj[key];
  }
  return copy;
}
// Define a read-only, enumerable property returning a fixed value
var getSingleProperty = function(obj, name, value) {
  Object.defineProperty(obj, name, {
    enumerable: true
    , get: function() { return value }
  });
}
var formatSortValue = exports.formatSortValue = function(sortDirection) {
var value = ("" + sortDirection).toLowerCase();
switch (value) {
case 'ascending':
case 'asc':
case '1':
return 1;
case 'descending':
case 'desc':
case '-1':
return -1;
default:
throw new Error("Illegal sort clause, must be of the form "
+ "[['field1', '(ascending|descending)'], "
+ "['field2', '(ascending|descending)']]");
}
};
var formattedOrderClause = exports.formattedOrderClause = function(sortValue) {
var orderBy = {};
if(sortValue == null) return null;
if (Array.isArray(sortValue)) {
if(sortValue.length === 0) {
return null;
}
for(var i = 0; i < sortValue.length; i++) {
if(sortValue[i].constructor == String) {
orderBy[sortValue[i]] = 1;
} else {
orderBy[sortValue[i][0]] = formatSortValue(sortValue[i][1]);
}
}
} else if(sortValue != null && typeof sortValue == 'object') {
orderBy = sortValue;
} else if (typeof sortValue == 'string') {
orderBy[sortValue] = 1;
} else {
throw new Error("Illegal sort clause, must be of the form " +
"[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]");
}
return orderBy;
};
var checkCollectionName = function checkCollectionName (collectionName) {
if('string' !== typeof collectionName) {
throw Error("collection name must be a String");
}
if(!collectionName || collectionName.indexOf('..') != -1) {
throw Error("collection names cannot be empty");
}
if(collectionName.indexOf('$') != -1 &&
collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null) {
throw Error("collection names must not contain '$'");
}
if(collectionName.match(/^\.|\.$/) != null) {
throw Error("collection names must not start or end with '.'");
}
// Validate that we are not passing 0x00 in the colletion name
if(!!~collectionName.indexOf("\x00")) {
throw new Error("collection names cannot contain a null character");
}
};
var handleCallback = function(callback, err, value1, value2) {
try {
if(callback == null) return;
if(value2) return callback(err, value1, value2);
return callback(err, value1);
} catch(err) {
process.nextTick(function() { throw err; });
return false;
}
return true;
}
/**
* Wrap a Mongo error document in an Error instance
* @ignore
* @api private
*/
var toError = function(error) {
if (error instanceof Error) return error;
var msg = error.err || error.errmsg || error.errMessage || error;
var e = new MongoError(msg);
// Get all object keys
var keys = typeof error == 'object'
? Object.keys(error)
: [];
for(var i = 0; i < keys.length; i++) {
e[keys[i]] = error[keys[i]];
}
return e;
}
/**
* @ignore
*/
var normalizeHintField = function normalizeHintField(hint) {
var finalHint = null;
if(typeof hint == 'string') {
finalHint = hint;
} else if(Array.isArray(hint)) {
finalHint = {};
hint.forEach(function(param) {
finalHint[param] = 1;
});
} else if(hint != null && typeof hint == 'object') {
finalHint = {};
for (var name in hint) {
finalHint[name] = hint[name];
}
}
return finalHint;
};
/**
* Create index name based on field spec
*
* @ignore
* @api private
*/
var parseIndexOptions = function(fieldOrSpec) {
var fieldHash = {};
var indexes = [];
var keys;
// Get all the fields accordingly
if('string' == typeof fieldOrSpec) {
// 'type'
indexes.push(fieldOrSpec + '_' + 1);
fieldHash[fieldOrSpec] = 1;
} else if(Array.isArray(fieldOrSpec)) {
fieldOrSpec.forEach(function(f) {
if('string' == typeof f) {
// [{location:'2d'}, 'type']
indexes.push(f + '_' + 1);
fieldHash[f] = 1;
} else if(Array.isArray(f)) {
// [['location', '2d'],['type', 1]]
indexes.push(f[0] + '_' + (f[1] || 1));
fieldHash[f[0]] = f[1] || 1;
} else if(isObject(f)) {
// [{location:'2d'}, {type:1}]
keys = Object.keys(f);
keys.forEach(function(k) {
indexes.push(k + '_' + f[k]);
fieldHash[k] = f[k];
});
} else {
// undefined (ignore)
}
});
} else if(isObject(fieldOrSpec)) {
// {location:'2d', type:1}
keys = Object.keys(fieldOrSpec);
keys.forEach(function(key) {
indexes.push(key + '_' + fieldOrSpec[key]);
fieldHash[key] = fieldOrSpec[key];
});
}
return {
name: indexes.join("_"), keys: keys, fieldHash: fieldHash
}
}
var isObject = exports.isObject = function (arg) {
return '[object Object]' == toString.call(arg)
}
var debugOptions = function(debugFields, options) {
var finaloptions = {};
debugFields.forEach(function(n) {
finaloptions[n] = options[n];
});
return finaloptions;
}
var decorateCommand = function(command, options, exclude) {
for(var name in options) {
if(exclude[name] == null) command[name] = options[name];
}
return command;
}
exports.shallowClone = shallowClone;
exports.getSingleProperty = getSingleProperty;
exports.checkCollectionName = checkCollectionName;
exports.toError = toError;
exports.formattedOrderClause = formattedOrderClause;
exports.parseIndexOptions = parseIndexOptions;
exports.normalizeHintField = normalizeHintField;
exports.handleCallback = handleCallback;
exports.decorateCommand = decorateCommand;
exports.isObject = isObject;
exports.debugOptions = debugOptions;

View File

@@ -0,0 +1,4 @@
language: node_js
node_js:
- 0.10
- 0.11

View File

@@ -0,0 +1,122 @@
1.1.21 03-26-2015
-----------------
- Updated bson module to 0.3.0 that extracted the c++ parser into bson-ext and made it an optional dependency.
1.1.20 03-24-2015
-----------------
- NODE-395 Socket Not Closing, db.close called before full set finished initalizing leading to server connections in progress not being closed properly.
1.1.19 03-21-2015
-----------------
- Made kerberos module ~0.0 to allow for quicker releases due to io.js of kerberos module.
1.1.18 03-17-2015
-----------------
- Added support for minHeartbeatFrequencyMS on server reconnect according to the SDAM specification.
1.1.17 03-16-2015
-----------------
- NODE-377, fixed issue where tags would correctly be checked on secondary and nearest to filter out eligible server candidates.
1.1.16 03-06-2015
-----------------
- rejectUnauthorized parameter is set to true for ssl certificates by default instead of false.
1.1.15 03-04-2015
-----------------
- Removed check for type in replset pickserver function.
1.1.14 02-26-2015
-----------------
- NODE-374 correctly adding passive secondaries to the list of eligable servers for reads
1.1.13 02-24-2015
-----------------
- NODE-365 mongoDB native node.js driver infinite reconnect attempts (fixed issue around handling of retry attempts)
1.1.12 02-16-2015
-----------------
- Fixed cursor transforms for buffered document reads from cursor.
1.1.11 02-02-2015
-----------------
- Remove the required setName for replicaset connections, if not set it will pick the first setName returned.
1.1.10 31-01-2015
-----------------
- Added tranforms.doc option to cursor to allow for pr. document transformations.
1.1.9 21-01-2015
----------------
- Updated BSON dependency to 0.2.18 to fix issues with io.js and node.
- Updated Kerberos dependency to 0.0.8 to fix issues with io.js and node.
- Don't treat findOne() as a command cursor.
- Refactored out state changes into methods to simplify read the next method.
1.1.8 09-12-2015
----------------
- Stripped out Object.defineProperty for performance reasons
- Applied more performance optimizations.
- properties cursorBatchSize, cursorSkip, cursorLimit are not methods setCursorBatchSize/cursorBatchSize, setCursorSkip/cursorSkip, setCursorLimit/cursorLimit
1.1.7 18-12-2014
----------------
- Use ns variable for getMore commands for command cursors to work properly with cursor version of listCollections and listIndexes.
1.1.6 18-12-2014
----------------
- Server manager fixed to support 2.2.X servers for travis test matrix.
1.1.5 17-12-2014
----------------
- Fall back to errmsg when creating MongoError for command errors
1.1.4 17-12-2014
----------------
- Added transform method support for cursor (initially just for initial query results) to support listCollections/listIndexes in 2.8.
- Fixed variable leak in scram.
- Fixed server manager to deal better with killing processes.
- Bumped bson to 0.2.16.
1.1.3 01-12-2014
----------------
- Fixed error handling issue with nonce generation in mongocr.
- Fixed issues with restarting servers when using ssl.
- Using strict for all classes.
- Cleaned up any escaping global variables.
1.1.2 20-11-2014
----------------
- Correctly encoding UTF8 collection names on wire protocol messages.
- Added emitClose parameter to topology destroy methods to allow users to specify that they wish the topology to emit the close event to any listeners.
1.1.1 14-11-2014
----------------
- Refactored code to use prototype instead of privileged methods.
- Fixed issue with auth where a runtime condition could leave replicaset members without proper authentication.
- Several deopt optimizations for v8 to improve performance and reduce GC pauses.
1.0.5 29-10-2014
----------------
- Fixed issue with wrong namespace being created for command cursors.
1.0.4 24-10-2014
----------------
- switched from using shift for the cursor due to bad slowdown on big batchSizes as shift causes entire array to be copied on each call.
1.0.3 21-10-2014
----------------
- fixed error issuing problem on cursor.next when iterating over a huge dataset with a very small batchSize.
1.0.2 07-10-2014
----------------
- fullsetup is now defined as a primary and secondary being available allowing for all read preferences to be satisfied.
- fixed issue with replset_state logging.
1.0.1 07-10-2014
----------------
- Dependency issue solved
1.0.0 07-10-2014
----------------
- Initial release of mongodb-core

201
server/node_modules/mongodb/node_modules/mongodb-core/LICENSE generated vendored Executable file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,14 @@
NODE = node
NPM = npm
JSDOC = jsdoc
name = all
generate_docs:
cp -R ./docs/history-header.md ./docs/content/meta/release-notes.md
more ./HISTORY.md >> ./docs/content/meta/release-notes.md
hugo -s docs/ -d ../public
$(JSDOC) -c conf.json -t docs/jsdoc-template/ -d ./public/api
cp -R ./public/api/scripts ./public/.
cp -R ./public/api/styles ./public/.
.PHONY: total

View File

@@ -0,0 +1,225 @@
# Description
The MongoDB Core driver is the low level part of the 2.0 or higher MongoDB driver and is meant for library developers not end users. It does not contain any abstractions or helpers outside of the basic management of MongoDB topology connections, CRUD operations and authentication.
## MongoDB Node.JS Core Driver
| what | where |
|---------------|------------------------------------------------|
| documentation | http://mongodb.github.io/node-mongodb-native/ |
| apidoc | http://mongodb.github.io/node-mongodb-native/ |
| source | https://github.com/christkv/mongodb-core |
| mongodb | http://www.mongodb.org/ |
### Blogs of Engineers involved in the driver
- Christian Kvalheim [@christkv](https://twitter.com/christkv) <http://christiankvalheim.com>
### Bugs / Feature Requests
Think youve found a bug? Want to see a new feature in node-mongodb-native? Please open a
case in our issue management tool, JIRA:
- Create an account and login <https://jira.mongodb.org>.
- Navigate to the NODE project <https://jira.mongodb.org/browse/NODE>.
- Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it.
Bug reports in JIRA for all driver projects (i.e. NODE, PYTHON, CSHARP, JAVA) and the
Core Server (i.e. SERVER) project are **public**.
### Questions and Bug Reports
* mailing list: https://groups.google.com/forum/#!forum/node-mongodb-native
* jira: http://jira.mongodb.org/
### Change Log
http://jira.mongodb.org/browse/NODE
# QuickStart
The quick start guide will show you how to set up a simple application using Core driver and MongoDB. It scope is only how to set up the driver and perform the simple crud operations. For more inn depth coverage we encourage reading the tutorials.
## Create the package.json file
Let's create a directory where our application will live. In our case we will put this under our projects directory.
```
mkdir myproject
cd myproject
```
Create a **package.json** using your favorite text editor and fill it in.
```json
{
"name": "myproject",
"version": "1.0.0",
"description": "My first project",
"main": "index.js",
"repository": {
"type": "git",
"url": "git://github.com/christkv/myfirstproject.git"
},
"dependencies": {
"mongodb-core": "~1.0"
},
"author": "Christian Kvalheim",
"license": "Apache 2.0",
"bugs": {
"url": "https://github.com/christkv/myfirstproject/issues"
},
"homepage": "https://github.com/christkv/myfirstproject"
}
```
Save the file and return to the shell or command prompt and use **NPM** to install all the dependencies.
```
npm install
```
You should see **NPM** download a lot of files. Once it's done you'll find all the downloaded packages under the **node_modules** directory.
Booting up a MongoDB Server
---------------------------
Let's boot up a MongoDB server instance. Download the right MongoDB version from [MongoDB](http://www.mongodb.org), open a new shell or command line and ensure the **mongod** command is in the shell or command line path. Now let's create a database directory (in our case under **/data**).
```
mongod --dbpath=/data --port 27017
```
You should see the **mongod** process start up and print some status information.
## Connecting to MongoDB
Let's create a new **app.js** file that we will use to show the basic CRUD operations using the MongoDB driver.
First let's add code to connect to the server. Notice that there is no concept of a database here and we use the topology directly to perform the connection.
```js
var Server = require('mongodb-core').Server
, assert = require('assert');
// Set up server connection
var server = new Server({
host: 'localhost'
, port: 27017
, reconnect: true
, reconnectInterval: 50
});
// Add event listeners
server.on('connect', function(_server) {
console.log('connected');
test.done();
});
server.on('close', function() {
console.log('closed');
});
server.on('reconnect', function() {
console.log('reconnect');
});
// Start connection
server.connect();
```
To connect to a replicaset we would use the `ReplSet` class and for a set of Mongos proxies we use the `Mongos` class. Each topology class offer the same CRUD operations and you operate on the topology directly. Let's look at an example exercising all the different available CRUD operations.
```js
var Server = require('mongodb-core').Server
, assert = require('assert');
// Set up server connection
var server = new Server({
host: 'localhost'
, port: 27017
, reconnect: true
, reconnectInterval: 50
});
// Add event listeners
server.on('connect', function(_server) {
console.log('connected');
// Execute the ismaster command
_server.command('system.$cmd', {ismaster: true}, function(err, result) {
// Perform a document insert
_server.insert('myproject.inserts1', [{a:1}, {a:2}], {
writeConcern: {w:1}, ordered:true
}, function(err, results) {
assert.equal(null, err);
assert.equal(2, results.result.n);
// Perform a document update
_server.update('myproject.inserts1', [{
q: {a: 1}, u: {'$set': {b:1}}
}], {
writeConcern: {w:1}, ordered:true
}, function(err, results) {
assert.equal(null, err);
assert.equal(1, results.result.n);
// Remove a document
_server.remove('myproject.inserts1', [{
q: {a: 1}, limit: 1
}], {
writeConcern: {w:1}, ordered:true
}, function(err, results) {
assert.equal(null, err);
assert.equal(1, results.result.n);
// Get a document
var cursor = _server.cursor('integration_tests.inserts_example4', {
find: 'integration_tests.example4'
, query: {a:1}
});
// Get the first document
cursor.next(function(err, doc) {
assert.equal(null, err);
assert.equal(2, doc.a);
// Execute the ismaster command
_server.command("system.$cmd"
, {ismaster: true}, function(err, result) {
assert.equal(null, err)
_server.destroy();
});
});
});
});
test.done();
});
});
server.on('close', function() {
console.log('closed');
});
server.on('reconnect', function() {
console.log('reconnect');
});
// Start connection
server.connect();
```
The core driver does not contain any helpers or abstractions only the core crud operations. These consist of the following commands.
* `insert`, Insert takes an array of 1 or more documents to be inserted against the topology and allows you to specify a write concern and if you wish to execute the inserts in order or out of order.
* `update`, Update takes an array of 1 or more update commands to be executed against the server topology and also allows you to specify a write concern and if you wish to execute the updates in order or out of order.
* `remove`, Remove takes an array of 1 or more remove commands to be executed against the server topology and also allows you to specify a write concern and if you wish to execute the removes in order or out of order.
* `cursor`, Returns you a cursor for either the 'virtual' `find` command, a command that returns a cursor id or a plain cursor id. Read the cursor tutorial for more inn depth coverage.
* `command`, Executes a command against MongoDB and returns the result.
* `auth`, Authenticates the current topology using a supported authentication scheme.
The Core Driver is a building block for library builders and is not meant for usage by end users as it lacks a lot of features the end user might need such as automatic buffering of operations when a primary is changing in a replicaset or the db and collections abstraction.
## Next steps
The next steps is to get more inn depth information about how the different aspects of the core driver works and how to leverage them to extend the functionality of the cursors. Please view the tutorials for more detailed information.

View File

@@ -0,0 +1,18 @@
Testing setup
=============
Single Server
-------------
mongod --dbpath=./db
Replicaset
----------
mongo --nodb
var x = new ReplSetTest({"useHostName":"false", "nodes" : {node0 : {}, node1 : {}, node2 : {}}})
x.startSet();
var config = x.getReplSetConfig()
x.initiate(config);
Mongos
------
var s = new ShardingTest( "auth1", 1 , 0 , 2 , {rs: true, noChunkSize : true});

View File

@@ -0,0 +1,60 @@
{
"plugins": ["plugins/markdown", "docs/lib/jsdoc/examples_plugin.js"],
"source": {
"include": [
"test/tests/functional/operation_example_tests.js",
"lib/topologies/mongos.js",
"lib/topologies/command_result.js",
"lib/topologies/read_preference.js",
"lib/topologies/replset.js",
"lib/topologies/server.js",
"lib/topologies/session.js",
"lib/topologies/replset_state.js",
"lib/connection/logger.js",
"lib/connection/connection.js",
"lib/cursor.js",
"lib/error.js",
"node_modules/bson/lib/bson/binary.js",
"node_modules/bson/lib/bson/code.js",
"node_modules/bson/lib/bson/db_ref.js",
"node_modules/bson/lib/bson/double.js",
"node_modules/bson/lib/bson/long.js",
"node_modules/bson/lib/bson/objectid.js",
"node_modules/bson/lib/bson/symbol.js",
"node_modules/bson/lib/bson/timestamp.js",
"node_modules/bson/lib/bson/max_key.js",
"node_modules/bson/lib/bson/min_key.js"
]
},
"templates": {
"cleverLinks": true,
"monospaceLinks": true,
"default": {
"outputSourceFiles" : true
},
"applicationName": "Node.js MongoDB Driver API",
"disqus": true,
"googleAnalytics": "UA-29229787-1",
"openGraph": {
"title": "",
"type": "website",
"image": "",
"site_name": "",
"url": ""
},
"meta": {
"title": "",
"description": "",
"keyword": ""
},
"linenums": true
},
"markdown": {
"parser": "gfm",
"hardwrap": true,
"tags": ["examples"]
},
"examples": {
"indent": 4
}
}

View File

@@ -0,0 +1,17 @@
module.exports = {
MongoError: require('./lib/error')
, Server: require('./lib/topologies/server')
, ReplSet: require('./lib/topologies/replset')
, Mongos: require('./lib/topologies/mongos')
, Logger: require('./lib/connection/logger')
, Cursor: require('./lib/cursor')
, ReadPreference: require('./lib/topologies/read_preference')
, BSON: require('bson')
// Raw operations
, Query: require('./lib/connection/commands').Query
// Auth mechanisms
, MongoCR: require('./lib/auth/mongocr')
, X509: require('./lib/auth/x509')
, Plain: require('./lib/auth/plain')
, GSSAPI: require('./lib/auth/gssapi')
}

View File

@@ -0,0 +1,244 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
var AuthSession = function(db, username, password, options) {
this.db = db;
this.username = username;
this.password = password;
this.options = options;
}
AuthSession.prototype.equal = function(session) {
return session.db == this.db
&& session.username == this.username
&& session.password == this.password;
}
// Kerberos class
var Kerberos = null;
var MongoAuthProcess = null;
// Try to grab the Kerberos class
try {
Kerberos = require('kerberos').Kerberos
// Authentication process for Mongo
MongoAuthProcess = require('kerberos').processes.MongoAuthProcess
} catch(err) {}
/**
* Creates a new GSSAPI authentication mechanism
* @class
* @return {GSSAPI} A cursor instance
*/
var GSSAPI = function() {
this.authStore = [];
}
/**
 * Authenticate every connection in the pool using GSSAPI/Kerberos.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Mechanism options; supports gssapiServiceName (default 'mongodb')
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
GSSAPI.prototype.auth = function(server, pool, db, username, password, options, callback) {
  var self = this;
  // Fail fast when the optional kerberos native addon is not installed
  if(Kerberos == null) return callback(new Error("Kerberos library is not installed"));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Tallies shared by all per-connection callbacks below
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Closure captures one connection per iteration
    var execute = function(connection) {
      // Start the multi-step GSSAPI conversation for this connection
      GSSAPIInitialize(db, username, password, db, gssapiServiceName, server, connection, function(err, r) {
        // Adjust count
        count = count - 1;
        // If we have an error
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          credentialsValid = true;
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // All connections processed: success if at least one authenticated
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details
          addAuthSession(self.authStore, new AuthSession(db, username, password, options));
          // Return correct authentication
          callback(null, true);
        } else if(count == 0) {
          // BUGFIX: error text previously said "mongocr"; this is the gssapi mechanism
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using gssapi"));
          callback(errorObject, false);
        }
      });
    }
    // Get the connection
    execute(connections.shift());
  }
}
//
// Initialize step
// Step 1 of the GSSAPI conversation: build a kerberos MongoAuthProcess for
// this connection, run its init and the first client-side transition, then
// hand the resulting payload to the first saslStart round trip against mongod.
// Errors at any stage are reported through `callback(err, false)`.
var GSSAPIInitialize = function(db, username, password, authdb, gssapiServiceName, server, connection, callback) {
  // Create authenticator
  var mongo_auth_process = new MongoAuthProcess(connection.host, connection.port, gssapiServiceName);
  // Perform initialization
  mongo_auth_process.init(username, password, function(err, context) {
    if(err) return callback(err, false);
    // Perform the first step (empty input payload)
    mongo_auth_process.transition('', function(err, payload) {
      if(err) return callback(err, false);
      // Call the next db step
      MongoDBGSSAPIFirstStep(mongo_auth_process, payload, db, username, password, authdb, server, connection, callback);
    });
  });
}
//
// Perform first step against mongodb
// Sends saslStart (mechanism GSSAPI) to $external, then feeds the server's
// reply payload back into the kerberos process before round trip two.
var MongoDBGSSAPIFirstStep = function(mongo_auth_process, payload, db, username, password, authdb, server, connection, callback) {
  // Build the sasl start command
  var command = {
      saslStart: 1
    , mechanism: 'GSSAPI'
    , payload: payload
    , autoAuthorize: 1
  };
  // Execute first sasl step
  server.command("$external.$cmd"
    , command
    , { connection: connection }, function(err, r) {
    if(err) return callback(err, false);
    // Keep the reply document; its conversationId is needed by later steps
    var doc = r.result;
    // Execute mongodb transition
    mongo_auth_process.transition(r.result.payload, function(err, payload) {
      if(err) return callback(err, false);
      // MongoDB API Second Step
      MongoDBGSSAPISecondStep(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback);
    });
  });
}
//
// Perform second step against mongodb
// (original comment said "first step" — this is the saslContinue round)
// Sends saslContinue with the payload produced by the previous kerberos
// transition, then transitions again before the third and final step.
var MongoDBGSSAPISecondStep = function(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback) {
  // Build Authentication command to send to MongoDB
  var command = {
      saslContinue: 1
    , conversationId: doc.conversationId
    , payload: payload
  };
  // Execute the command
  server.command("$external.$cmd"
    , command
    , { connection: connection }, function(err, r) {
    if(err) return callback(err, false);
    var doc = r.result;
    // Call next transition for kerberos
    mongo_auth_process.transition(doc.payload, function(err, payload) {
      if(err) return callback(err, false);
      // Call the last and third step
      MongoDBGSSAPIThirdStep(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback);
    });
  });
}
// Final step of the conversation: one more saslContinue, then a closing
// (null-input) kerberos transition before returning the server response.
var MongoDBGSSAPIThirdStep = function(mongo_auth_process, payload, doc, db, username, password, authdb, server, connection, callback) {
  // Build final command
  var command = {
      saslContinue: 1
    , conversationId: doc.conversationId
    , payload: payload
  };
  // Execute the command
  server.command("$external.$cmd"
    , command
    , { connection: connection }, function(err, r) {
    if(err) return callback(err, false);
    // Let the kerberos process finalize its state; on success hand the
    // raw server reply to the caller
    mongo_auth_process.transition(null, function(err, payload) {
      if(err) return callback(err, null);
      callback(null, r);
    });
  });
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Re-run GSSAPI authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
GSSAPI.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, session.options, function(err, r) {
      // Signal completion only once the last session finishes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = GSSAPI;

View File

@@ -0,0 +1,160 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Value object holding one set of credentials for a database.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  if(other.db != this.db) return false;
  if(other.username != this.username) return false;
  return other.password == this.password;
}
/**
 * Creates a new MongoCR authentication mechanism.
 * Keeps a store of successfully authenticated sessions for later
 * re-authentication of the pool.
 * @class
 * @return {MongoCR} A MongoCR mechanism instance
 */
function MongoCR() {
  // Successfully authenticated sessions
  this.authStore = [];
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Authenticate
 * Runs the two-step MONGODB-CR handshake (getnonce, then authenticate with
 * key = md5(nonce + username + md5(username + ":mongo:" + password))) on
 * every connection in the pool. Succeeds if at least one connection
 * authenticates; on total failure the last error is reported.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
MongoCR.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Valid connections
  // These three are shared mutable state across all per-connection callbacks
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Execute MongoCR
    var executeMongoCR = function(connection) {
      // Let's start the process
      server.command(f("%s.$cmd", db)
        , { getnonce: 1 }
        , { connection: connection }, function(err, r) {
          var nonce = null;
          var key = null;
          // Adjust the number of connections left
          // Get nonce
          // On getnonce failure key/nonce stay null; the authenticate
          // command below is still sent and its error is collected there
          if(err == null) {
            nonce = r.result.nonce;
            // Use node md5 generator
            var md5 = crypto.createHash('md5');
            // Generate keys used for authentication
            md5.update(username + ":mongo:" + password);
            var hash_password = md5.digest('hex');
            // Final key
            md5 = crypto.createHash('md5');
            md5.update(nonce + username + hash_password);
            key = md5.digest('hex');
          }
          // Execute command
          server.command(f("%s.$cmd", db)
            , { authenticate: 1, user: username, nonce: nonce, key:key}
            , { connection: connection }, function(err, r) {
              count = count - 1;
              // If we have an error
              if(err) {
                errorObject = err;
              } else if(r.result['$err']) {
                errorObject = r.result;
              } else if(r.result['errmsg']) {
                errorObject = r.result;
              } else {
                credentialsValid = true;
                numberOfValidConnections = numberOfValidConnections + 1;
              }
              // We have authenticated all connections
              if(count == 0 && numberOfValidConnections > 0) {
                // Store the auth details
                addAuthSession(self.authStore, new AuthSession(db, username, password));
                // Return correct authentication
                callback(null, true);
              } else if(count == 0) {
                if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using mongocr"));
                callback(errorObject, false);
              }
          });
      });
    }
    // Get the connection
    executeMongoCR(connections.shift());
  }
}
/**
 * Re-run MONGODB-CR authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
MongoCR.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, function(err, r) {
      // Fire the callback only after the final session completes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = MongoCR;

View File

@@ -0,0 +1,150 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, Binary = require('bson').Binary
, MongoError = require('../error');
// Value object holding one set of credentials for a database.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  if(other.db != this.db) return false;
  if(other.username != this.username) return false;
  return other.password == this.password;
}
/**
 * Creates a new PLAIN (SASL) authentication mechanism.
 * Keeps a store of successfully authenticated sessions for later
 * re-authentication of the pool.
 * @class
 * @return {Plain} A Plain mechanism instance
 */
function Plain() {
  // Successfully authenticated sessions
  this.authStore = [];
}
/**
 * Authenticate every connection in the pool using SASL PLAIN against the
 * $external database. Succeeds if at least one connection authenticates.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
Plain.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Tallies shared by all per-connection callbacks below
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Closure captures one connection per iteration
    var execute = function(connection) {
      // SASL PLAIN payload: NUL<username>NUL<password>
      var payload = new Binary(f("\x00%s\x00%s", username, password));
      // Let's start the sasl process
      var command = {
          saslStart: 1
        , mechanism: 'PLAIN'
        , payload: payload
        , autoAuthorize: 1
      };
      // Let's start the process
      server.command("$external.$cmd"
        , command
        , { connection: connection }, function(err, r) {
        // Adjust count
        count = count - 1;
        // If we have an error
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          credentialsValid = true;
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // We have authenticated all connections
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          // Return correct authentication
          callback(null, true);
        } else if(count == 0) {
          // BUGFIX: error text previously said "mongocr"; this is the plain mechanism
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using plain"));
          callback(errorObject, false);
        }
      });
    }
    // Get the connection
    execute(connections.shift());
  }
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Re-run PLAIN authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
Plain.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, function(err, r) {
      // Fire the callback only after the final session completes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = Plain;

View File

@@ -0,0 +1,317 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, Binary = require('bson').Binary
, MongoError = require('../error');
// Value object holding one set of credentials for a database.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  if(other.db != this.db) return false;
  if(other.username != this.username) return false;
  return other.password == this.password;
}
/**
 * Creates a new SCRAM-SHA-1 authentication mechanism.
 * Keeps a store of successfully authenticated sessions for later
 * re-authentication of the pool.
 * @class
 * @return {ScramSHA1} A ScramSHA1 mechanism instance
 */
function ScramSHA1() {
  // Successfully authenticated sessions
  this.authStore = [];
}
// Parse a SCRAM payload of the form "k1=v1,k2=v2,..." into a dictionary.
// BUGFIX: split only on the FIRST '=' of each pair — values are base64
// (e.g. the salt "s=...==") and may themselves contain '=' characters,
// which the previous split('=')[1] silently truncated.
var parsePayload = function(payload) {
  var dict = {};
  var parts = payload.split(',');
  for(var i = 0; i < parts.length; i++) {
    var idx = parts[i].indexOf('=');
    dict[parts[i].substring(0, idx)] = parts[i].substring(idx + 1);
  }
  return dict;
}
var passwordDigest = function(username, password) {
if(typeof username != 'string') throw new MongoError("username must be a string");
if(typeof password != 'string') throw new MongoError("password must be a string");
if(password.length == 0) throw new MongoError("password cannot be empty");
// Use node md5 generator
var md5 = crypto.createHash('md5');
// Generate keys used for authentication
md5.update(username + ":mongo:" + password);
return md5.digest('hex');
}
// XOR two buffers byte-wise. The result length equals the SHORTER input;
// non-buffer arguments are converted first.
var xor = function(a, b) {
  if (!Buffer.isBuffer(a)) a = new Buffer(a)
  if (!Buffer.isBuffer(b)) b = new Buffer(b)
  var limit = a.length > b.length ? b.length : a.length;
  var out = [];
  for (var i = 0; i < limit; i++) {
    out.push(a[i] ^ b[i]);
  }
  return new Buffer(out);
}
// Create a final digest
// Hi() from RFC 5802 (SCRAM): effectively PBKDF2-HMAC-SHA1 producing a
// single output block. U1 = HMAC(data, salt || INT(1)); each Ui is the HMAC
// of the previous one; the result is the XOR of all Ui over `iterations`
// rounds. `data` is the password digest, `salt` a Buffer, `iterations` an int.
var hi = function(data, salt, iterations) {
  // Create digest
  // One HMAC-SHA1 round keyed by `data`
  var digest = function(msg) {
    var hmac = crypto.createHmac('sha1', data);
    hmac.update(msg);
    var result = hmac.digest()
    return result;
  }
  // Create variables
  // Append the big-endian block index INT(1), per PBKDF2 / RFC 5802
  salt = Buffer.concat([salt, new Buffer('\x00\x00\x00\x01')])
  var ui = digest(salt);
  var u1 = ui;
  // iterations - 1 additional rounds, folded together with XOR
  for(var i = 0; i < iterations - 1; i++) {
    u1 = digest(u1);
    ui = xor(ui, u1);
  }
  return ui;
}
/**
 * Authenticate
 * Runs the full SCRAM-SHA-1 conversation (RFC 5802): client-first message
 * via saslStart, proof computation, client-final via saslContinue, plus an
 * optional empty saslContinue when the server has not flagged done. Runs on
 * every connection in the pool; succeeds if at least one authenticates.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
ScramSHA1.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Valid connections
  // Shared mutable tallies across all per-connection callbacks
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Execute MongoCR
    var executeScram = function(connection) {
      // Clean up the user
      // Escape '=' and ',' per SCRAM saslname rules.
      // NOTE(review): String.replace with a string pattern only replaces the
      // FIRST occurrence, and this reassigns the shared closure `username`
      // once per connection — confirm intended for multi-char/multi-conn cases.
      username = username.replace('=', "=3D").replace(',', '=2C');
      // Create a random nonce
      var nonce = crypto.randomBytes(24).toString('base64');
      // var nonce = 'MsQUY9iw0T9fx2MUEz6LZPwGuhVvWAhc'
      var firstBare = f("n=%s,r=%s", username, nonce);
      // Build command structure
      // "n,," is the GS2 header (no channel binding)
      var cmd = {
          saslStart: 1
        , mechanism: 'SCRAM-SHA-1'
        , payload: new Binary(f("n,,%s", firstBare))
        , autoAuthorize: 1
      }
      // Handle the error
      // Records err/$err/errmsg into errorObject; returns false on failure,
      // true (and bumps the valid-connection tally) on success
      var handleError = function(err, r) {
        if(err) {
          errorObject = err; return false;
        } else if(r.result['$err']) {
          errorObject = r.result; return false;
        } else if(r.result['errmsg']) {
          errorObject = r.result; return false;
        } else {
          credentialsValid = true;
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        return true
      }
      // Finish up
      // Fires the final callback once every connection has been processed
      var finish = function(_count, _numberOfValidConnections) {
        if(_count == 0 && _numberOfValidConnections > 0) {
          // Store the auth details
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          // Return correct authentication
          return callback(null, true);
        } else if(_count == 0) {
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using scram"));
          return callback(errorObject, false);
        }
      }
      var handleEnd = function(_err, _r) {
        // Handle any error
        handleError(_err, _r)
        // Adjust the number of connections
        count = count - 1;
        // Execute the finish
        finish(count, numberOfValidConnections);
      }
      // Execute start sasl command
      server.command(f("%s.$cmd", db)
        , cmd, { connection: connection }, function(err, r) {
        // Do we have an error, handle it
        if(handleError(err, r) == false) {
          count = count - 1;
          if(count == 0 && numberOfValidConnections > 0) {
            // Store the auth details
            addAuthSession(self.authStore, new AuthSession(db, username, password));
            // Return correct authentication
            return callback(null, true);
          } else if(count == 0) {
            if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using scram"));
            return callback(errorObject, false);
          }
          return;
        }
        // Get the dictionary
        // Server-first message: r (combined nonce), s (salt), i (iterations)
        var dict = parsePayload(r.result.payload.value())
        // Unpack dictionary
        var iterations = parseInt(dict.i, 10);
        var salt = dict.s;
        var rnonce = dict.r;
        // Set up start of proof
        // "c=biws" is base64("n,,") — the GS2 header echoed back
        var withoutProof = f("c=biws,r=%s", rnonce);
        var passwordDig = passwordDigest(username, password);
        var saltedPassword = hi(passwordDig
          , new Buffer(salt, 'base64')
          , iterations);
        // Create the client key: HMAC(SaltedPassword, "Client Key")
        var hmac = crypto.createHmac('sha1', saltedPassword);
        hmac.update(new Buffer("Client Key"));
        var clientKey = hmac.digest();
        // Create the stored key: H(ClientKey)
        var hash = crypto.createHash('sha1');
        hash.update(clientKey);
        var storedKey = hash.digest();
        // Create the authentication message
        // AuthMessage = client-first-bare "," server-first "," client-final-without-proof
        // NOTE(review): payload.value() is a string, so .toString('base64')
        // is effectively a no-op here — confirm this matches the server-first
        // message as sent on the wire.
        var authMsg = [firstBare, r.result.payload.value().toString('base64'), withoutProof].join(',');
        // Create client signature: HMAC(StoredKey, AuthMessage)
        var hmac = crypto.createHmac('sha1', storedKey);
        hmac.update(new Buffer(authMsg));
        var clientSig = hmac.digest();
        // Create client proof: p = ClientKey XOR ClientSignature
        var clientProof = f("p=%s", new Buffer(xor(clientKey, clientSig)).toString('base64'));
        // Create client final
        var clientFinal = [withoutProof, clientProof].join(',');
        // Generate server key (computed but not verified against the reply)
        var hmac = crypto.createHmac('sha1', saltedPassword);
        hmac.update(new Buffer('Server Key'))
        var serverKey = hmac.digest();
        // Generate server signature
        var hmac = crypto.createHmac('sha1', serverKey);
        hmac.update(new Buffer(authMsg))
        var serverSig = hmac.digest();
        //
        // Create continue message
        var cmd = {
            saslContinue: 1
          , conversationId: r.result.conversationId
          , payload: new Binary(new Buffer(clientFinal))
        }
        //
        // Execute sasl continue
        server.command(f("%s.$cmd", db)
          , cmd, { connection: connection }, function(err, r) {
          // Server may require one final empty saslContinue before done=true
          if(r && r.result.done == false) {
            var cmd = {
                saslContinue: 1
              , conversationId: r.result.conversationId
              , payload: new Buffer(0)
            }
            server.command(f("%s.$cmd", db)
              , cmd, { connection: connection }, function(err, r) {
              handleEnd(err, r);
            });
          } else {
            handleEnd(err, r);
          }
        });
      });
    }
    // Get the connection
    executeScram(connections.shift());
  }
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Re-run SCRAM-SHA-1 authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
ScramSHA1.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, function(err, r) {
      // Fire the callback only after the final session completes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
module.exports = ScramSHA1;

View File

@@ -0,0 +1,234 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Credentials (plus mechanism options) for one database.
function AuthSession(db, username, password, options) {
  this.db = db;
  this.username = username;
  this.password = password;
  this.options = options;
}

// Sessions match on db/username/password; options are not compared.
AuthSession.prototype.equal = function(other) {
  if(other.db != this.db) return false;
  if(other.username != this.username) return false;
  return other.password == this.password;
}
// Kerberos class
var Kerberos = null;
var MongoAuthProcess = null;
// Try to grab the Kerberos class
try {
Kerberos = require('kerberos').Kerberos
// Authentication process for Mongo
MongoAuthProcess = require('kerberos').processes.MongoAuthProcess
} catch(err) {}
/**
 * Creates a new SSPI (Windows Kerberos) authentication mechanism.
 * Keeps a store of successfully authenticated sessions for later
 * re-authentication of the pool.
 * @class
 * @return {SSPI} An SSPI mechanism instance
 */
function SSPI() {
  // Successfully authenticated sessions
  this.authStore = [];
}
/**
 * Authenticate every connection in the pool using SSPI/Kerberos (Windows).
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Mechanism options; supports gssapiServiceName (default 'mongodb')
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
SSPI.prototype.auth = function(server, pool, db, username, password, options, callback) {
  var self = this;
  // Fail fast when the optional kerberos native addon is not installed
  if(Kerberos == null) return callback(new Error("Kerberos library is not installed"));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Tallies shared by all per-connection callbacks below
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Closure captures one connection per iteration
    var execute = function(connection) {
      // Start Auth process for a connection
      SSIPAuthenticate(username, password, gssapiServiceName, server, connection, function(err, r) {
        // Adjust count
        count = count - 1;
        // If we have an error
        if(err) {
          errorObject = err;
        } else if(r && typeof r == 'object' && r.result['$err']) {
          errorObject = r.result;
        } else if(r && typeof r == 'object' && r.result['errmsg']) {
          errorObject = r.result;
        } else {
          credentialsValid = true;
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // We have authenticated all connections
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details
          addAuthSession(self.authStore, new AuthSession(db, username, password, options));
          // Return correct authentication
          callback(null, true);
        } else if(count == 0) {
          // BUGFIX: error text previously said "mongocr"; this is the sspi mechanism
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using sspi"));
          callback(errorObject, false);
        }
      });
    }
    // Get the connection
    execute(connections.shift());
  }
}
// Drive the full SSPI conversation for one connection: saslStart with an
// empty payload, then three saslContinue round trips, each feeding the
// server payload through the kerberos MongoAuthProcess transition.
// Calls back (null, true) when the server reports done, otherwise an error.
// NOTE(review): the name is spelled "SSIP" (not "SSPI") — kept as-is because
// SSPI.prototype.auth calls it by this exact name.
var SSIPAuthenticate = function(username, password, gssapiServiceName, server, connection, callback) {
  // Build Authentication command to send to MongoDB
  var command = {
      saslStart: 1
    , mechanism: 'GSSAPI'
    , payload: ''
    , autoAuthorize: 1
  };
  // Create authenticator
  var mongo_auth_process = new MongoAuthProcess(connection.host, connection.port, gssapiServiceName);
  // Execute first sasl step
  server.command("$external.$cmd"
    , command
    , { connection: connection }, function(err, r) {
    if(err) return callback(err, false);
    var doc = r.result;
    // Initialize the local kerberos context before the first transition
    mongo_auth_process.init(username, password, function(err) {
      if(err) return callback(err);
      mongo_auth_process.transition(doc.payload, function(err, payload) {
        if(err) return callback(err);
        // Perform the next step against mongod
        var command = {
            saslContinue: 1
          , conversationId: doc.conversationId
          , payload: payload
        };
        // Execute the command
        server.command("$external.$cmd"
          , command
          , { connection: connection }, function(err, r) {
          if(err) return callback(err, false);
          var doc = r.result;
          mongo_auth_process.transition(doc.payload, function(err, payload) {
            if(err) return callback(err);
            // Perform the next step against mongod
            var command = {
                saslContinue: 1
              , conversationId: doc.conversationId
              , payload: payload
            };
            // Execute the command
            server.command("$external.$cmd"
              , command
              , { connection: connection }, function(err, r) {
              if(err) return callback(err, false);
              var doc = r.result;
              // NOTE(review): this transition's error is not checked before
              // the final saslContinue below — confirm intended
              mongo_auth_process.transition(doc.payload, function(err, payload) {
                // Perform the next step against mongod
                var command = {
                    saslContinue: 1
                  , conversationId: doc.conversationId
                  , payload: payload
                };
                // Execute the command
                server.command("$external.$cmd"
                  , command
                  , { connection: connection }, function(err, r) {
                  if(err) return callback(err, false);
                  var doc = r.result;
                  // Server signals completion with done=true
                  if(doc.done) return callback(null, true);
                  callback(new Error("Authentication failed"), false);
                });
              });
            });
          });
        });
      });
    });
  });
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Re-run SSPI authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
SSPI.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, session.options, function(err, r) {
      // Fire the callback only after the final session completes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = SSPI;

View File

@@ -0,0 +1,145 @@
"use strict";
var f = require('util').format
, crypto = require('crypto')
, MongoError = require('../error');
// Value object holding one set of credentials for a database.
function AuthSession(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
}

// Two sessions are equal when db, username and password all match.
AuthSession.prototype.equal = function(other) {
  if(other.db != this.db) return false;
  if(other.username != this.username) return false;
  return other.password == this.password;
}
/**
 * Creates a new MONGODB-X509 authentication mechanism.
 * Keeps a store of successfully authenticated sessions for later
 * re-authentication of the pool.
 * @class
 * @return {X509} An X509 mechanism instance
 */
function X509() {
  // Successfully authenticated sessions
  this.authStore = [];
}
/**
 * Authenticate every connection in the pool using MONGODB-X509 against the
 * $external database (the client certificate identifies the user).
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {string} db Name of the database
 * @param {string} username Username (certificate subject)
 * @param {string} password Password (unused by the server for X509; stored for session equality)
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
X509.prototype.auth = function(server, pool, db, username, password, callback) {
  var self = this;
  // Get all the connections
  var connections = pool.getAll();
  // Total connections
  var count = connections.length;
  if(count == 0) return callback(null, null);
  // Tallies shared by all per-connection callbacks below
  var numberOfValidConnections = 0;
  var credentialsValid = false;
  var errorObject = null;
  // For each connection we need to authenticate
  while(connections.length > 0) {
    // Closure captures one connection per iteration
    var execute = function(connection) {
      // Let's start the sasl process
      var command = {
          authenticate: 1
        , mechanism: 'MONGODB-X509'
        , user: username
      };
      // Let's start the process
      server.command("$external.$cmd"
        , command
        , { connection: connection }, function(err, r) {
        // Adjust count
        count = count - 1;
        // If we have an error
        if(err) {
          errorObject = err;
        } else if(r.result['$err']) {
          errorObject = r.result;
        } else if(r.result['errmsg']) {
          errorObject = r.result;
        } else {
          credentialsValid = true;
          numberOfValidConnections = numberOfValidConnections + 1;
        }
        // We have authenticated all connections
        if(count == 0 && numberOfValidConnections > 0) {
          // Store the auth details
          addAuthSession(self.authStore, new AuthSession(db, username, password));
          // Return correct authentication
          callback(null, true);
        } else if(count == 0) {
          // BUGFIX: error text previously said "mongocr"; this is the x509 mechanism
          if(errorObject == null) errorObject = new MongoError(f("failed to authenticate using x509"));
          callback(errorObject, false);
        }
      });
    }
    // Get the connection
    execute(connections.shift());
  }
}
// Register a session in the store unless an equal one is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });
  if(!alreadyStored) authStore.push(session);
}
/**
 * Re-run X509 authentication for every stored session against the pool.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {Pool} pool Connection pool for this topology
 * @param {authResultCallback} callback Invoked once after every stored session has been retried
 * @return {object}
 */
X509.prototype.reauthenticate = function(server, pool, callback) {
  var self = this;
  var remaining = this.authStore.length;
  // No stored credentials: nothing to do
  if(remaining == 0) return callback(null, null);
  this.authStore.forEach(function(session) {
    self.auth(server, pool, session.db, session.username, session.password, function(err, r) {
      // Fire the callback only after the final session completes;
      // individual errors are intentionally not propagated
      remaining = remaining - 1;
      if(remaining == 0) {
        callback(null, null);
      }
    });
  });
}
/**
* This is a result from a authentication strategy
*
* @callback authResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {boolean} result The result of the authentication process
*/
module.exports = X509;

View File

@@ -0,0 +1,482 @@
"use strict";
var f = require('util').format
, Long = require('bson').Long
, setProperty = require('./utils').setProperty
, getProperty = require('./utils').getProperty
, getSingleProperty = require('./utils').getSingleProperty;
// Incrementing request id (module-wide; shared by every Query instance)
var _requestId = 0;
// Wire command operation ids (legacy MongoDB wire protocol opCodes)
var OP_QUERY = 2004;
var OP_GETMORE = 2005;
var OP_KILL_CURSORS = 2007;
// Query flags (OP_QUERY flag bit masks; OR-ed together in toBin)
var OPTS_NONE = 0;
var OPTS_TAILABLE_CURSOR = 2;
var OPTS_SLAVE = 4;
var OPTS_OPLOG_REPLAY = 8;
var OPTS_NO_CURSOR_TIMEOUT = 16;
var OPTS_AWAIT_DATA = 32;
var OPTS_EXHAUST = 64;
var OPTS_PARTIAL = 128;
// Response flags (OP_REPLY responseFlags bit masks)
// NOTE(review): the wire protocol defines CursorNotFound as bit value 1;
// with 0 here any mask test `flags & CURSOR_NOT_FOUND` is always falsy —
// confirm how this constant is used downstream.
var CURSOR_NOT_FOUND = 0;
var QUERY_FAILURE = 2;
var SHARD_CONFIG_STALE = 4;
var AWAIT_CAPABLE = 8;
/**************************************************************
 * QUERY
 **************************************************************/
/**
 * OP_QUERY wire-protocol message. Captures the namespace, query document and
 * cursor options; toBin() serializes the message to a buffer.
 * @param {object} bson BSON serializer (calculateObjectSize/serialize)
 * @param {string} ns Full namespace ("db.collection"); must not contain NUL
 * @param {object} query Query document
 * @param {object} [options] Optional: numberToSkip, numberToReturn,
 *   returnFieldSelector, serializeFunctions, maxBsonSize, checkKeys
 */
var Query = function(bson, ns, query, options) {
  var self = this;
  // Basic options needed to be passed in
  if(ns == null) throw new Error("ns must be specified for query");
  if(query == null) throw new Error("query must be specified for query");
  // Validate that we are not passing 0x00 in the collection name
  if(!!~ns.indexOf("\x00")) {
    throw new Error("namespace cannot contain a null character");
  }
  // BUGFIX: default `options` itself before reading from it — the original
  // assigned `options || {}` to this.options but then dereferenced the raw
  // `options` argument, crashing when options was omitted or null.
  options = options || {};
  // Basic options
  this.bson = bson;
  this.ns = ns;
  this.query = query;
  this.options = options;
  // Additional options
  this.numberToSkip = options.numberToSkip || 0;
  this.numberToReturn = options.numberToReturn || 0;
  this.returnFieldSelector = options.returnFieldSelector || null;
  // Unique id stamped into the message header
  this.requestId = _requestId++;
  // Serialization option
  this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
  this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
  this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : true;
  this.batchSize = self.numberToReturn;
  // Flags (set by callers before toBin; serialized as OPTS_* bit masks)
  this.tailable = false;
  this.slaveOk = false;
  this.oplogReply = false;
  this.noCursorTimeout = false;
  this.awaitData = false;
  this.exhaust = false;
  this.partial = false;
}
//
// Assign the next module-wide request id to this message
Query.prototype.incRequestId = function() {
  var id = _requestId;
  _requestId = _requestId + 1;
  this.requestId = id;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
//
// Serializes this query as an OP_QUERY message. Layout (integers little-endian):
//   [messageLength][requestId][responseTo][OP_QUERY]      <- 16 byte standard header
//   [flags][ns cstring][numberToSkip][numberToReturn]
//   [query BSON document][optional returnFieldSelector BSON document]
Query.prototype.toBin = function() {
var self = this;
// Basic length
// 16 byte header + 4 byte flags + ns cstring + numberToSkip/numberToReturn + query doc size
var length = 4
+ Buffer.byteLength(self.ns)
+ 1 + 4 + 4
+ self.bson.calculateObjectSize(self.query, self.serializeFunctions, true)
+ (4 * 4);
// Additional size for field selection
if(self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) {
length += self.bson.calculateObjectSize(self.returnFieldSelector, self.serializeFunctions, true);
}
// Allocate buffer for message
var _buffer = new Buffer(length);
// Set up the flags
var flags = 0;
if(this.tailable) flags |= OPTS_TAILABLE_CURSOR;
if(this.slaveOk) flags |= OPTS_SLAVE;
if(this.oplogReply) flags |= OPTS_OPLOG_REPLAY;
if(this.noCursorTimeout) flags |= OPTS_NO_CURSOR_TIMEOUT;
if(this.awaitData) flags |= OPTS_AWAIT_DATA;
if(this.exhaust) flags |= OPTS_EXHAUST;
if(this.partial) flags |= OPTS_PARTIAL;
// If batchSize is different to self.numberToReturn
// (batchSize may have been adjusted after construction; it wins)
if(self.batchSize != self.numberToReturn) self.numberToReturn = self.batchSize;
// Initial index
// Bytes 0-3 hold the total message length, which is only known at the end
// and written last, so start writing at offset 4
var index = 4;
// Write header information requestId (little-endian int32)
_buffer[index + 3] = (this.requestId >> 24) & 0xff;
_buffer[index + 2] = (this.requestId >> 16) & 0xff;
_buffer[index + 1] = (this.requestId >> 8) & 0xff;
_buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// Write header information responseTo (always 0 for client requests)
_buffer[index + 3] = (0 >> 24) & 0xff;
_buffer[index + 2] = (0 >> 16) & 0xff;
_buffer[index + 1] = (0 >> 8) & 0xff;
_buffer[index] = (0) & 0xff;
index = index + 4;
// Write header information OP_QUERY (opcode 2004)
_buffer[index + 3] = (OP_QUERY >> 24) & 0xff;
_buffer[index + 2] = (OP_QUERY >> 16) & 0xff;
_buffer[index + 1] = (OP_QUERY >> 8) & 0xff;
_buffer[index] = (OP_QUERY) & 0xff;
index = index + 4;
// Write header information flags
_buffer[index + 3] = (flags >> 24) & 0xff;
_buffer[index + 2] = (flags >> 16) & 0xff;
_buffer[index + 1] = (flags >> 8) & 0xff;
_buffer[index] = (flags) & 0xff;
index = index + 4;
// Write collection name as a null-terminated cstring
index = index + _buffer.write(this.ns, index, 'utf8') + 1;
_buffer[index - 1] = 0;
// Write header information flags numberToSkip
_buffer[index + 3] = (this.numberToSkip >> 24) & 0xff;
_buffer[index + 2] = (this.numberToSkip >> 16) & 0xff;
_buffer[index + 1] = (this.numberToSkip >> 8) & 0xff;
_buffer[index] = (this.numberToSkip) & 0xff;
index = index + 4;
// Write header information flags numberToReturn
_buffer[index + 3] = (this.numberToReturn >> 24) & 0xff;
_buffer[index + 2] = (this.numberToReturn >> 16) & 0xff;
_buffer[index + 1] = (this.numberToReturn >> 8) & 0xff;
_buffer[index] = (this.numberToReturn) & 0xff;
index = index + 4;
// Serialize query
// NOTE(review): the arithmetic below assumes serializeWithBufferAndIndex
// returns the index of the LAST byte written (hence the +1) - confirm this
// convention against the bson version bundled with this driver
var queryLength = this.bson.serializeWithBufferAndIndex(this.query
, this.checkKeys
, _buffer, index
, this.serializeFunctions) - index + 1;
// Write header information flags queryLength
// Overwrites the first 4 bytes of the serialized document with its length
_buffer[index + 3] = (queryLength >> 24) & 0xff;
_buffer[index + 2] = (queryLength >> 16) & 0xff;
_buffer[index + 1] = (queryLength >> 8) & 0xff;
_buffer[index] = (queryLength) & 0xff;
index = index + 4;
// Add to the index (move past the serialized query document)
index = index - 4 + queryLength;
// NOTE(review): writes the document terminator at index + 1; verify the
// off-by-one matches the serializer's return-index convention above
_buffer[index + 1] = 0x00;
// If we have field selectors, serialize the projection document the same way
if(this.returnFieldSelector && Object.keys(this.returnFieldSelector).length > 0) {
var fieldSelectorLength = this.bson.serializeWithBufferAndIndex(this.returnFieldSelector
, this.checkKeys
, _buffer
, index
, this.serializeFunctions) - index + 1;
// Write header information flags fieldSelectorLength
_buffer[index + 3] = (fieldSelectorLength >> 24) & 0xff;
_buffer[index + 2] = (fieldSelectorLength >> 16) & 0xff;
_buffer[index + 1] = (fieldSelectorLength >> 8) & 0xff;
_buffer[index] = (fieldSelectorLength) & 0xff;
index = index + 4;
index = index - 4 + fieldSelectorLength;
_buffer[index + 1] = 0x00;
}
// Write total document length into bytes 0-3 (little-endian int32)
_buffer[3] = (index >> 24) & 0xff;
_buffer[2] = (index >> 16) & 0xff;
_buffer[1] = (index >> 8) & 0xff;
_buffer[0] = (index) & 0xff;
// Return buffer
return _buffer;
}
// Reserve and return the next module-wide request id
Query.getRequestId = function() {
  _requestId = _requestId + 1;
  return _requestId;
}
/**************************************************************
 * GETMORE
 **************************************************************/
/**
 * An OP_GETMORE wire protocol message.
 * @param {object} bson A bson serializer instance
 * @param {string} ns Full namespace the cursor was opened against
 * @param {Long} cursorId 64-bit cursor id returned by the server
 * @param {object} [opts={}] Optional settings (numberToReturn)
 */
var GetMore = function(bson, ns, cursorId, opts) {
  var settings = opts || {};
  // Batch size requested from the server; 0 lets the server decide
  this.numberToReturn = settings.numberToReturn || 0;
  this.requestId = _requestId++;
  this.bson = bson;
  this.ns = ns;
  this.cursorId = cursorId;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
//
// Serializes an OP_GETMORE message (integers little-endian):
//   [messageLength][requestId][responseTo=0][OP_GETMORE]  <- 16 byte standard header
//   [ZERO][ns cstring][numberToReturn][cursorId int64]
GetMore.prototype.toBin = function() {
var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + (4 * 4);
// Create command buffer
var index = 0;
// Allocate buffer
var _buffer = new Buffer(length);
// Write header information
// index = write32bit(index, _buffer, length);
_buffer[index + 3] = (length >> 24) & 0xff;
_buffer[index + 2] = (length >> 16) & 0xff;
_buffer[index + 1] = (length >> 8) & 0xff;
_buffer[index] = (length) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, requestId);
_buffer[index + 3] = (this.requestId >> 24) & 0xff;
_buffer[index + 2] = (this.requestId >> 16) & 0xff;
_buffer[index + 1] = (this.requestId >> 8) & 0xff;
_buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// responseTo is always 0 for client requests
// index = write32bit(index, _buffer, 0);
_buffer[index + 3] = (0 >> 24) & 0xff;
_buffer[index + 2] = (0 >> 16) & 0xff;
_buffer[index + 1] = (0 >> 8) & 0xff;
_buffer[index] = (0) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, OP_GETMORE);
_buffer[index + 3] = (OP_GETMORE >> 24) & 0xff;
_buffer[index + 2] = (OP_GETMORE >> 16) & 0xff;
_buffer[index + 1] = (OP_GETMORE >> 8) & 0xff;
_buffer[index] = (OP_GETMORE) & 0xff;
index = index + 4;
// Reserved ZERO field required by the wire protocol
// index = write32bit(index, _buffer, 0);
_buffer[index + 3] = (0 >> 24) & 0xff;
_buffer[index + 2] = (0 >> 16) & 0xff;
_buffer[index + 1] = (0 >> 8) & 0xff;
_buffer[index] = (0) & 0xff;
index = index + 4;
// Write collection name as a null-terminated cstring
index = index + _buffer.write(this.ns, index, 'utf8') + 1;
_buffer[index - 1] = 0;
// Write batch size
// index = write32bit(index, _buffer, numberToReturn);
_buffer[index + 3] = (this.numberToReturn >> 24) & 0xff;
_buffer[index + 2] = (this.numberToReturn >> 16) & 0xff;
_buffer[index + 1] = (this.numberToReturn >> 8) & 0xff;
_buffer[index] = (this.numberToReturn) & 0xff;
index = index + 4;
// Write cursor id
// 64 bit id written as two little-endian 32 bit halves, low bits first
// index = write32bit(index, _buffer, cursorId.getLowBits());
_buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff;
_buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff;
_buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff;
_buffer[index] = (this.cursorId.getLowBits()) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, cursorId.getHighBits());
_buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff;
_buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff;
_buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff;
_buffer[index] = (this.cursorId.getHighBits()) & 0xff;
index = index + 4;
// Return buffer
return _buffer;
}
/**************************************************************
 * KILLCURSOR
 **************************************************************/
/**
 * An OP_KILL_CURSORS wire protocol message.
 * @param {object} bson A bson parser instance (accepted for interface parity; unused)
 * @param {Long[]} cursorIds 64-bit cursor ids to terminate on the server
 */
var KillCursor = function(bson, cursorIds) {
  // Cursors whose server-side resources should be released
  this.cursorIds = cursorIds;
  // Each message gets its own monotonically increasing request id
  this.requestId = _requestId++;
}
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
//
// Serializes an OP_KILL_CURSORS message (integers little-endian):
//   [messageLength][requestId][responseTo=0][OP_KILL_CURSORS]  <- 16 byte standard header
//   [ZERO][numberOfCursorIDs][cursorId int64, ...]
KillCursor.prototype.toBin = function() {
var length = 4 + 4 + (4 * 4) + (this.cursorIds.length * 8);
// Create command buffer
var index = 0;
var _buffer = new Buffer(length);
// Write header information
// index = write32bit(index, _buffer, length);
_buffer[index + 3] = (length >> 24) & 0xff;
_buffer[index + 2] = (length >> 16) & 0xff;
_buffer[index + 1] = (length >> 8) & 0xff;
_buffer[index] = (length) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, requestId);
_buffer[index + 3] = (this.requestId >> 24) & 0xff;
_buffer[index + 2] = (this.requestId >> 16) & 0xff;
_buffer[index + 1] = (this.requestId >> 8) & 0xff;
_buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// responseTo is always 0 for client requests
// index = write32bit(index, _buffer, 0);
_buffer[index + 3] = (0 >> 24) & 0xff;
_buffer[index + 2] = (0 >> 16) & 0xff;
_buffer[index + 1] = (0 >> 8) & 0xff;
_buffer[index] = (0) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, OP_KILL_CURSORS);
_buffer[index + 3] = (OP_KILL_CURSORS >> 24) & 0xff;
_buffer[index + 2] = (OP_KILL_CURSORS >> 16) & 0xff;
_buffer[index + 1] = (OP_KILL_CURSORS >> 8) & 0xff;
_buffer[index] = (OP_KILL_CURSORS) & 0xff;
index = index + 4;
// Reserved ZERO field required by the wire protocol
// index = write32bit(index, _buffer, 0);
_buffer[index + 3] = (0 >> 24) & 0xff;
_buffer[index + 2] = (0 >> 16) & 0xff;
_buffer[index + 1] = (0 >> 8) & 0xff;
_buffer[index] = (0) & 0xff;
index = index + 4;
// Write the number of cursor ids that follow
// index = write32bit(index, _buffer, this.cursorIds.length);
_buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff;
_buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff;
_buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff;
_buffer[index] = (this.cursorIds.length) & 0xff;
index = index + 4;
// Write all the cursor ids into the array
// Each 64 bit id is written as two little-endian 32 bit halves, low bits first
for(var i = 0; i < this.cursorIds.length; i++) {
// Write cursor id
// index = write32bit(index, _buffer, cursorIds[i].getLowBits());
_buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff;
_buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff;
_buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff;
_buffer[index] = (this.cursorIds[i].getLowBits()) & 0xff;
index = index + 4;
// index = write32bit(index, _buffer, cursorIds[i].getHighBits());
_buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff;
_buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff;
_buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff;
_buffer[index] = (this.cursorIds[i].getHighBits()) & 0xff;
index = index + 4;
}
// Return buffer
return _buffer;
}
/**
 * An OP_REPLY message received from the server. The constructor eagerly
 * decodes the fixed-size header; the BSON documents in the body are only
 * deserialized when parse() is called.
 * @param {object} bson A bson deserializer instance
 * @param {Buffer} data The complete raw reply message
 * @param {object} [opts={promoteLongs: true}] Optional settings
 */
var Response = function(bson, data, opts) {
opts = opts || {promoteLongs: true};
// Becomes true once parse() has deserialized the document body
this.parsed = false;
//
// Parse Header
//
this.index = 0;
this.raw = data;
this.data = data;
this.bson = bson;
this.opts = opts;
// Read the message length (all header fields are little-endian int32)
this.length = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Fetch the request id for this reply
this.requestId = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Fetch the id of the request that triggered the response
this.responseTo = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Skip op-code field (assumed to be OP_REPLY; not validated here)
this.index = this.index + 4;
// Unpack flags
this.responseFlags = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Unpack the cursor (64 bit id stored as two 32 bit halves, low bits first)
var lowBits = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
var highBits = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Create long object
this.cursorId = new Long(lowBits, highBits);
// Unpack the starting from
this.startingFrom = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Unpack the number of objects returned
this.numberReturned = data[this.index] | data[this.index + 1] << 8 | data[this.index + 2] << 16 | data[this.index + 3] << 24;
this.index = this.index + 4;
// Preallocate document array; filled in by parse()
this.documents = new Array(this.numberReturned);
// Flag values
// NOTE(review): verify CURSOR_NOT_FOUND matches the wire protocol bit value
// (CursorNotFound is bit 0, i.e. 1); a zero mask makes this test always false
this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) != 0;
this.queryFailure = (this.responseFlags & QUERY_FAILURE) != 0;
this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) != 0;
this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) != 0;
this.promoteLongs = typeof opts.promoteLongs == 'boolean' ? opts.promoteLongs : true;
}
// Whether parse() has already deserialized the document body
Response.prototype.isParsed = function() {
  return !!this.parsed;
}
/**
 * Deserialize the BSON documents in the reply body into this.documents.
 * Idempotent - subsequent calls are no-ops.
 * @param {object} [options={}] Optional settings
 * @param {boolean} [options.raw=false] When true, store raw Buffer slices
 *   (one per document) instead of deserialized objects
 */
Response.prototype.parse = function(options) {
// Don't parse again if not needed
if(this.parsed) return;
options = options || {};
// Allow the return of raw documents instead of parsing
var raw = options.raw || false;
//
// Parse Body
//
for(var i = 0; i < this.numberReturned; i++) {
// Each BSON document starts with its own little-endian int32 size
var bsonSize = this.data[this.index] | this.data[this.index + 1] << 8 | this.data[this.index + 2] << 16 | this.data[this.index + 3] << 24;
// Parse options (only promoteLongs is forwarded to the deserializer)
var _options = {promoteLongs: this.opts.promoteLongs};
// If we have raw results specified slice the return document
if(raw) {
this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
} else {
this.documents[i] = this.bson.deserialize(this.data.slice(this.index, this.index + bsonSize), _options);
}
// Adjust the index to the start of the next document
this.index = this.index + bsonSize;
}
// Set parsed
this.parsed = true;
}
module.exports = {
Query: Query
, GetMore: GetMore
, Response: Response
, KillCursor: KillCursor
}

View File

@@ -0,0 +1,454 @@
"use strict";
var inherits = require('util').inherits
, EventEmitter = require('events').EventEmitter
, net = require('net')
, tls = require('tls')
, f = require('util').format
, getSingleProperty = require('./utils').getSingleProperty
, debugOptions = require('./utils').debugOptions
, Response = require('./commands').Response
, MongoError = require('../error')
, Logger = require('./logger');
var _id = 0;
var debugFields = ['host', 'port', 'size', 'keepAlive', 'keepAliveInitialDelay', 'noDelay'
, 'connectionTimeout', 'socketTimeout', 'singleBufferSerializtion', 'ssl', 'ca', 'cert'
, 'rejectUnauthorized', 'promoteLongs'];
/**
 * Creates a new Connection instance
 * @class
 * @param {object} [options={}] Optional settings
 * @param {string} options.host The server host
 * @param {number} options.port The server port
 * @param {object} options.bson A bson serializer/deserializer instance (required)
 * @param {number} [options.size=5] Server connection pool size
 * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
 * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
 * @param {boolean} [options.noDelay=true] TCP Connection no delay
 * @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
 * @param {number} [options.socketTimeout=0] TCP Socket timeout setting
 * @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
 * @param {boolean} [options.ssl=false] Use SSL for connection
 * @param {Buffer} [options.ca] SSL Certificate store binary buffer
 * @param {Buffer} [options.cert] SSL Certificate binary buffer
 * @param {Buffer} [options.key] SSL Key file binary buffer
 * @param {string} [options.passphrase] SSL Certificate pass phrase
 * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
 * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
 * @fires Connection#connect
 * @fires Connection#close
 * @fires Connection#error
 * @fires Connection#timeout
 * @fires Connection#parseError
 * @throws {Error} When no bson parser is supplied
 * @return {Connection} A connection instance
 */
var Connection = function(options) {
  // Add event listener
  EventEmitter.call(this);
  // Normalize options before any property access; previously an omitted
  // options argument crashed with a TypeError ("Cannot read property 'bson'
  // of undefined") before the intended error below could be thrown
  options = options || {};
  this.options = options;
  // Identification information
  this.id = _id++;
  // Logger instance
  this.logger = Logger('Connection', options);
  // No bson parser passed in
  if(!options.bson) throw new Error("must pass in valid bson parser");
  // Get bson parser
  this.bson = options.bson;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
  // Message handler, invoked with every complete Response parsed off the socket
  this.messageHandler = options.messageHandler;
  // Max BSON message size
  this.maxBsonMessageSize = options.maxBsonMessageSize || (1024 * 1024 * 16 * 4);
  // Debug information
  if(this.logger.isDebug()) this.logger.debug(f('creating connection %s with options [%s]', this.id, JSON.stringify(debugOptions(debugFields, options))));
  // Default options
  this.port = options.port || 27017;
  this.host = options.host || 'localhost';
  this.keepAlive = typeof options.keepAlive == 'boolean' ? options.keepAlive : true;
  this.keepAliveInitialDelay = options.keepAliveInitialDelay || 0;
  this.noDelay = typeof options.noDelay == 'boolean' ? options.noDelay : true;
  this.connectionTimeout = options.connectionTimeout || 0;
  this.socketTimeout = options.socketTimeout || 0;
  // Check if we have a domain socket (a host containing a slash is a socket path)
  this.domainSocket = this.host.indexOf('\/') != -1;
  // Serialize commands using function
  this.singleBufferSerializtion = typeof options.singleBufferSerializtion == 'boolean' ? options.singleBufferSerializtion : true;
  this.serializationFunction = this.singleBufferSerializtion ? 'toBinUnified' : 'toBin';
  // SSL options
  this.ca = options.ca || null;
  this.cert = options.cert || null;
  this.key = options.key || null;
  this.passphrase = options.passphrase || null;
  this.ssl = typeof options.ssl == 'boolean' ? options.ssl : false;
  this.rejectUnauthorized = typeof options.rejectUnauthorized == 'boolean' ? options.rejectUnauthorized : true
  // If ssl not enabled, certificate verification cannot apply
  if(!this.ssl) this.rejectUnauthorized = false;
  // Response options
  this.responseOptions = {
    promoteLongs: typeof options.promoteLongs == 'boolean' ? options.promoteLongs : true
  }
  // Flushing
  this.flushing = false;
  this.queue = [];
  // Internal state
  this.connection = null;
  this.writeStream = null;
}
inherits(Connection, EventEmitter);
//
// Connection handlers
// Builds the 'error' listener bound to a connection instance
var errorHandler = function(self) {
  return function(err) {
    // Debug information
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s for [%s:%s] errored out with [%s]', self.id, self.host, self.port, JSON.stringify(err)));
    }
    // Only emit when somebody is listening; an unhandled 'error' event
    // would otherwise be thrown by the EventEmitter
    var hasListeners = self.listeners('error').length > 0;
    if(hasListeners) self.emit("error", MongoError.create(err), self);
  }
}
// Builds the 'timeout' listener bound to a connection instance
var timeoutHandler = function(self) {
  return function() {
    // Debug information
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s for [%s:%s] timed out', self.id, self.host, self.port));
    }
    // Surface the socket timeout as an event carrying a descriptive error
    var error = MongoError.create(f("connection %s to %s:%s timed out", self.id, self.host, self.port));
    self.emit("timeout", error, self);
  }
}
// Builds the 'close' listener bound to a connection instance
var closeHandler = function(self) {
  return function(hadError) {
    // Debug information
    if(self.logger.isDebug()) {
      self.logger.debug(f('connection %s with for [%s:%s] closed', self.id, self.host, self.port));
    }
    // An errored close is already reported via the error handler,
    // so only emit 'close' for a clean shutdown
    if(hadError) return;
    self.emit("close"
      , MongoError.create(f("connection %s to %s:%s closed", self.id, self.host, self.port))
      , self);
  }
}
// Builds the socket 'data' listener. Incoming TCP chunks are reassembled
// into complete wire protocol messages using four pieces of state stored on
// the connection instance:
//   self.buffer        - partially received message, allocated at its full size
//   self.sizeOfMessage - total expected byte length of the message in self.buffer
//   self.bytesRead     - how many bytes of self.buffer are filled so far
//   self.stubBuffer    - fewer than 4 bytes received; not yet enough to read a size
// Each complete message is handed to self.messageHandler as a Response;
// malformed input is reported via the 'parseError' event.
var dataHandler = function(self) {
return function(data) {
// Parse until we are done with the data
while(data.length > 0) {
// If we still have bytes to read on the current message
if(self.bytesRead > 0 && self.sizeOfMessage > 0) {
// Calculate the amount of remaining bytes
var remainingBytesToRead = self.sizeOfMessage - self.bytesRead;
// Check if the current chunk contains the rest of the message
if(remainingBytesToRead > data.length) {
// Copy the new data into the exiting buffer (should have been allocated when we know the message size)
data.copy(self.buffer, self.bytesRead);
// Adjust the number of bytes read so it point to the correct index in the buffer
self.bytesRead = self.bytesRead + data.length;
// Reset state of buffer (empty buffer terminates the while loop)
data = new Buffer(0);
} else {
// Copy the missing part of the data into our current buffer
data.copy(self.buffer, self.bytesRead, 0, remainingBytesToRead);
// Slice the overflow into a new buffer that we will then re-parse
data = data.slice(remainingBytesToRead);
// Emit current complete message
try {
var emitBuffer = self.buffer;
// Reset state of buffer before handing off, so reentrant writes see clean state
self.buffer = null;
self.sizeOfMessage = 0;
self.bytesRead = 0;
self.stubBuffer = null;
// Emit the buffer
self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
} catch(err) {
var errorObject = {err:"socketHandler", trace:err, bin:self.buffer, parseState:{
sizeOfMessage:self.sizeOfMessage,
bytesRead:self.bytesRead,
stubBuffer:self.stubBuffer}};
// We got a parse Error fire it off then keep going
self.emit("parseError", errorObject, self);
}
}
} else {
// Stub buffer is kept in case we don't get enough bytes to determine the
// size of the message (< 4 bytes)
if(self.stubBuffer != null && self.stubBuffer.length > 0) {
// If we have enough bytes to determine the message size let's do it
if(self.stubBuffer.length + data.length > 4) {
// Prepad the data (stub bytes first, then the new chunk)
var newData = new Buffer(self.stubBuffer.length + data.length);
self.stubBuffer.copy(newData, 0);
data.copy(newData, self.stubBuffer.length);
// Reassign for parsing; the loop re-enters the size-detection branch below
data = newData;
// Reset state of buffer
self.buffer = null;
self.sizeOfMessage = 0;
self.bytesRead = 0;
self.stubBuffer = null;
} else {
// Add the the bytes to the stub buffer
var newStubBuffer = new Buffer(self.stubBuffer.length + data.length);
// Copy existing stub buffer
self.stubBuffer.copy(newStubBuffer, 0);
// Copy missing part of the data
data.copy(newStubBuffer, self.stubBuffer.length);
// NOTE(review): newStubBuffer is never assigned back to self.stubBuffer
// here - the accumulated bytes appear to be dropped; confirm intended
// Exit parsing loop
data = new Buffer(0);
}
} else {
if(data.length > 4) {
// Retrieve the message size
// var sizeOfMessage = data.readUInt32LE(0);
var sizeOfMessage = data[0] | data[1] << 8 | data[2] << 16 | data[3] << 24;
// If we have a negative sizeOfMessage emit error and return
// (sizes >= 2^31 appear negative due to the signed 32 bit shift above)
if(sizeOfMessage < 0 || sizeOfMessage > self.maxBsonMessageSize) {
var errorObject = {err:"socketHandler", trace:'', bin:self.buffer, parseState:{
sizeOfMessage: sizeOfMessage,
bytesRead: self.bytesRead,
stubBuffer: self.stubBuffer}};
// We got a parse Error fire it off then keep going
self.emit("parseError", errorObject, self);
return;
}
// Ensure that the size of message is larger than 0 and less than the max allowed
// Case 1: message larger than this chunk - allocate and start accumulating
if(sizeOfMessage > 4 && sizeOfMessage < self.maxBsonMessageSize && sizeOfMessage > data.length) {
self.buffer = new Buffer(sizeOfMessage);
// Copy all the data into the buffer
data.copy(self.buffer, 0);
// Update bytes read
self.bytesRead = data.length;
// Update sizeOfMessage
self.sizeOfMessage = sizeOfMessage;
// Ensure stub buffer is null
self.stubBuffer = null;
// Exit parsing loop
data = new Buffer(0);
// Case 2: chunk is exactly one message - emit it without copying
} else if(sizeOfMessage > 4 && sizeOfMessage < self.maxBsonMessageSize && sizeOfMessage == data.length) {
try {
var emitBuffer = data;
// Reset state of buffer
self.buffer = null;
self.sizeOfMessage = 0;
self.bytesRead = 0;
self.stubBuffer = null;
// Exit parsing loop
data = new Buffer(0);
// Emit the message
self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
} catch (err) {
var errorObject = {err:"socketHandler", trace:err, bin:self.buffer, parseState:{
sizeOfMessage:self.sizeOfMessage,
bytesRead:self.bytesRead,
stubBuffer:self.stubBuffer}};
// We got a parse Error fire it off then keep going
self.emit("parseError", errorObject, self);
}
// Case 3: nonsensical size - report and reset the parser
} else if(sizeOfMessage <= 4 || sizeOfMessage > self.maxBsonMessageSize) {
var errorObject = {err:"socketHandler", trace:null, bin:data, parseState:{
sizeOfMessage:sizeOfMessage,
bytesRead:0,
buffer:null,
stubBuffer:null}};
// We got a parse Error fire it off then keep going
self.emit("parseError", errorObject, self);
// Clear out the state of the parser
self.buffer = null;
self.sizeOfMessage = 0;
self.bytesRead = 0;
self.stubBuffer = null;
// Exit parsing loop
data = new Buffer(0);
// Case 4: chunk contains a full message plus trailing bytes - emit and re-loop
} else {
var emitBuffer = data.slice(0, sizeOfMessage);
// Reset state of buffer
self.buffer = null;
self.sizeOfMessage = 0;
self.bytesRead = 0;
self.stubBuffer = null;
// Copy rest of message
data = data.slice(sizeOfMessage);
// Emit the message
self.messageHandler(new Response(self.bson, emitBuffer, self.responseOptions), self);
}
} else {
// Create a buffer that contains the space for the non-complete message
// (fewer than 4 bytes; not even the size field is complete yet)
self.stubBuffer = new Buffer(data.length)
// Copy the data to the stub buffer
data.copy(self.stubBuffer, 0);
// Exit parsing loop
data = new Buffer(0);
}
}
}
}
}
}
/**
 * Open the TCP (or TLS) socket and wire up all event handlers. Emits
 * 'connect' on success; 'error'/'timeout'/'close'/'parseError' otherwise.
 * @method
 * @param {object} [_options={}] Optional settings
 * @param {boolean} [_options.promoteLongs] Override the promoteLongs response option
 */
Connection.prototype.connect = function(_options) {
var self = this;
_options = _options || {};
// Check if we are overriding the promoteLongs
if(typeof _options.promoteLongs == 'boolean') {
self.responseOptions.promoteLongs = _options.promoteLongs;
}
// Create new connection instance
// (for a domain socket, host holds the socket path)
self.connection = self.domainSocket
? net.createConnection(self.host)
: net.createConnection(self.port, self.host);
// Set the options for the connection
self.connection.setKeepAlive(self.keepAlive, self.keepAliveInitialDelay);
self.connection.setTimeout(self.connectionTimeout);
self.connection.setNoDelay(self.noDelay);
// If we have ssl enabled
if(self.ssl) {
// Wrap the already-created plain socket in a TLS socket
// NOTE(review): with the `socket` option set, tls.connect presumably
// ignores the port/host arguments in favor of the supplied socket - confirm
var sslOptions = {
socket: self.connection
, rejectUnauthorized: self.rejectUnauthorized
}
if(self.ca) sslOptions.ca = self.ca;
if(self.cert) sslOptions.cert = self.cert;
if(self.key) sslOptions.key = self.key;
if(self.passphrase) sslOptions.passphrase = self.passphrase;
// Attempt SSL connection; self.connection now refers to the TLS socket
self.connection = tls.connect(self.port, self.host, sslOptions, function() {
// Error on auth or skip
if(self.connection.authorizationError && self.rejectUnauthorized) {
return self.emit("error", self.connection.authorizationError, self, {ssl:true});
}
// Set socket timeout instead of connection timeout
self.connection.setTimeout(self.socketTimeout);
// We are done emit connect
self.emit('connect', self);
});
} else {
self.connection.on('connect', function() {
// Set socket timeout instead of connection timeout
self.connection.setTimeout(self.socketTimeout);
// Emit connect event
self.emit('connect', self);
});
}
// Add handlers for events
// (attached after the ssl branch, so in the TLS case they land on the TLS socket)
self.connection.once('error', errorHandler(self));
self.connection.once('timeout', timeoutHandler(self));
self.connection.once('close', closeHandler(self));
self.connection.on('data', dataHandler(self));
}
/**
 * Tear down the underlying socket, if one was ever created
 * @method
 */
Connection.prototype.destroy = function() {
  if(!this.connection) return;
  this.connection.destroy();
}
/**
 * Write to connection
 * @method
 * @param {Buffer} buffer Serialized wire protocol message bytes to send
 *   (produced by a command's toBin/toBinUnified)
 */
Connection.prototype.write = function(buffer) {
// Debug log
if(this.logger.isDebug()) this.logger.debug(f('writing buffer [%s] to %s:%s', buffer.toString('hex'), this.host, this.port));
// Write out the command
this.connection.write(buffer, 'binary');
}
/**
 * Return id of connection as a string
 * @method
 * @return {string}
 */
Connection.prototype.toString = function() {
  return String(this.id);
}
/**
 * Return a plain-object summary of the connection (id, host, port)
 * @method
 * @return {object}
 */
Connection.prototype.toJSON = function() {
  var summary = {
    id: this.id
    , host: this.host
    , port: this.port
  };
  return summary;
}
/**
 * Is the connection connected (socket alive and writable)
 * @method
 * @return {boolean}
 */
Connection.prototype.isConnected = function() {
  var socket = this.connection;
  return !socket.destroyed && socket.writable;
}
/**
* A server connect event, used to verify that the connection is up and running
*
* @event Connection#connect
* @type {Connection}
*/
/**
* The server connection closed, all pool connections closed
*
* @event Connection#close
* @type {Connection}
*/
/**
* The server connection caused an error, all pool connections closed
*
* @event Connection#error
* @type {Connection}
*/
/**
* The server connection timed out, all pool connections closed
*
* @event Connection#timeout
* @type {Connection}
*/
/**
* The driver experienced an invalid message, all pool connections closed
*
* @event Connection#parseError
* @type {Connection}
*/
module.exports = Connection;

View File

@@ -0,0 +1,193 @@
"use strict";
var f = require('util').format
, MongoError = require('../error');
// Filters for classes
var classFilters = {};
var filteredClasses = {};
var level = null;
// Save the process id
var pid = process.pid;
// current logger
var currentLogger = null;
/**
 * Creates a new Logger instance. Logging level, filters and the logger
 * function itself are process-wide (module-level) state shared by all
 * Logger instances; the first instance to pass options wins.
 * @class
 * @param {string} className The Class name associated with the logging instance
 * @param {object} [options=null] Optional settings.
 * @param {Function} [options.logger=null] Custom logger function; defaults to console.log
 * @param {string} [options.loggerLevel=error] Override default global log level.
 * @return {Logger} a Logger instance.
 */
var Logger = function(className, options) {
if(!(this instanceof Logger)) return new Logger(className, options);
options = options || {};
// Current reference
var self = this;
this.className = className;
// Current logger (only set once, by the first instance that provides one)
if(currentLogger == null && options.logger) {
currentLogger = options.logger;
} else if(currentLogger == null) {
currentLogger = console.log;
}
// Set level of logging, default is error
if(level == null) {
level = options.loggerLevel || 'error';
}
// Add all class names
// NOTE(review): this checks filteredClasses but writes classFilters - a
// class constructed after Logger.filter() has registered it in
// filteredClasses never gets a classFilters entry, so its log guards fail;
// confirm whether this is intended
if(filteredClasses[this.className] == null) classFilters[this.className] = true;
}
/**
 * Log a message at the debug level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.debug = function(message, object) {
// Gate on level first, then on the class filter maps: the class must be
// registered, and either explicitly filtered in or no filter is active
if(this.isDebug()
&& classFilters[this.className] && (filteredClasses[this.className] || Object.keys(filteredClasses).length == 0)) {
// Epoch milliseconds, despite the name
var dateTime = new Date().getTime();
var msg = f("[%s-%s:%s] %s %s", 'DEBUG', this.className, pid, dateTime, message);
// Structured form passed as the second argument to the logger function
var state = {
type: 'debug', message: message, className: this.className, pid: pid, date: dateTime
};
if(object) state.meta = object;
currentLogger(msg, state);
}
}
/**
 * Log a message at the info level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.info = function(message, object) {
  // Gate on level first, then on the class filter maps
  if(this.isInfo()
    && classFilters[this.className] && (filteredClasses[this.className] || Object.keys(filteredClasses).length == 0)) {
    // Epoch milliseconds, despite the name
    var dateTime = new Date().getTime();
    var msg = f("[%s-%s:%s] %s %s", 'INFO', this.className, pid, dateTime, message);
    var state = {
      type: 'info', message: message, className: this.className, pid: pid, date: dateTime
    };
    if(object) state.meta = object;
    currentLogger(msg, state);
  }
};
// Fix: the assignment previously ended with a stray trailing comma (a
// leftover from object-literal style), chaining the following method
// assignments into one comma-sequence expression; now a proper statement.
/**
 * Log a message at the error level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.error = function(message, object) {
  // Gate on level first, then on the class filter maps
  if(this.isError()
    && classFilters[this.className] && (filteredClasses[this.className] || Object.keys(filteredClasses).length == 0)) {
    // Epoch milliseconds, despite the name
    var dateTime = new Date().getTime();
    var msg = f("[%s-%s:%s] %s %s", 'ERROR', this.className, pid, dateTime, message);
    var state = {
      type: 'error', message: message, className: this.className, pid: pid, date: dateTime
    };
    if(object) state.meta = object;
    currentLogger(msg, state);
  }
};
// Fix: replaced the stray trailing comma after the function expression with
// a semicolon; behavior is unchanged (the comma only chained statements).
/**
 * Is the logger set at info level (or more verbose)
 * @method
 * @return {boolean}
 */
Logger.prototype.isInfo = function() {
  return level == 'info' || level == 'debug';
};
// Fix: replaced the stray trailing comma after the function expression with
// a semicolon; behavior is unchanged.
/**
 * Is the logger set at error level (or more verbose) - true for all
 * three supported levels
 * @method
 * @return {boolean}
 */
Logger.prototype.isError = function() {
  return level == 'error' || level == 'info' || level == 'debug';
};
// Fix: replaced the stray trailing comma after the function expression with
// a semicolon; behavior is unchanged.
/**
 * Is the logger set at debug level (the most verbose level)
 * @method
 * @return {boolean}
 */
Logger.prototype.isDebug = function() {
  return 'debug' == level;
}
/**
 * Resets the logger to default settings, error and no filtered classes
 * (currentLogger and the per-class registration map are left untouched)
 * @method
 * @return {null}
 */
Logger.reset = function() {
level = 'error';
filteredClasses = {};
}
/**
 * Get the current logger function
 * (process-wide; shared by all Logger instances)
 * @method
 * @return {function}
 */
Logger.currentLogger = function() {
return currentLogger;
}
/**
 * Set the current logger function (process-wide)
 * @method
 * @param {function} logger Logger function.
 * @throws {MongoError} When the argument is not a function
 * @return {null}
 */
Logger.setCurrentLogger = function(logger) {
  var isFunction = typeof logger == 'function';
  if(!isFunction) throw new MongoError("current logger must be a function");
  currentLogger = logger;
}
/**
 * Set what classes to log.
 * @method
 * @param {string} type The type of filter (currently only class)
 * @param {string[]} values The filters to apply
 * @return {null}
 */
Logger.filter = function(type, values) {
  // Only class-based filtering with an array of names is supported
  if(type != 'class' || !Array.isArray(values)) return;
  // Rebuild the filter map from scratch
  filteredClasses = {};
  for(var i = 0; i < values.length; i++) {
    filteredClasses[values[i]] = true;
  }
}
/**
 * Set the current log level (process-wide)
 * @method
 * @param {string} _level Set current log level (debug, info, error)
 * @throws {Error} When the level is not one of the supported values
 * @return {null}
 */
Logger.setLevel = function(_level) {
  var validLevels = ['info', 'error', 'debug'];
  if(validLevels.indexOf(_level) == -1) throw new Error(f("%s is an illegal logging level", _level));
  level = _level;
}
module.exports = Logger;

View File

@@ -0,0 +1,258 @@
"use strict";
var inherits = require('util').inherits
, EventEmitter = require('events').EventEmitter
, Connection = require('./connection')
, Query = require('./commands').Query
, Logger = require('./logger')
, f = require('util').format;
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
var _id = 0;
/**
* Creates a new Pool instance
* @class
* @param {string} options.host The server host
* @param {number} options.port The server port
* @param {number} [options.size=5] Server connection pool size
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {number} [options.connectionTimeout=0] TCP Connection timeout setting
* @param {number} [options.socketTimeout=0] TCP Socket timeout setting
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
* @param {boolean} [options.ssl=false] Use SSL for connection
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
* @param {Buffer} [options.cert] SSL Certificate binary buffer
* @param {Buffer} [options.key] SSL Key file binary buffer
* @param {string} [options.passPhrase] SSL Certificate pass phrase
* @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
* @fires Pool#connect
* @fires Pool#close
* @fires Pool#error
* @fires Pool#timeout
* @fires Pool#parseError
* @return {Pool} A cursor instance
*/
var Pool = function(options) {
  var self = this;
  // Add event listener
  EventEmitter.call(this);
  // Default options first so the property accesses below are safe
  // (previously `options.size` was read before the `|| {}` default was
  // applied, so `new Pool()` threw a TypeError instead of the intended
  // "must pass in valid bson parser" error)
  options = options || {};
  this.options = options;
  // Pool size, defaults to 5 connections
  this.size = typeof options.size == 'number' ? options.size : 5;
  // Message handler invoked for responses on any pooled connection
  this.messageHandler = options.messageHandler;
  // No bson parser passed in
  if(!options.bson) throw new Error("must pass in valid bson parser");
  // Contains all connections
  this.connections = [];
  this.state = DISCONNECTED;
  // Round robin index used by Pool.prototype.get
  this.index = 0;
  this.dead = false;
  // Logger instance
  this.logger = Logger('Pool', options);
  // Pool id
  this.id = _id++;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
}
inherits(Pool, EventEmitter);
// Build the 'error' listener for a pooled connection. NOTE: inside the
// listener `this` is the emitting connection, not the pool, so the pool's
// dead flag must be read via the captured `self` (previously `this.dead`
// logged an unrelated/undefined value).
var errorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    // First failure kills the whole pool; later failures are ignored
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('error', err, self);
    }
  }
}
// Build the 'timeout' listener for a pooled connection. `self.dead` is
// used in the log line instead of `this.dead` because `this` inside the
// listener is the connection, not the pool.
var timeoutHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] timedout out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    // A single timeout tears down the whole pool
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('timeout', err, self);
    }
  }
}
// Build the 'close' listener for a pooled connection. `self.dead` is used
// in the log line instead of `this.dead` because `this` inside the
// listener is the connection, not the pool.
var closeHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] closed [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    // A single socket close tears down the whole pool
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('close', err, self);
    }
  }
}
// Build the 'parseError' listener for a pooled connection. `self.dead` is
// used in the log line instead of `this.dead` because `this` inside the
// listener is the connection, not the pool.
var parseErrorHandler = function(self) {
  return function(err, connection) {
    if(self.logger.isDebug()) self.logger.debug(f('pool [%s] errored out [%s] with connection [%s]', self.dead, JSON.stringify(err), JSON.stringify(connection)));
    // A wire-protocol parse error invalidates the whole pool
    if(!self.dead) {
      self.state = DISCONNECTED;
      self.dead = true;
      self.destroy();
      self.emit('parseError', err, self);
    }
  }
}
// Build the 'connect' listener: collect connections until the pool has
// reached its configured size, then announce readiness once.
var connectHandler = function(self) {
  return function(connection) {
    // Track the newly established connection
    self.connections.push(connection);
    // Once every socket is up, flip the state and signal readiness
    if(self.connections.length === self.size) {
      self.state = CONNECTED;
      self.emit("connect", self);
    }
  }
}
/**
* Destroy pool
* @method
*/
Pool.prototype.destroy = function() {
  this.state = DESTROYED;
  // Mark the pool itself as dead
  this.dead = true;
  var eventNames = ["close", "message", "error", "timeout", "parseError", "connect"];
  // Tear down every pooled connection
  this.connections.forEach(function(connection) {
    // Drop all listeners first so teardown does not re-enter the handlers
    eventNames.forEach(function(eventName) {
      connection.removeAllListeners(eventName);
    });
    // Destroy the underlying socket
    connection.destroy();
  });
}
/**
* Connect pool
* @method
*/
Pool.prototype.connect = function(_options) {
  var self = this;
  // Set to connecting
  this.state = CONNECTING
  // No dead
  this.dead = false;
  // Connect all sockets
  for(var i = 0; i < this.size; i++) {
    // Every iteration schedules with the same 100ms delay, so all sockets
    // attempt to connect at roughly the same time rather than staggered —
    // NOTE(review): confirm whether staggering was intended here
    setTimeout(function() {
      // Propagate the pool-level message handler onto the shared options
      // object before constructing each connection
      self.options.messageHandler = self.messageHandler;
      var connection = new Connection(self.options);
      // Add all handlers; fatal events use .once, 'connect' uses .on so
      // connectHandler can count connections up to the pool size
      connection.once('close', closeHandler(self));
      connection.once('error', errorHandler(self));
      connection.once('timeout', timeoutHandler(self));
      connection.once('parseError', parseErrorHandler(self));
      connection.on('connect', connectHandler(self));
      // Start connection
      connection.connect(_options);
    }, 100);
  }
}
/**
* Get a pool connection (round-robin)
* @method
* @return {Connection}
*/
Pool.prototype.get = function() {
  // Round-robin: hand out the connection at the current index, then
  // advance the index modulo the pool size
  var connection = this.connections[this.index];
  this.index = (this.index + 1) % this.connections.length;
  return connection;
}
/**
* Get all pool connections
* @method
* @return {array}
*/
Pool.prototype.getAll = function() {
  // Hand back a defensive copy so callers cannot mutate the internal list
  return this.connections.map(function(connection) { return connection; });
}
/**
* Is the pool connected
* @method
* @return {boolean}
*/
Pool.prototype.isConnected = function() {
  // Every single socket must report connected...
  var allUp = this.connections.every(function(connection) {
    return connection.isConnected();
  });
  // ...and the pool itself must have reached the connected state
  return allUp && this.state === CONNECTED;
}
/**
* Was the pool destroyed
* @method
* @return {boolean}
*/
Pool.prototype.isDestroyed = function() {
  // True once destroy() has flipped the pool state
  return this.state === DESTROYED;
}
/**
* A server connect event, used to verify that the connection is up and running
*
* @event Pool#connect
* @type {Pool}
*/
/**
* The server connection closed, all pool connections closed
*
* @event Pool#close
* @type {Pool}
*/
/**
* The server connection caused an error, all pool connections closed
*
* @event Pool#error
* @type {Pool}
*/
/**
* The server connection timed out, all pool connections closed
*
* @event Pool#timeout
* @type {Pool}
*/
/**
* The driver experienced an invalid message, all pool connections closed
*
* @event Pool#parseError
* @type {Pool}
*/
module.exports = Pool;

View File

@@ -0,0 +1,77 @@
"use strict";
// Set property function
// Define a boolean property named prop.name on `obj` that mirrors the bit
// `flag` inside `values.flags`: setting the property to true sets the bit,
// setting it to false clears the bit if currently set.
var setProperty = function(obj, prop, flag, values) {
  Object.defineProperty(obj, prop.name, {
    enumerable:true,
    set: function(value) {
      // `f` (util.format) is never required in this module, so the
      // original f("%s required a boolean", ...) threw a ReferenceError
      // on the error path; build the same message directly instead
      if(typeof value != 'boolean') throw new Error(prop.name + " required a boolean");
      // Flip the bit to 1
      if(value == true) values.flags |= flag;
      // Flip the bit to 0 if it's set, otherwise ignore
      if(value == false && (values.flags & flag) == flag) values.flags ^= flag;
      prop.value = value;
    }
    , get: function() { return prop.value; }
  });
}
// Set property function
// Define a lazy, read-only property `propName` on `obj`, backed by
// `values[fieldName]`; triggers obj.parse() when the backing field is
// still missing and optionally post-processes the raw value via `func`.
var getProperty = function(obj, propName, fieldName, values, func) {
  Object.defineProperty(obj, propName, {
    enumerable: true,
    get: function() {
      // Lazily parse the owner when the backing field is not populated yet
      if(values[fieldName] == null && obj.isParsed && !obj.isParsed()) {
        obj.parse();
      }
      var raw = values[fieldName];
      // Apply the post-processing hook when one was supplied
      return typeof func === 'function' ? func(raw) : raw;
    }
  });
}
// Set simple property
// Expose `value` as an enumerable, read-only property `name` on `obj`.
var getSingleProperty = function(obj, name, value) {
  Object.defineProperty(obj, name, {
    enumerable: true,
    get: function() { return value; }
  });
}
// Shallow copy
// Shallow-copy the enumerable properties of fObj onto tObj (a fresh
// object when omitted) and return the target.
var copy = function(fObj, tObj) {
  var target = tObj || {};
  for(var key in fObj) {
    target[key] = fObj[key];
  }
  return target;
}
// Project only the whitelisted debugFields out of options, producing a
// small object that is safe to log.
var debugOptions = function(debugFields, options) {
  return debugFields.reduce(function(acc, field) {
    acc[field] = options[field];
    return acc;
  }, {});
}
/**
 * Bind a callback to the currently active Node.js domain so errors thrown
 * asynchronously are routed to it. Returns the callback unchanged when
 * there is no active domain or no callback was given.
 * NOTE(review): the `domain` module is deprecated in modern Node.js, where
 * process.domain is typically undefined, making this a pass-through.
 * @ignore
 */
var bindToCurrentDomain = function(callback) {
  var domain = process.domain;
  if(domain == null || callback == null) return callback;
  return domain.bind(callback);
}
exports.setProperty = setProperty;
exports.getProperty = getProperty;
exports.getSingleProperty = getSingleProperty;
exports.copy = copy;
exports.bindToCurrentDomain = bindToCurrentDomain;
exports.debugOptions = debugOptions;

View File

@@ -0,0 +1,607 @@
"use strict";
var Long = require('bson').Long
, Logger = require('./connection/logger')
, MongoError = require('./error')
, f = require('util').format;
/**
* This is a cursor results callback
*
* @callback resultCallback
* @param {error} error An error object. Set to null if no error present
* @param {object} document
*/
/**
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
* allowing for iteration over the results returned from the underlying query.
*
* **CURSORS Cannot directly be instantiated**
* @example
* var Server = require('mongodb-core').Server
* , ReadPreference = require('mongodb-core').ReadPreference
* , assert = require('assert');
*
* var server = new Server({host: 'localhost', port: 27017});
* // Wait for the connection event
* server.on('connect', function(server) {
* assert.equal(null, err);
*
* // Execute the write
* var cursor = _server.cursor('integration_tests.inserts_example4', {
* find: 'integration_tests.example4'
* , query: {a:1}
* }, {
* readPreference: new ReadPreference('secondary');
* });
*
* // Get the first document
* cursor.next(function(err, doc) {
* assert.equal(null, err);
* server.destroy();
* });
* });
*
* // Start connecting
* server.connect();
*/
/**
* Creates a new Cursor, not to be used directly
* @class
* @param {object} bson An instance of the BSON parser
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {{object}|Long} cmd The selector (can be a command or a cursorId)
* @param {object} [options=null] Optional settings.
* @param {object} [options.batchSize=1000] Batchsize for the operation
* @param {array} [options.documents=[]] Initial documents list for cursor
* @param {object} [options.transforms=null] Transform methods for the cursor results
* @param {function} [options.transforms.query] Transform the value returned from the initial query
* @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next
* @param {object} topology The server topology instance.
* @param {object} topologyOptions The server topology options.
* @return {Cursor} A cursor instance
* @property {number} cursorBatchSize The current cursorBatchSize for the cursor
* @property {number} cursorLimit The current cursorLimit for the cursor
* @property {number} cursorSkip The current cursorSkip for the cursor
*/
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  options = options || {};
  // Cursor reference
  var self = this;
  // NOTE: removed unused local `var query = null;` — the wire protocol
  // query is created lazily as this.query in Cursor.prototype.next
  // Cursor connection
  this.connection = null;
  // Cursor server
  this.server = null;
  // Do we have a not connected handler (stores calls for replay on reconnect)
  this.disconnectHandler = options.disconnectHandler;
  // Set local values
  this.bson = bson;
  this.ns = ns;
  this.cmd = cmd;
  this.options = options;
  this.topology = topology;
  // All internal state; options take precedence over values on the command
  this.cursorState = {
      cursorId: null
    , documents: options.documents || []
    , cursorIndex: 0
    , dead: false
    , killed: false
    , init: false
    , notified: false
    , limit: options.limit || cmd.limit || 0
    , skip: options.skip || cmd.skip || 0
    , batchSize: options.batchSize || cmd.batchSize || 1000
    , currentLimit: 0
    // Result field name if not a cursor (contains the array of results)
    , transforms: options.transforms
  }
  // Callback controller, assigned from the server in next()
  this.callbacks = null;
  // Logger
  this.logger = Logger('Cursor', options);
  //
  // Did we pass in a cursor id instead of a command? Accept plain numbers
  // (promoted to Long) as well as Long instances.
  if(typeof cmd == 'number') {
    this.cursorState.cursorId = Long.fromNumber(cmd);
  } else if(cmd instanceof Long) {
    this.cursorState.cursorId = cmd;
  }
}
// Override the batch size used for subsequent getMore operations.
Cursor.prototype.setCursorBatchSize = function(value) {
  this.cursorState.batchSize = value;
}

// Current batch size.
Cursor.prototype.cursorBatchSize = function() {
  return this.cursorState.batchSize;
}

// Override the maximum number of documents this cursor may return.
Cursor.prototype.setCursorLimit = function(value) {
  this.cursorState.limit = value;
}

// Current limit (0 means unlimited).
Cursor.prototype.cursorLimit = function() {
  return this.cursorState.limit;
}

// Override how many documents the server should skip.
Cursor.prototype.setCursorSkip = function(value) {
  this.cursorState.skip = value;
}

// Current skip value.
Cursor.prototype.cursorSkip = function() {
  return this.cursorState.skip;
}
//
// Execute a getMore against the server to fetch the next batch for an
// already-established cursor, delegating wire-format details to the
// server's wireProtocolHandler.
var execGetMore = function(self, callback) {
  if(self.logger.isDebug()) self.logger.debug(f("schedule getMore call for query [%s]", JSON.stringify(self.query)))
  // Determine if it's a raw query (documents handed back as raw BSON)
  var raw = self.options.raw || self.cmd.raw;
  // We have a wire protocol handler
  self.server.wireProtocolHandler.getMore(self.bson, self.ns, self.cursorState, self.cursorState.batchSize, raw, self.connection, self.callbacks, self.options, callback);
}
//
// Execute the first query for a cursor: register a response callback under
// the query's request id and write the serialized query out. The callback
// normalizes the three possible response shapes (command cursor, legacy
// `result` array, plain find) into cursorState.
var execInitialQuery = function(self, query, cmd, options, cursorState, connection, logger, callbacks, callback) {
  if(logger.isDebug()) {
    logger.debug(f("issue initial query [%s] with flags [%s]"
      , JSON.stringify(cmd)
      , JSON.stringify(query)));
  }

  var queryCallback = function(err, result) {
    if(err) return callback(err);

    // Wire layer flagged the reply as a query failure
    if (result.queryFailure) {
      return callback(MongoError.create(result.documents[0]), null);
    }

    // Check if we have a command cursor: exactly one response document and
    // not a plain find command
    if(Array.isArray(result.documents) && result.documents.length == 1 && !cmd.find) {
      // Server-side error document
      if(result.documents[0]['$err']
        || result.documents[0]['errmsg']) {
        return callback(MongoError.create(result.documents[0]), null);
      }

      // Command cursor response shape: { cursor: { id, ns, firstBatch } }
      if(result.documents[0].cursor != null
        && typeof result.documents[0].cursor != 'string') {
          var id = result.documents[0].cursor.id;
          // If we have a namespace change set the new namespace for getmores
          if(result.documents[0].cursor.ns) {
            self.ns = result.documents[0].cursor.ns;
          }
          // Promote id to long if needed
          cursorState.cursorId = typeof id == 'number' ? Long.fromNumber(id) : id;
          // If we have a firstBatch set it
          if(Array.isArray(result.documents[0].cursor.firstBatch)) {
            cursorState.documents = result.documents[0].cursor.firstBatch;//.reverse();
          }

          // Return after processing command cursor
          return callback(null, null);
      }

      // Legacy command result: documents embedded in a `result` array,
      // no server-side cursor remains open
      if(Array.isArray(result.documents[0].result)) {
        cursorState.documents = result.documents[0].result;
        cursorState.cursorId = Long.ZERO;
        return callback(null, null);
      }
    }

    // Otherwise fall back to regular find path
    cursorState.cursorId = result.cursorId;
    cursorState.documents = result.documents;

    // Transform the results with passed in transformation method if provided
    if(cursorState.transforms && typeof cursorState.transforms.query == 'function') {
      cursorState.documents = cursorState.transforms.query(result);
    }

    // Return callback
    callback(null, null);
  }

  // If we have a raw query decorate the function; the wire layer checks
  // this flag to skip BSON deserialization
  if(options.raw || cmd.raw) {
    queryCallback.raw = options.raw || cmd.raw;
  }

  // Set up callback
  callbacks.register(query.requestId, queryCallback);

  // Write the initial command out
  connection.write(query.toBin());
}
//
// Invoke a user callback, rethrowing any exception it raises on the next
// tick so it surfaces as an uncaught error instead of being swallowed by
// the driver's own call stack.
var handleCallback = function(callback, err, result) {
  try {
    callback(err, result);
  } catch(callbackError) {
    process.nextTick(function() {
      throw callbackError;
    });
  }
}
/**
* Clone the cursor
* @method
* @return {Cursor}
*/
Cursor.prototype.clone = function() {
  // Ask the topology for a fresh cursor over the same namespace/command;
  // the clone starts unexecuted regardless of this cursor's progress
  return this.topology.cursor(this.ns, this.cmd, this.options);
}
/**
* Checks if the cursor is dead
* @method
* @return {boolean} A boolean signifying if the cursor is dead or not
*/
Cursor.prototype.isDead = function() {
  // Dead means exhausted or torn down, whether or not the user killed it
  var state = this.cursorState;
  return state.dead == true;
}
/**
* Checks if the cursor was killed by the application
* @method
* @return {boolean} A boolean signifying if the cursor was killed by the application
*/
Cursor.prototype.isKilled = function() {
  // Set when the application explicitly called kill()
  var state = this.cursorState;
  return state.killed == true;
}
/**
* Checks if the cursor notified it's caller about it's death
* @method
* @return {boolean} A boolean signifying if the cursor notified the callback
*/
Cursor.prototype.isNotified = function() {
  // Set once the terminal (null, null) callback has been delivered
  var state = this.cursorState;
  return state.notified == true;
}
/**
* Returns current buffered documents length
* @method
* @return {number} The number of items in the buffered documents
*/
Cursor.prototype.bufferedCount = function() {
  // Documents fetched from the server but not yet handed to the caller
  var state = this.cursorState;
  return state.documents.length - state.cursorIndex;
}
/**
* Returns current buffered documents
* @method
* @return {Array} An array of buffered documents
*/
Cursor.prototype.readBufferedDocuments = function(number) {
  var state = this.cursorState;
  // Never hand out more documents than are actually buffered
  var unread = state.documents.length - state.cursorIndex;
  var count = number < unread ? number : unread;
  var docs = state.documents.slice(state.cursorIndex, state.cursorIndex + count);
  // Account for the documents just consumed
  state.currentLimit = state.currentLimit + count;
  state.cursorIndex = state.cursorIndex + count;
  // Apply the per-document transform when one is configured
  var transforms = state.transforms;
  if(transforms && typeof transforms.doc == 'function') {
    docs = docs.map(function(doc) { return transforms.doc(doc); });
  }
  // Return elements
  return docs;
}
/**
* Kill the cursor
* @method
* @param {resultCallback} callback A callback function
*/
Cursor.prototype.kill = function(callback) {
  var state = this.cursorState;
  // Flag the cursor as dead and killed, and drop any buffered documents
  state.dead = true;
  state.killed = true;
  state.documents = [];

  // Nothing to do server-side when no live server cursor exists
  var noServerCursor = state.cursorId == null
    || state.cursorId.isZero()
    || state.init == false;
  if(noServerCursor) {
    if(callback) callback(null, null);
    return;
  }

  // Ask the server to release the cursor
  this.server.wireProtocolHandler.killCursor(this.bson, state.cursorId, this.connection, callback);
}
/**
* Resets the cursor
* @method
* @return {null}
*/
Cursor.prototype.rewind = function() {
  var state = this.cursorState;
  // Only an initialized cursor has anything to reset
  if(!state.init) return;
  // Release the server-side cursor if it is still alive
  if(!state.dead) {
    this.kill();
  }
  // Restore every piece of state to its pristine, pre-execution value
  state.currentLimit = 0;
  state.init = false;
  state.dead = false;
  state.killed = false;
  state.notified = false;
  state.documents = [];
  state.cursorId = null;
  state.cursorIndex = 0;
}
/**
* Validate if the connection is dead and return error
*/
var isConnectionDead = function(self, callback) {
  var connection = self.connection;
  // A missing connection is not treated as dead here
  if(!connection || connection.isConnected()) return false;
  // Put the cursor into its terminal state before reporting the error
  self.cursorState.notified = true;
  self.cursorState.killed = true;
  self.cursorState.documents = [];
  self.cursorState.cursorIndex = 0;
  callback(MongoError.create(f('connection to host %s:%s was destroyed', connection.host, connection.port)))
  return true;
}
/**
* Validate if the cursor is dead but was not explicitly killed by user
*/
var isCursorDeadButNotkilled = function(self, callback) {
  var state = self.cursorState;
  // Only applies when the cursor died on its own, without an explicit kill()
  if(!state.dead || state.killed) return false;
  state.notified = true;
  state.killed = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
  return true;
}
/**
* Validate if the cursor is dead and was killed by user
*/
var isCursorDeadAndKilled = function(self, callback) {
  var state = self.cursorState;
  if(!(state.dead && state.killed)) return false;
  // Calling next() on an explicitly killed cursor is an error
  handleCallback(callback, MongoError.create("cursor is dead"));
  return true;
}
/**
* Validate if the cursor was killed by the user
*/
var isCursorKilled = function(self, callback) {
  var state = self.cursorState;
  if(!state.killed) return false;
  // Deliver the terminal (null, null) notification and clear the buffer
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
  return true;
}
/**
* Mark cursor as being dead and notified
*/
var setCursorDeadAndNotified = function(self, callback) {
  var state = self.cursorState;
  // Terminal state: dead, caller notified, buffer released
  state.dead = true;
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
}
/**
* Mark cursor as being notified
*/
var setCursorNotified = function(self, callback) {
  var state = self.cursorState;
  // Caller notified of completion; buffer released
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;
  handleCallback(callback, null, null);
}
/**
* Retrieve the next document from the cursor
* @method
* @param {resultCallback} callback A callback function
*/
Cursor.prototype.next = function(callback) {
  var self = this;
  // We have notified about it — never invoke the callback again after the
  // terminal notification has been delivered
  if(self.cursorState.notified) return;

  // Cursor is killed return null
  if(isCursorKilled(self, callback)) return;

  // Cursor is dead but not marked killed, return null
  if(isCursorDeadButNotkilled(self, callback)) return;

  // We have a dead and killed cursor, attempting to call next should error
  if(isCursorDeadAndKilled(self, callback)) return;

  // We have just started the cursor
  if(!self.cursorState.init) {
    // Topology is not connected, save the call in the provided store to be
    // Executed at some point when the handler deems it's reconnected
    if(!self.topology.isConnected(self.options) && self.disconnectHandler != null) {
      return self.disconnectHandler.addObjectAndMethod('cursor', self, 'next', [callback], callback);
    }

    try {
      // Get a server
      self.server = self.topology.getServer(self.options);
      // Get a connection (pinned to this cursor for its lifetime)
      self.connection = self.server.getConnection();
      // Get the callbacks
      self.callbacks = self.server.getCallbacks();
    } catch(err) {
      return callback(err);
    }

    // Set as init
    self.cursorState.init = true;

    // Get the right wire protocol command for the connected server version
    this.query = self.server.wireProtocolHandler.command(self.bson, self.ns, self.cmd, self.cursorState, self.topology, self.options);
  }

  // Process exhaust messages: the server streams replies without further
  // getMore requests, so each response re-registers this handler
  var processExhaustMessages = function(err, result) {
    if(err) {
      self.cursorState.dead = true;
      self.callbacks.unregister(self.query.requestId);
      return callback(err);
    }

    // Concatenate all the documents
    self.cursorState.documents = self.cursorState.documents.concat(result.documents);

    // If we have no documents left
    if(Long.ZERO.equals(result.cursorId)) {
      self.cursorState.cursorId = Long.ZERO;
      self.callbacks.unregister(self.query.requestId);
      return self.next(callback);
    }

    // Set up next listener
    self.callbacks.register(result.requestId, processExhaustMessages)

    // Initial result
    if(self.cursorState.cursorId == null) {
      self.cursorState.cursorId = result.cursorId;
      self.next(callback);
    }
  }

  // If we have exhaust
  if(self.cmd.exhaust && self.cursorState.cursorId == null) {
    // Handle all the exhaust responses
    self.callbacks.register(self.query.requestId, processExhaustMessages);

    // Write the initial command out
    return self.connection.write(self.query.toBin());
  } else if(self.cmd.exhaust && self.cursorState.cursorIndex < self.cursorState.documents.length) {
    // Exhaust mode with buffered documents: hand one straight out
    return handleCallback(callback, null, self.cursorState.documents[self.cursorState.cursorIndex++]);
  } else if(self.cmd.exhaust && Long.ZERO.equals(self.cursorState.cursorId)) {
    // Exhaust stream fully drained
    self.callbacks.unregister(self.query.requestId);
    return setCursorNotified(self, callback);
  } else if(self.cmd.exhaust) {
    // Exhaust stream still in flight: poll again shortly
    return setTimeout(function() {
      if(Long.ZERO.equals(self.cursorState.cursorId)) return;
      self.next(callback);
    }, 1);
  }

  // If we don't have a cursorId execute the first query
  if(self.cursorState.cursorId == null) {
    // Check if connection is dead and return if not possible to
    // execute the query against the db
    if(isConnectionDead(self, callback)) return;

    // query, cmd, options, cursorState, callback
    execInitialQuery(self, self.query, self.cmd, self.options, self.cursorState, self.connection, self.logger, self.callbacks, function(err, r) {
      if(err) return handleCallback(callback, err, null);
      // Empty non-tailable result set: terminate immediately
      if(self.cursorState.documents.length == 0 && !self.cmd.tailable && !self.cmd.awaitData) {
        return setCursorNotified(self, callback);
      }

      self.next(callback);
    });
  } else if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
    // The user-requested limit has been reached
    return setCursorDeadAndNotified(self, callback);
  } else if(self.cursorState.cursorIndex == self.cursorState.documents.length
    && !Long.ZERO.equals(self.cursorState.cursorId)) {
      // Buffer drained but the server cursor is still open: issue a getMore
      // Ensure an empty cursor state
      self.cursorState.documents = [];
      self.cursorState.cursorIndex = 0;

      // Check if connection is dead and return if not possible to
      // execute a getmore on this connection
      if(isConnectionDead(self, callback)) return;

      // Execute the next get more
      execGetMore(self, function(err, doc) {
        if(err) return handleCallback(callback, err);
        if(self.cursorState.documents.length == 0 && Long.ZERO.equals(self.cursorState.cursorId)) self.cursorState.dead = true;

        // Tailable cursor getMore result, notify owner about it
        // No attempt is made here to retry, this is left to the user of the
        // core module to handle to keep core simple
        if(self.cursorState.documents.length == 0 && self.cmd.tailable) {
          return handleCallback(callback, MongoError.create({
              message: "No more documents in tailed cursor"
            , tailable: self.cmd.tailable
            , awaitData: self.cmd.awaitData
          }));
        }

        if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
          return setCursorDeadAndNotified(self, callback);
        }

        self.next(callback);
      });
  } else if(self.cursorState.documents.length == self.cursorState.cursorIndex
    && self.cmd.tailable) {
      // Tailable cursor with an empty buffer: surface a retryable error
      return handleCallback(callback, MongoError.create({
          message: "No more documents in tailed cursor"
        , tailable: self.cmd.tailable
        , awaitData: self.cmd.awaitData
      }));
  } else if(self.cursorState.documents.length == self.cursorState.cursorIndex
    && Long.ZERO.equals(self.cursorState.cursorId)) {
      // Buffer drained and server cursor closed: the cursor is finished
      setCursorDeadAndNotified(self, callback);
  } else {
    if(self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
      return setCursorDeadAndNotified(self, callback);
    }

    // Increment the current cursor limit
    self.cursorState.currentLimit += 1;

    // Get the document
    var doc = self.cursorState.documents[self.cursorState.cursorIndex++];

    // Transform the doc with passed in transformation method if provided
    if(self.cursorState.transforms && typeof self.cursorState.transforms.doc == 'function') {
      doc = self.cursorState.transforms.doc(doc);
    }

    // Return the document
    handleCallback(callback, null, doc);
  }
}
module.exports = Cursor;

View File

@@ -0,0 +1,44 @@
"use strict";
/**
* Creates a new MongoError
* @class
* @augments Error
* @param {string} message The error message
* @return {MongoError} A cursor instance
*/
function MongoError(message) {
  this.name = 'MongoError';
  this.message = message;
  // Capture a stack trace at construction time
  this.stack = (new Error()).stack;
}

/**
 * Coerce an arbitrary value (Error instance, string, or plain object)
 * into a MongoError.
 * @param {object|string|Error} options The error source
 * @return {MongoError} The resulting MongoError instance
 */
MongoError.create = function(options) {
  var err;
  if(options instanceof Error) {
    // Preserve the wrapped Error's message and stack
    err = new MongoError(options.message);
    err.stack = options.stack;
  } else if(typeof options == 'string') {
    err = new MongoError(options);
  } else {
    // Plain object: prefer message, then errmsg, then a placeholder,
    // and copy every remaining field across (e.g. code, ok)
    err = new MongoError(options.message || options.errmsg || "n/a");
    for(var name in options) {
      err[name] = options[name];
    }
  }
  return err;
}

// Extend JavaScript error
MongoError.prototype = new Error;

module.exports = MongoError;

View File

@@ -0,0 +1,59 @@
var fs = require('fs');
/* Note: because this plugin uses process.on('uncaughtException'), only one
* of these can exist at any given time. This plugin and anything else that
* uses process.on('uncaughtException') will conflict. */
/* Attach a smoke-test output plugin to an integra test runner. Records the
 * status and timing of every test into an in-memory results list and
 * flushes it as JSON to `outputFile` on clean exit or uncaught exception. */
exports.attachToRunner = function(runner, outputFile) {
  var smokeOutput = { results : [] };
  // Tests currently in flight, keyed by name; used to mark them failed
  // if the process dies mid-run
  var runningTests = {};

  var integraPlugin = {
    beforeTest: function(test, callback) {
      // Remember the start time so afterTest can record a duration
      test.startTime = Date.now();
      runningTests[test.name] = test;
      callback();
    },
    afterTest: function(test, callback) {
      // Record the finished test and drop it from the in-flight set
      smokeOutput.results.push({
        status: test.status,
        start: test.startTime,
        end: Date.now(),
        test_file: test.name,
        exit_code: 0,
        url: ""
      });
      delete runningTests[test.name];
      callback();
    },
    beforeExit: function(obj, callback) {
      // Flush results asynchronously on a clean shutdown
      fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() {
        callback();
      });
    }
  };

  // In case of exception, make sure we write file
  process.on('uncaughtException', function(err) {
    // Mark all currently running tests as failed
    for (var testName in runningTests) {
      smokeOutput.results.push({
        status: "fail",
        start: runningTests[testName].startTime,
        end: Date.now(),
        test_file: testName,
        exit_code: 0,
        url: ""
      });
    }

    // write file synchronously — async I/O would not complete before exit
    fs.writeFileSync(outputFile, JSON.stringify(smokeOutput));

    // Standard NodeJS uncaught exception handler
    console.error(err.stack);
    process.exit(1);
  });

  runner.plugin(integraPlugin);
  return integraPlugin;
};

View File

@@ -0,0 +1,37 @@
"use strict";
var setProperty = require('../connection/utils').setProperty
, getProperty = require('../connection/utils').getProperty
, getSingleProperty = require('../connection/utils').getSingleProperty;
/**
* Creates a new CommandResult instance
* @class
* @param {object} result CommandResult object
* @param {Connection} connection A connection instance associated with this result
* @return {CommandResult} A cursor instance
*/
var CommandResult = function(result, connection) {
  // Raw response document and the connection that produced it
  this.result = result;
  this.connection = connection;
}

/**
 * The raw command response document.
 * @method
 * @return {object}
 */
CommandResult.prototype.toJSON = function() {
  return this.result;
}

/**
 * JSON string rendering of the raw command response.
 * @method
 * @return {string}
 */
CommandResult.prototype.toString = function() {
  return JSON.stringify(this.toJSON());
}

module.exports = CommandResult;

View File

@@ -0,0 +1,931 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, b = require('bson')
, bindToCurrentDomain = require('../connection/utils').bindToCurrentDomain
, EventEmitter = require('events').EventEmitter
, BasicCursor = require('../cursor')
, BSON = require('bson').native().BSON
, BasicCursor = require('../cursor')
, Server = require('./server')
, Logger = require('../connection/logger')
, ReadPreference = require('./read_preference')
, Session = require('./session')
, MongoError = require('../error');
/**
* @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is
* used to construct connections.
*
* @example
* var Mongos = require('mongodb-core').Mongos
* , ReadPreference = require('mongodb-core').ReadPreference
* , assert = require('assert');
*
* var server = new Mongos([{host: 'localhost', port: 30000}]);
* // Wait for the connection event
* server.on('connect', function(server) {
* server.destroy();
* });
*
* // Start connecting
* server.connect();
*/
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
// All bson types
var bsonTypes = [b.Long, b.ObjectID, b.Binary, b.Code, b.DBRef, b.Symbol, b.Double, b.Timestamp, b.MaxKey, b.MinKey];
// BSON parser
var bsonInstance = null;
// Instance id
var mongosId = 0;
//
// Clone the options
var cloneOptions = function(options) {
  // Shallow copy of every enumerable property onto a fresh object
  var cloned = {};
  for(var key in options) {
    cloned[key] = options[key];
  }
  return cloned;
}
// Tracks which mongos proxies are currently connected vs disconnected and
// selects a proxy for a given read preference.
var State = function(readPreferenceStrategies) {
  // Internal state
  this.s = {
      connectedServers: []
    , disconnectedServers: []
    , readPreferenceStrategies: readPreferenceStrategies
  }
}

//
// A Mongos connected
State.prototype.connected = function(server) {
  // Locate in disconnected servers and remove
  this.s.disconnectedServers = this.s.disconnectedServers.filter(function(s) {
    return !s.equals(server);
  });

  var found = false;
  // Check if the server already exists in the connected list
  this.s.connectedServers.forEach(function(s) {
    if(s.equals(server)) found = true;
  });

  // Add to connected list if it does not already exist
  // (comment previously said "disconnected list" — copy-paste error)
  if(!found) this.s.connectedServers.push(server);
}

//
// A Mongos disconnected
State.prototype.disconnected = function(server) {
  // Locate in connected servers and remove
  this.s.connectedServers = this.s.connectedServers.filter(function(s) {
    return !s.equals(server);
  });

  var found = false;
  // Check if the server exists
  this.s.disconnectedServers.forEach(function(s) {
    if(s.equals(server)) found = true;
  });

  // Add to disconnected list if it does not already exist
  if(!found) this.s.disconnectedServers.push(server);
}

//
// Return the list of disconnected servers
State.prototype.disconnectedServers = function() {
  return this.s.disconnectedServers.slice(0);
}

//
// Get connectedServers
State.prototype.connectedServers = function() {
  return this.s.connectedServers.slice(0)
}

//
// Get all servers, connected and disconnected
State.prototype.getAll = function() {
  return this.s.connectedServers.slice(0).concat(this.s.disconnectedServers);
}

//
// Get all connections across all connected servers
State.prototype.getAllConnections = function() {
  var connections = [];
  this.s.connectedServers.forEach(function(e) {
    connections = connections.concat(e.connections());
  });
  return connections;
}

//
// Destroy the state: tear down every connected server and move it to the
// disconnected list
State.prototype.destroy = function() {
  // Destroy any connected servers
  while(this.s.connectedServers.length > 0) {
    var server = this.s.connectedServers.shift();

    // Remove any non used handlers
    ['error', 'close', 'timeout', 'connect'].forEach(function(e) {
      server.removeAllListeners(e);
    })

    // Server destroy
    server.destroy();
    // Add to list of disconnected servers
    this.s.disconnectedServers.push(server);
  }
}

//
// Are we connected to at least one proxy
State.prototype.isConnected = function() {
  return this.s.connectedServers.length > 0;
}

//
// Pick a server for the given read preference
State.prototype.pickServer = function(readPreference) {
  readPreference = readPreference || ReadPreference.primary;

  // Do we have a custom readPreference strategy, use it
  if(this.s.readPreferenceStrategies != null && this.s.readPreferenceStrategies[readPreference] != null) {
    // BUGFIX: previously referenced an undefined global `connectedServers`,
    // throwing a ReferenceError whenever a strategy was configured
    return this.s.readPreferenceStrategies[readPreference].pickServer(this.s.connectedServers, readPreference);
  }

  // No valid connections
  if(this.s.connectedServers.length == 0) throw new MongoError("no mongos proxy available");
  // Pick first one
  return this.s.connectedServers[0];
}
/**
* Creates a new Mongos instance
* @class
* @param {array} seedlist A list of seeds for the replicaset
* @param {number} [options.reconnectTries=30] Reconnect retries for HA if no servers available
* @param {number} [options.haInterval=5000] The High availability period for replicaset inquiry
* @param {boolean} [options.emitError=false] Server will emit errors events
* @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors
* @param {number} [options.size=5] Server connection pool size
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {number} [options.connectionTimeout=1000] TCP Connection timeout setting
* @param {number} [options.socketTimeout=0] TCP Socket timeout setting
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
* @param {boolean} [options.ssl=false] Use SSL for connection
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
* @param {Buffer} [options.cert] SSL Certificate binary buffer
* @param {Buffer} [options.key] SSL Key file binary buffer
* @param {string} [options.passphrase] SSL Certificate pass phrase
* @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
* @return {Mongos} A cursor instance
* @fires Mongos#connect
* @fires Mongos#joined
* @fires Mongos#left
*/
var Mongos = function(seedlist, options) {
  var self = this;
  options = options || {};
  // Inherit EventEmitter behaviour (connect/joined/left/... events)
  EventEmitter.call(this);
  // Validate seedlist
  if(!Array.isArray(seedlist)) throw new MongoError("seedlist must be an array");
  // Validate list
  if(seedlist.length == 0) throw new MongoError("seedlist must contain at least one entry");
  // Validate entries
  seedlist.forEach(function(e) {
    if(typeof e.host != 'string' || typeof e.port != 'number')
      throw new MongoError("seedlist entry must contain a host and port");
  });
  // BSON Parser, ensure we have a single instance
  // NOTE(review): bsonInstance is module-level, so the default parser is
  // shared by every Mongos created from this module
  bsonInstance = bsonInstance == null ? new BSON(bsonTypes) : bsonInstance;
  // Pick the right bson parser (caller-supplied takes precedence)
  var bson = options.bson ? options.bson : bsonInstance;
  // Add bson parser to options so each Server uses the same one
  options.bson = bson;
  // The Mongos state
  this.s = {
    // Seed list for sharding passed in
    seedlist: seedlist
    // Passed in options
    , options: options
    // Logger
    , logger: Logger('Mongos', options)
    // Reconnect tries
    , reconnectTries: options.reconnectTries || 30
    // Ha interval (period of the mongosInquirer monitoring loop)
    , haInterval: options.haInterval || 5000
    // Have omitted fullsetup (set once every seed proxy has connected)
    , fullsetup: false
    // Cursor factory
    , Cursor: options.cursorFactory || BasicCursor
    // Current credentials used for auth (replayed on reconnecting servers)
    , credentials: []
    // BSON Parser
    , bsonInstance: bsonInstance
    , bson: bson
    // Default state
    , state: DISCONNECTED
    // Swallow or emit errors
    , emitError: typeof options.emitError == 'boolean' ? options.emitError : false
    // Contains any alternate strategies for picking
    , readPreferenceStrategies: {}
    // Auth providers
    , authProviders: {}
    // Unique instance id
    , id: mongosId++
    // Current retries left (decremented by the HA loop while disconnected)
    , retriesLeft: options.reconnectTries || 30
    // Do we have a not connected handler (buffers ops while disconnected)
    , disconnectHandler: options.disconnectHandler
  }
  // Set up the connection timeout for the options
  options.connectionTimeout = options.connectionTimeout || 1000;
  // Create a new state for the mongos
  // (shares the readPreferenceStrategies object created above)
  this.s.mongosState = new State(this.s.readPreferenceStrategies);
  // BSON property (find a server and pass it along)
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      var servers = self.s.mongosState.getAll();
      return servers.length > 0 ? servers[0].bson : null;
    }
  });
  // Read-only accessors over the internal state
  Object.defineProperty(this, 'id', {
    enumerable:true, get: function() { return self.s.id; }
  });
  Object.defineProperty(this, 'type', {
    enumerable:true, get: function() { return 'mongos'; }
  });
  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return self.s.haInterval; }
  });
  Object.defineProperty(this, 'state', {
    enumerable:true, get: function() { return self.s.mongosState; }
  });
}

// Mongos is an EventEmitter
inherits(Mongos, EventEmitter);
/**
 * Set the BSON parser implementation used by this topology
 * @method
 * @param {string} type Type of BSON parser to use ('c++' or 'js')
 * @throws {MongoError} If the parser type is not supported
 */
Mongos.prototype.setBSONParserType = function(type) {
  var nBSON = null;

  if(type == 'c++') {
    nBSON = require('bson').native().BSON;
  } else if(type == 'js') {
    nBSON = require('bson').pure().BSON;
  } else {
    // Fixed format specifier: was "%", which never interpolated the type
    throw new MongoError(f("%s parser not supported", type));
  }

  this.s.options.bson = new nBSON(bsonTypes);
}
/**
 * Returns the last known ismaster document for this topology
 * @method
 * @return {object|null} The ismaster result of the first connected proxy, or null when none is connected
 */
Mongos.prototype.lastIsMaster = function() {
  var servers = this.s.mongosState.connectedServers();
  return servers.length > 0 ? servers[0].lastIsMaster() : null;
}
/**
 * Initiate connection to every mongos proxy in the seedlist and start the
 * periodic HA monitoring process
 * @method
 * @param {object} [_options] Optional settings merged over the stored options
 */
Mongos.prototype.connect = function(_options) {
  var self = this;
  // Start replicaset inquiry process (periodic HA monitor, see mongosInquirer)
  setTimeout(mongosInquirer(self, self.s), self.s.haInterval);
  // Additional options merged over the constructor options
  if(_options) for(var name in _options) self.s.options[name] = _options[name];
  // For all entries in the seedlist build a server instance
  self.s.seedlist.forEach(function(e) {
    // Clone options so per-server settings do not leak between servers
    var opts = cloneOptions(self.s.options);
    // Add host and port
    opts.host = e.host;
    opts.port = e.port;
    // The HA loop handles reconnects, so individual servers must not
    opts.reconnect = false;
    opts.readPreferenceStrategies = self.s.readPreferenceStrategies;
    // Share the auth store
    opts.authProviders = self.s.authProviders;
    // Don't emit errors
    opts.emitError = true;
    // Create a new Server, registered as disconnected until it connects
    self.s.mongosState.disconnected(new Server(opts));
  });
  // Get the disconnected servers
  var servers = self.s.mongosState.disconnectedServers();
  // Attempt to connect to all the servers
  while(servers.length > 0) {
    // Get the server
    var server = servers.shift();
    // Remove any non used handlers
    ['error', 'close', 'timeout', 'connect', 'message', 'parseError'].forEach(function(e) {
      server.removeAllListeners(e);
    });
    // Set up the event handlers (once: the HA loop re-registers as needed)
    server.once('error', errorHandlerTemp(self, self.s, server));
    server.once('close', errorHandlerTemp(self, self.s, server));
    server.once('timeout', errorHandlerTemp(self, self.s, server));
    server.once('parseError', errorHandlerTemp(self, self.s, server));
    server.once('connect', connectHandler(self, self.s, 'connect'));
    if(self.s.logger.isInfo()) self.s.logger.info(f('connecting to server %s', server.name));
    // Attempt to connect
    server.connect();
  }
}
/**
 * Destroy the mongos topology, closing all proxy connections
 * @method
 * @param {boolean} [emitClose] Emit a 'close' event when listeners are attached
 */
Mongos.prototype.destroy = function(emitClose) {
  this.s.state = DESTROYED;
  // Emit close (fixed: previously referenced an undefined `self`, throwing a
  // ReferenceError whenever emitClose was set)
  if(emitClose && this.listeners('close').length > 0) this.emit('close', this);
  // Destroy the state
  this.s.mongosState.destroy();
}
/**
 * Figure out if the topology is connected (at least one proxy up)
 * @method
 * @return {boolean}
 */
Mongos.prototype.isConnected = function() {
  return this.s.mongosState.isConnected();
}
//
// Operations
//
/**
 * Insert one or more documents
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of documents to insert
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Mongos.prototype.insert = function(ns, ops, options, callback) {
  // Connected (or no buffering store available): dispatch immediately
  if(this.isConnected() || this.s.disconnectHandler == null) {
    executeWriteOperation(this.s, 'insert', ns, ops, options, callback);
    return;
  }
  // Otherwise buffer the operation for replay once a proxy reconnects
  return this.s.disconnectHandler.add('insert', ns, ops, options, bindToCurrentDomain(callback));
}
/**
 * Perform one or more update operations
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of updates
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Mongos.prototype.update = function(ns, ops, options, callback) {
  // Connected (or no buffering store available): dispatch immediately
  if(this.isConnected() || this.s.disconnectHandler == null) {
    executeWriteOperation(this.s, 'update', ns, ops, options, callback);
    return;
  }
  // Otherwise buffer the operation for replay once a proxy reconnects
  return this.s.disconnectHandler.add('update', ns, ops, options, bindToCurrentDomain(callback));
}
/**
 * Perform one or more remove operations
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of removes
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {opResultCallback} callback A callback function
 */
Mongos.prototype.remove = function(ns, ops, options, callback) {
  // Connected (or no buffering store available): dispatch immediately
  if(this.isConnected() || this.s.disconnectHandler == null) {
    executeWriteOperation(this.s, 'remove', ns, ops, options, callback);
    return;
  }
  // Otherwise buffer the operation for replay once a proxy reconnects
  return this.s.disconnectHandler.add('remove', ns, ops, options, bindToCurrentDomain(callback));
}
/**
 * Execute a command against the topology
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cmd The command hash
 * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
 * @param {boolean} [options.onAll=false] Execute the command on every known proxy
 * @param {Connection} [options.connection] Specify connection object to execute command against
 * @param {opResultCallback} callback A callback function
 */
Mongos.prototype.command = function(ns, cmd, options, callback) {
  var self = this;
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }

  // Topology is not connected, save the call in the provided store to be
  // Executed at some point when the handler deems it's reconnected
  if(!self.isConnected() && self.s.disconnectHandler != null) {
    callback = bindToCurrentDomain(callback);
    return self.s.disconnectHandler.add('command', ns, cmd, options, callback);
  }

  var server = null;
  // Ensure we have no options
  options = options || {};

  // We need to execute the command on all servers
  if(options.onAll) {
    var servers = self.s.mongosState.getAll();
    var count = servers.length;

    for(var i = 0; i < servers.length; i++) {
      servers[i].command(ns, cmd, options, function(err, r) {
        count = count - 1;
        // Finished executing command on every server
        if(count == 0) {
          // Was it a logout command clear any credentials
          // (fixed: previously referenced an undefined `state` variable,
          // throwing a ReferenceError on logout with onAll set)
          if(cmd.logout) clearCredentials(self.s, ns);
          // Return the result of the last server to respond
          callback(err, r);
        }
      });
    }

    return;
  }

  try {
    // Get a proxy matching the read preference (writes force primary)
    server = self.s.mongosState.pickServer(options.writeConcern ? ReadPreference.primary : options.readPreference);
  } catch(err) {
    return callback(err);
  }

  // No server returned we had an error
  if(server == null) return callback(new MongoError("no mongos found"));

  // Execute the command on the picked proxy
  server.command(ns, cmd, options, function(err, r) {
    // Was it a logout command clear any credentials
    if(cmd.logout) clearCredentials(self.s, ns);
    callback(err, r);
  });
}
/**
 * Create a cursor for a command returning a cursor, or for a raw cursorId
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {{object}|{Long}} cmd Can be either a command returning a cursor or a cursorId
 * @param {object} [cursorOptions] Cursor options (batchSize, documents, readPreference, cursorFactory)
 * @return {Cursor} A new cursor instance
 */
Mongos.prototype.cursor = function(ns, cmd, cursorOptions) {
  var opts = cursorOptions || {};
  // Allow a per-call cursor factory, falling back to the topology default
  var CursorClass = opts.cursorFactory || this.s.Cursor;
  return new CursorClass(this.s.bson, ns, cmd, opts, this, this.s.options);
}
/**
 * Authenticate using a specified mechanism against every connected proxy
 * @method
 * @param {string} mechanism The Auth mechanism we are invoking
 * @param {string} db The db we are invoking the mechanism against
 * @param {...object} param Parameters for the specific mechanism
 * @param {authResultCallback} callback A callback function
 * @throws {MongoError} When no auth provider exists for the mechanism
 */
Mongos.prototype.auth = function(mechanism, db) {
  var self = this;
  // Trailing arguments: mechanism-specific params followed by the callback
  var args = Array.prototype.slice.call(arguments, 2);
  var callback = args.pop();
  // If we don't have the mechanism fail
  if(self.s.authProviders[mechanism] == null && mechanism != 'default')
    throw new MongoError(f("auth provider %s does not exist", mechanism));
  // Authenticate against all the servers
  var servers = self.s.mongosState.connectedServers();
  var count = servers.length;
  // NOTE(review): if no servers are connected, count is 0 and the callback is
  // never invoked — confirm callers guard against this
  // Correct authentication
  var authenticated = true;
  var authErr = null;
  // Authenticate against all servers
  while(servers.length > 0) {
    var server = servers.shift();
    // Create arguments: [mechanism, db, ...params, completionCallback]
    var finalArguments = [mechanism, db].concat(args.slice(0)).concat([function(err, r) {
      count = count - 1;
      if(err) authErr = err;
      if(!r) authenticated = false;
      // We are done once every server has responded
      if(count == 0) {
        if(authErr) return callback(authErr, false);
        callback(null, new Session({}, self));
      }
    }]);
    // Execute the auth
    server.auth.apply(server, finalArguments);
  }
}
//
// Plugin methods
//
/**
 * Add custom read preference strategy
 * @method
 * @param {string} name Name of the read preference strategy
 * @param {object} strategy Strategy object instance
 */
Mongos.prototype.addReadPreferenceStrategy = function(name, strategy) {
  // Lazily create the strategy map, then register under the given name
  var strategies = this.s.readPreferenceStrategies;
  if(strategies == null) {
    strategies = {};
    this.s.readPreferenceStrategies = strategies;
  }
  strategies[name] = strategy;
}
/**
 * Add custom authentication mechanism
 * @method
 * @param {string} name Name of the authentication mechanism
 * @param {object} provider Authentication object instance
 */
Mongos.prototype.addAuthProvider = function(name, provider) {
  // Providers are shared with every Server via opts.authProviders in connect()
  this.s.authProviders[name] = provider;
}
/**
 * Get a raw connection from a proxy matching the read preference
 * @method
 * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
 * @return {Connection|null} A pooled connection, or null when no proxy is available
 */
Mongos.prototype.getConnection = function(options) {
  var opts = options || {};
  // Pick the proxy matching the read preference
  var server = this.s.mongosState.pickServer(opts.readPreference);
  return server == null ? null : server.getConnection();
}
/**
 * Get a proxy server matching the read preference
 * @method
 * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
 * @return {Server}
 */
Mongos.prototype.getServer = function(options) {
  var opts = options || {};
  return this.s.mongosState.pickServer(opts.readPreference);
}
/**
 * All raw connections across every connected mongos proxy
 * @method
 * @return {Connection[]}
 */
Mongos.prototype.connections = function() {
  return this.s.mongosState.getAllConnections();
}
//
// Periodic HA process: tracks the reconnect budget, replays buffered
// operations and attempts to reconnect any disconnected proxies
var mongosInquirer = function(self, state) {
  return function() {
    if(state.state == DESTROYED) return
    // A healthy topology gets its retry budget refilled
    if(state.state == CONNECTED) state.retriesLeft = state.reconnectTries;

    // Disconnected with the retry budget exhausted: give up
    if(state.state == DISCONNECTED && state.retriesLeft == 0) {
      self.destroy();
      return self.emit('error', new MongoError(f('failed to reconnect after %s', state.reconnectTries)));
    } else if(state.state == DISCONNECTED) {
      // (fixed: previously compared the state object itself against
      // DISCONNECTED, so the retry budget was never decremented)
      state.retriesLeft = state.retriesLeft - 1;
    }

    // If we have a primary and a disconnect handler, execute
    // buffered operations
    if(state.mongosState.isConnected() && state.disconnectHandler) {
      state.disconnectHandler.execute();
    }

    // Log the information
    if(state.logger.isDebug()) state.logger.debug(f('mongos ha process running'));

    // Let's query any disconnected proxies
    var disconnectedServers = state.mongosState.disconnectedServers();
    if(disconnectedServers.length == 0) return setTimeout(mongosInquirer(self, state), state.haInterval);

    // Count of connections waiting to be connected
    var connectionCount = disconnectedServers.length;
    if(state.logger.isDebug()) state.logger.debug(f('mongos ha process found %d disconnected proxies', connectionCount));

    // Let's attempt to reconnect
    while(disconnectedServers.length > 0) {
      var server = disconnectedServers.shift();
      if(state.logger.isDebug()) state.logger.debug(f('attempting to connect to server %s', server.name));

      // Remove any listeners
      ['error', 'close', 'timeout', 'connect', 'message', 'parseError'].forEach(function(e) {
        server.removeAllListeners(e);
      });

      // Set up the event handlers
      server.once('error', errorHandlerTemp(self, state, server));
      server.once('close', errorHandlerTemp(self, state, server));
      server.once('timeout', errorHandlerTemp(self, state, server));
      server.once('connect', connectHandler(self, state, 'ha'));
      // Start connect
      server.connect();
    }

    // Let's keep monitoring but wait for possible timeout to happen
    return setTimeout(mongosInquirer(self, state), state.options.connectionTimeout + state.haInterval);
  }
}
//
// Error handler used during the initial connect / HA reconnect phase:
// logs the failure, strips stale handlers and records the server as disconnected
var errorHandlerTemp = function(self, state, server) {
  // NOTE(review): the inner `server` parameter shadows the outer one; the
  // handler relies on the server arriving as the event's second argument —
  // confirm every wired event ('error', 'close', 'timeout', 'parseError')
  // actually emits it
  return function(err, server) {
    // Log the information
    if(state.logger.isInfo()) state.logger.info(f('server %s disconnected with error %s', server.name, JSON.stringify(err)));
    // Remove any non used handlers
    ['error', 'close', 'timeout', 'connect'].forEach(function(e) {
      server.removeAllListeners(e);
    })
    // Signal disconnect of server
    state.mongosState.disconnected(server);
  }
}
//
// Post-connect error handler: drop the proxy, downgrade the topology state
// when it was the last one, and optionally re-emit the error
var errorHandler = function(self, state) {
  return function(err, server) {
    if(state.logger.isInfo()) state.logger.info(f('server %s errored out with %s', server.name, JSON.stringify(err)));
    state.mongosState.disconnected(server);
    // No connected proxies left means the topology is disconnected
    if(state.mongosState.connectedServers().length == 0) state.state = DISCONNECTED;
    // Signal server left
    self.emit('left', 'mongos', server);
    if(state.emitError) self.emit('error', err, server);
  }
}
//
// Post-connect timeout handler: drop the proxy and downgrade the topology
// state when it was the last one
var timeoutHandler = function(self, state) {
  return function(err, server) {
    if(state.logger.isInfo()) state.logger.info(f('server %s timed out', server.name));
    state.mongosState.disconnected(server);
    // No connected proxies left means the topology is disconnected
    if(state.mongosState.connectedServers().length == 0) state.state = DISCONNECTED;
    // Signal server left
    self.emit('left', 'mongos', server);
  }
}
//
// Post-connect close handler: drop the proxy and downgrade the topology
// state when it was the last one
var closeHandler = function(self, state) {
  return function(err, server) {
    if(state.logger.isInfo()) state.logger.info(f('server %s closed', server.name));
    state.mongosState.disconnected(server);
    // No connected proxies left means the topology is disconnected
    if(state.mongosState.connectedServers().length == 0) state.state = DISCONNECTED;
    // Signal server left
    self.emit('left', 'mongos', server);
  }
}
// Connect handler factory; e is 'connect' during the initial connect phase or
// 'ha' when invoked from the HA monitoring loop
var connectHandler = function(self, state, e) {
  return function(server) {
    if(state.logger.isInfo()) state.logger.info(f('connected to %s', server.name));
    // Remove any non used handlers
    ['error', 'close', 'timeout', 'connect', 'message', 'parseError'].forEach(function(e) {
      server.removeAllListeners(e);
    });
    // finish processing the server once credentials (if any) were applied
    var processNewServer = function(_server) {
      // Add the server handling code
      if(_server.isConnected()) {
        _server.once('error', errorHandler(self, state));
        _server.once('close', closeHandler(self, state));
        _server.once('timeout', timeoutHandler(self, state));
        // NOTE(review): parseError is routed to the timeout handler — confirm intentional
        _server.once('parseError', timeoutHandler(self, state));
      }
      // Emit joined event
      self.emit('joined', 'mongos', _server);
      // Add to list connected servers
      state.mongosState.connected(_server);
      // Do we have a reconnect event (first proxy back during HA)
      if('ha' == e && state.mongosState.connectedServers().length == 1) {
        self.emit('reconnect', _server);
      }
      // Emit fullsetup exactly once, when every known proxy is connected
      if(state.mongosState.disconnectedServers().length == 0 &&
        state.mongosState.connectedServers().length > 0 &&
        !state.fullsetup) {
        state.fullsetup = true;
        self.emit('fullsetup');
      }
      // Set connected on the first successful proxy
      if(state.state == DISCONNECTED) {
        state.state = CONNECTED;
        self.emit('connect', self);
      }
    }
    // No credentials just process server
    if(state.credentials.length == 0) return processNewServer(server);
    // Do we have credentials, let's apply them all before exposing the server
    var count = state.credentials.length;
    // Apply the credentials
    for(var i = 0; i < state.credentials.length; i++) {
      server.auth.apply(server, state.credentials[i].concat([function(err, r) {
        count = count - 1;
        if(count == 0) processNewServer(server);
      }]));
    }
  }
}
//
// Append a server to list unless an equal server is already present,
// stripping its stale event handlers first
var addToListIfNotExist = function(list, server) {
  // Remove any non used handlers
  ['error', 'close', 'timeout', 'connect'].forEach(function(e) {
    server.removeAllListeners(e);
  });

  // Only push when no existing entry matches
  var exists = list.some(function(s) {
    return s.equals(server);
  });
  if(!exists) list.push(server);
}
// Add the new credential for a db, removing any previously cached
// credential for the same db
var addCredentials = function(state, db, argsWithoutCallback) {
  // Remove any credentials for the db (".dummy" turns the bare db name into a
  // namespace so clearCredentials can split off the db part)
  clearCredentials(state, db + ".dummy");
  // Add new credentials to list
  state.credentials.push(argsWithoutCallback);
}
// Drop every cached credential belonging to the database of the given
// namespace (credentials are stored as argument arrays with the db at index 1)
var clearCredentials = function(state, ns) {
  var db = ns.split('.')[0];
  // Keep only credentials whose db differs from the one logged out of
  state.credentials = state.credentials.filter(function(cred) {
    return cred[1] != db;
  });
}
// NOTE(review): appears incomplete/dead — returns cmd only when no
// readPreference is supplied and undefined otherwise; confirm it is unused
// before relying on it
var processReadPreference = function(cmd, options) {
  options = options || {}
  // No read preference specified
  if(options.readPreference == null) return cmd;
}
//
// Route a write (insert/update/remove) to a mongos proxy picked with the
// default (primary) preference
var executeWriteOperation = function(state, op, ns, ops, options, callback) {
  // Support the (ns, ops, callback) calling convention
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }
  var opts = options || {};

  var server = null;
  try {
    server = state.mongosState.pickServer();
  } catch(err) {
    return callback(err);
  }

  // No server returned we had an error
  if(server == null) return callback(new MongoError("no mongos found"));
  // Execute the write on the picked proxy
  server[op](ns, ops, opts, callback);
}
/**
* A mongos connect event, used to verify that the connection is up and running
*
* @event Mongos#connect
* @type {Mongos}
*/
/**
* A server member left the mongos list
*
* @event Mongos#left
* @type {Mongos}
* @param {string} type The type of member that left (mongos)
* @param {Server} server The server object that left
*/
/**
* A server member joined the mongos list
*
* @event Mongos#joined
* @type {Mongos}
 * @param {string} type The type of member that joined (mongos)
* @param {Server} server The server object that joined
*/
module.exports = Mongos;

View File

@@ -0,0 +1,106 @@
"use strict";
// Preference modes that require the slaveOk wire-protocol bit
var needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'];

/**
 * @fileOverview The **ReadPreference** class represents a MongoDB ReadPreference
 * and is used to route reads when constructing connections.
 *
 * @example
 * var ReplSet = require('mongodb-core').ReplSet
 *   , ReadPreference = require('mongodb-core').ReadPreference
 *   , assert = require('assert');
 *
 * var server = new ReplSet([{host: 'localhost', port: 30000}], {setName: 'rs'});
 * // Wait for the connection event
 * server.on('connect', function(server) {
 *   var cursor = server.cursor('db.test'
 *     , {find: 'db.test', query: {}}
 *     , {readPreference: new ReadPreference('secondary')});
 *   cursor.next(function(err, doc) {
 *     server.destroy();
 *   });
 * });
 *
 * // Start connecting
 * server.connect();
 */

/**
 * Creates a new ReadPreference instance
 * @class
 * @param {string} preference A string describing the preference (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @param {object} tags The tags object
 * @param {object} [options] Additional read preference options
 * @property {string} preference The preference string
 * @property {object} tags The tags object
 * @property {object} options Additional read preference options
 * @return {ReadPreference}
 */
var ReadPreference = function(preference, tags, options) {
  this.preference = preference;
  this.tags = tags;
  this.options = options;
}

/**
 * Does this preference require the slaveOk bit to be set
 * @method
 * @return {boolean}
 */
ReadPreference.prototype.slaveOk = function() {
  // Every mode except 'primary' may be served by a secondary
  return needSlaveOk.indexOf(this.preference) != -1;
}

/**
 * Are the two read preferences equal
 * @method
 * @return {boolean} True when the modes match (tags and options are not compared)
 */
ReadPreference.prototype.equals = function(readPreference) {
  return readPreference.preference == this.preference;
}

/**
 * Return the wire-protocol JSON representation ({mode, tags?})
 * @method
 * @return {Object}
 */
ReadPreference.prototype.toJSON = function() {
  var document = {mode: this.preference};
  if(Array.isArray(this.tags)) document.tags = this.tags;
  return document;
}

// Shared singleton instances for the five standard modes

/**
 * Primary read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.primary = new ReadPreference('primary');

/**
 * Primary Preferred read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred');

/**
 * Secondary read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.secondary = new ReadPreference('secondary');

/**
 * Secondary Preferred read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred');

/**
 * Nearest read preference
 * @method
 * @return {ReadPreference}
 */
ReadPreference.nearest = new ReadPreference('nearest');

module.exports = ReadPreference;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,421 @@
"use strict";
var Logger = require('../connection/logger')
, f = require('util').format
, MongoError = require('../error');
// Replicaset topology lifecycle states
var DISCONNECTED = 'disconnected';
var CONNECTING = 'connecting';
var CONNECTED = 'connected';
var DESTROYED = 'destroyed';
/**
 * Creates a new Replicaset State object tracking the primary, secondaries,
 * arbiters and passives of a set
 * @class
 * @param {object} replSet The owning ReplSet topology
 * @param {object} options Options (id, setName, connectingServers, secondaryOnlyConnectionAllowed, logger settings)
 * @property {object} primary Primary property
 * @property {array} secondaries List of secondaries
 * @property {array} arbiters List of arbiters
 * @return {State} A state instance
 */
var State = function(replSet, options) {
  this.replSet = replSet;
  this.options = options;

  // Member buckets by role
  this.secondaries = [];
  this.arbiters = [];
  this.passives = [];
  this.primary = null;

  // Initial state is disconnected
  this.state = DISCONNECTED;

  // Get a logger instance
  this.logger = Logger('ReplSet', options);

  // Unpack the options we need direct access to
  this.id = options.id;
  this.setName = options.setName;
  this.connectingServers = options.connectingServers;
  this.secondaryOnlyConnectionAllowed = options.secondaryOnlyConnectionAllowed;
}
/**
 * Is at least one secondary currently connected
 * @method
 * @return {boolean}
 */
State.prototype.isSecondaryConnected = function() {
  // some() short-circuits on the first connected secondary, like the original loop
  return this.secondaries.some(function(s) {
    return s.isConnected();
  });
}
/**
 * Is the primary known and connected
 * @method
 * @return {boolean}
 */
State.prototype.isPrimaryConnected = function() {
  var primary = this.primary;
  return primary != null && primary.isConnected();
}
/**
 * Is the given address the current primary
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isPrimary = function(address) {
  var primary = this.primary;
  if(primary == null) return false;
  return primary.equals(address);
}
/**
 * Is the given address one of the known secondaries
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isSecondary = function(address) {
  return this.secondaries.some(function(s) {
    return s.equals(address);
  });
}
/**
 * Is the given address one of the known passive members
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.isPassive = function(address) {
  return this.passives.some(function(s) {
    return s.equals(address);
  });
}
/**
 * Does the replicaset contain this server in any role
 * @method
 * @param {string} address Server address
 * @return {boolean}
 */
State.prototype.contains = function(address) {
  var matches = function(s) { return s.equals(address); };
  if(this.primary && this.primary.equals(address)) return true;
  return this.secondaries.some(matches)
    || this.arbiters.some(matches)
    || this.passives.some(matches);
}
/**
 * Clean out all dead connections
 * @method
 */
State.prototype.clean = function() {
  // Drop a primary that lost its connection
  if(this.primary != null && !this.primary.isConnected()) {
    this.primary = null;
  }

  var connectedOnly = function(s) { return s.isConnected(); };
  // Filter out disconnected secondaries
  this.secondaries = this.secondaries.filter(connectedOnly);
  // Filter out disconnected arbiters
  // NOTE(review): passives are not cleaned here — confirm that is intentional
  this.arbiters = this.arbiters.filter(connectedOnly);
}
/**
 * Destroy state, closing the primary and all secondaries
 * @method
 */
State.prototype.destroy = function() {
  this.state = DESTROYED;
  if(this.primary) this.primary.destroy();
  // NOTE(review): arbiters and passives are not destroyed here — confirm intentional
  this.secondaries.forEach(function(s) {
    s.destroy();
  });
}
/**
 * Remove server from state
 * @method
 * @param {Server} server Server to remove
 * @return {string} Type of member removed (primary|arbiter|passive|secondary);
 *   'secondary' is returned even when no list contained the server
 */
State.prototype.remove = function(server) {
  // Primary first
  if(this.primary && this.primary.equals(server)) {
    this.primary = null;
    return 'primary';
  }

  var keep = function(s) { return !s.equals(server); };

  // Arbiters next
  var before = this.arbiters.length;
  this.arbiters = this.arbiters.filter(keep);
  if(this.arbiters.length < before) return 'arbiter';

  // Passives next; a removed passive is also dropped from the secondaries
  before = this.passives.length;
  this.passives = this.passives.filter(keep);
  if(this.passives.length < before) {
    this.secondaries = this.secondaries.filter(keep);
    return 'passive';
  }

  // Finally the secondaries
  this.secondaries = this.secondaries.filter(keep);
  return 'secondary';
}
/**
 * Get the server by name
 * @method
 * @param {string} server Server address to look up
 * @return {Server} The matching server, or undefined when not found
 */
State.prototype.get = function(server) {
  // Search the primary (when set) followed by all secondaries
  var candidates = (this.primary ? [this.primary] : []).concat(this.secondaries);
  for(var i = 0; i < candidates.length; i++) {
    if(candidates[i].equals(server)) return candidates[i];
  }
  // Implicitly returns undefined when no candidate matches
}
/**
 * Get all the servers in the set (primary first, then secondaries)
 * @method
 * @return {array}
 */
State.prototype.getAll = function() {
  var members = this.primary ? [this.primary] : [];
  return members.concat(this.secondaries);
}
/**
 * All raw connections across primary and secondaries
 * @method
 * @return {array}
 */
State.prototype.getAllConnections = function() {
  var connections = [];
  // Primary connections first
  if(this.primary) connections = connections.concat(this.primary.connections());
  // Then every secondary's connections
  for(var i = 0; i < this.secondaries.length; i++) {
    connections = connections.concat(this.secondaries[i].connections());
  }
  return connections;
}
/**
 * Return JSON representation of the member addresses
 * @method
 * @return {object}
 */
State.prototype.toJSON = function() {
  // Addresses come from each server's last seen ismaster document
  var primaryAddress = this.primary ? this.primary.lastIsMaster().me : null;
  var secondaryAddresses = this.secondaries.map(function(secondary) {
    return secondary.lastIsMaster().me;
  });
  return { primary: primaryAddress, secondaries: secondaryAddresses };
}
/**
 * Returns the last known ismaster document for this set
 * @method
 * @return {object} The primary's document, else the first secondary's, else {}
 */
State.prototype.lastIsMaster = function() {
  // Prefer the primary's view; fall back to the first secondary
  var source = this.primary || this.secondaries[0];
  return source ? source.lastIsMaster() : {};
}
/**
 * Promote server to primary
 * @method
 * @param {Server} server Server we wish to promote
 */
State.prototype.promotePrimary = function(server) {
  var known = this.get(server);
  if(known == null) {
    // Unknown server: install it directly as the new primary
    this.primary = server;
  } else {
    // Known server: detach it from whichever list holds it, then promote
    this.remove(known);
    this.primary = known;
  }
}
// Append server to list unless an equal entry is already present.
// Returns true when the server was added, false on duplicate.
var add = function(list, server) {
  var alreadyPresent = list.some(function(existing) {
    return existing.equals(server);
  });
  if(alreadyPresent) return false;
  list.push(server);
  return true;
}
// The three membership adders share one duplicate-safe implementation;
// each returns false when the server was already in the corresponding list.
var addToListField = function(field) {
  return function(server) {
    return add(this[field], server);
  };
};

/**
 * Add server to list of secondaries
 * @method
 * @param {Server} server Server we wish to add
 */
State.prototype.addSecondary = addToListField('secondaries');

/**
 * Add server to list of arbiters
 * @method
 * @param {Server} server Server we wish to add
 */
State.prototype.addArbiter = addToListField('arbiters');

/**
 * Add server to list of passives
 * @method
 * @param {Server} server Server we wish to add
 */
State.prototype.addPassive = addToListField('passives');
/**
 * Update the state given a specific ismaster result
 * @method
 * @param {object} ismaster IsMaster result
 * @param {Server} server IsMaster Server source
 * @return {boolean} true when the result was applied to the state
 */
State.prototype.update = function(ismaster, server) {
  var self = this;
  // Not in a known connection valid state: drop the server from the set
  if(!ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly) {
    // Remove the server, remembering which role it held
    var result = self.remove(server);
    if(self.state == CONNECTED)  {
      if(self.logger.isInfo()) self.logger.info(f('[%s] removing %s from set', self.id, ismaster.me));
      // BUG FIX: previously called self.remove(server) a second time here;
      // the server was already removed, so the reported member type was wrong
      self.replSet.emit('left', result, server);
    }

    return false;
  }

  // Set the setName if it's not set from the first server
  if(self.setName == null && ismaster.setName) {
    if(self.logger.isInfo()) self.logger.info(f('[%s] setting setName to %s', self.id, ismaster.setName));
    self.setName = ismaster.setName;
  }

  // Check if the replicaset name matches the provided one
  if(ismaster.setName && self.setName != ismaster.setName) {
    if(self.logger.isError()) self.logger.error(f('[%s] server in replset %s is not part of the specified setName %s', self.id, ismaster.setName, self.setName));
    self.remove(server);
    self.replSet.emit('error', new MongoError("provided setName for Replicaset Connection does not match setName found in server seedlist"));
    return false;
  }

  // Log information
  if(self.logger.isInfo()) self.logger.info(f('[%s] updating replicaset state %s', self.id, JSON.stringify(this)));

  // It's a master: promote it and transition to connected
  if(ismaster.ismaster && self.setName == ismaster.setName && !self.isPrimary(ismaster.me)) {
    self.promotePrimary(server);
    if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to primary', self.id, ismaster.me));
    // Emit primary
    self.replSet.emit('joined', 'primary', this.primary);

    // First primary ever seen fires 'connect'; later ones fire 'reconnect'
    if(self.state == CONNECTING) {
      self.state = CONNECTED;
      self.replSet.emit('connect', self.replSet);
    } else {
      self.state = CONNECTED;
      self.replSet.emit('reconnect', server);
    }
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.arbiterOnly) {
      if(self.addArbiter(server)) {
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to arbiter', self.id, ismaster.me));
        self.replSet.emit('joined', 'arbiter', server);
        return true;
      };

      return false;
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.secondary && ismaster.passive) {
      // A passive member is tracked both as passive and as secondary
      if(self.addPassive(server) && self.addSecondary(server)) {
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to passive', self.id, ismaster.me));
        self.replSet.emit('joined', 'passive', server);
        return true;
      };

      return false;
  } else if(!ismaster.ismaster && self.setName == ismaster.setName
    && ismaster.secondary) {
      if(self.addSecondary(server)) {
        // BUG FIX: log message previously said 'passive' for a secondary
        if(self.logger.isInfo()) self.logger.info(f('[%s] promoting %s to secondary', self.id, ismaster.me));
        self.replSet.emit('joined', 'secondary', server);

        // A secondary alone may satisfy the connection when allowed
        if(self.secondaryOnlyConnectionAllowed && self.state == CONNECTING) {
          self.state = CONNECTED;
          self.replSet.emit('connect', self.replSet);
        }

        return true;
      };

      return false;
  }

  // Return update applied
  return true;
}
module.exports = State;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,93 @@
"use strict";
var inherits = require('util').inherits
, f = require('util').format
, EventEmitter = require('events').EventEmitter;
/**
* Creates a new Authentication Session
* @class
* @param {object} [options] Options for the session
* @param {{Server}|{ReplSet}|{Mongos}} topology The topology instance underpinning the session
*/
var Session = function(options, topology) {
this.options = options;
this.topology = topology;
// Add event listener
EventEmitter.call(this);
}
inherits(Session, EventEmitter);
/**
* Execute a command
* @method
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {object} cmd The command hash
* @param {object} [options.readPreference] Specify read preference if command supports it
* @param {object} [options.connection] Specify connection object to execute command against
* @param {opResultCallback} callback A callback function
*/
Session.prototype.command = function(ns, cmd, options, callback) {
this.topology.command(ns, cmd, options, callback);
}
/**
* Insert one or more documents
* @method
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {array} ops An array of documents to insert
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {opResultCallback} callback A callback function
*/
Session.prototype.insert = function(ns, ops, options, callback) {
this.topology.insert(ns, ops, options, callback);
}
/**
* Perform one or more update operations
* @method
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {array} ops An array of updates
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {opResultCallback} callback A callback function
*/
Session.prototype.update = function(ns, ops, options, callback) {
this.topology.update(ns, ops, options, callback);
}
/**
* Perform one or more remove operations
* @method
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {array} ops An array of removes
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {opResultCallback} callback A callback function
*/
Session.prototype.remove = function(ns, ops, options, callback) {
this.topology.remove(ns, ops, options, callback);
}
/**
* Perform one or more remove operations
* @method
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {{object}|{Long}} cmd Can be either a command returning a cursor or a cursorId
* @param {object} [options.batchSize=0] Batchsize for the operation
* @param {array} [options.documents=[]] Initial documents list for cursor
* @param {boolean} [options.tailable=false] Tailable flag set
* @param {boolean} [options.oplogReply=false] oplogReply flag set
* @param {boolean} [options.awaitdata=false] awaitdata flag set
* @param {boolean} [options.exhaust=false] exhaust flag set
* @param {boolean} [options.partial=false] partial flag set
* @param {opResultCallback} callback A callback function
*/
Session.prototype.cursor = function(ns, cmd, options) {
return this.topology.cursor(ns, cmd, options);
}
module.exports = Session;

View File

@@ -0,0 +1,276 @@
"use strict";
var Logger = require('../../connection/logger')
, EventEmitter = require('events').EventEmitter
, inherits = require('util').inherits
, f = require('util').format;
/**
 * Creates a new Ping read preference strategy instance
 * @class
 * @param {object} [options] Strategy options
 * @param {number} [options.pingInterval=10000] Ping interval to check the response time to the different servers
 * @param {number} [options.acceptableLatency=15] Acceptable latency for selecting a server for reading (in milliseconds)
 * @param {boolean} [options.debug=false] Expose the collected ping data via a read-only `data` property
 * @return {Ping} A new Ping strategy instance
 */
var Ping = function(options) {
  // Add event listener
  EventEmitter.call(this);

  // BUG FIX: options is documented as optional but was dereferenced
  // unconditionally below (options.pingInterval); default it first
  options = options || {};

  // Contains the ping state
  this.s = {
    // Last measured round-trip time per server name
    pings: {}
    // Options as provided (already defaulted to {})
    , options: options
    // Logger
    , logger: Logger('Ping', options)
    // Ping interval between ha() sweeps
    , pingInterval: options.pingInterval || 10000
    , acceptableLatency: options.acceptableLatency || 15
    // Debug options
    , debug: typeof options.debug == 'boolean' ? options.debug : false
    // Round-robin index over candidate servers
    , index: 0
    // Date of the last completed ping sweep
    , lastPing: null
  }

  // Log the options set
  if(this.s.logger.isDebug()) this.s.logger.debug(f('ping strategy interval [%s], acceptableLatency [%s]', this.s.pingInterval, this.s.acceptableLatency));

  // If we have enabled debug
  if(this.s.debug) {
    // Expose read access to the collected ping data
    Object.defineProperty(this, 'data', {
      enumerable: true, get: function() { return this.s.pings; }
    });
  }
}

inherits(Ping, EventEmitter);
/**
 * Keep only the servers whose lastIsMaster().tags satisfy every tag on the
 * read preference. A null tags field on the preference disables filtering.
 * @ignore
 */
var filterByTags = function(readPreference, servers) {
  if(readPreference.tags == null) return servers;
  var tags = readPreference.tags;

  // A server qualifies when every requested tag matches its own tag set
  return servers.filter(function(server) {
    var serverTags = server.lastIsMaster().tags || {};
    for(var name in tags) {
      if(serverTags[name] != tags[name]) return false;
    }
    return true;
  });
}
/**
 * Pick a server for reading, preferring the lowest measured ping latency
 * within an acceptable-latency window, round-robining among candidates.
 * @method
 * @param {State} set The current replicaset state object
 * @param {ReadPreference} readPreference The current readPreference object
 * @return {Server|null} The picked server, or null when no candidate matches
 */
Ping.prototype.pickServer = function(set, readPreference) {
  var self = this;
  // Candidate pool: primary (when present) plus all secondaries
  var servers = [];
  if(set.primary) {
    servers.push(set.primary);
  }

  for(var i = 0; i < set.secondaries.length; i++) {
    servers.push(set.secondaries[i]);
  }

  // Filter by tags
  servers = filterByTags(readPreference, servers);

  // Pair each candidate with its last measured ping time (0 when never pinged)
  var serverList = [];
  for(var i = 0; i < servers.length; i++) {
    serverList.push({name: servers[i].name, time: self.s.pings[servers[i].name] || 0});
  }

  // Sort ascending by ping time
  // BUG FIX: the comparator previously returned a boolean (a.time > b.time),
  // which is not a valid Array.prototype.sort comparator and could leave the
  // list unsorted; return a signed number instead
  serverList.sort(function(a, b) {
    return a.time - b.time;
  });

  // Locate lowest time (picked servers are lowest time + acceptable Latency margin)
  var lowest = serverList.length > 0 ? serverList[0].time : 0;

  // Keep only servers within the acceptable latency window
  serverList = serverList.filter(function(s) {
    return s.time <= lowest + self.s.acceptableLatency;
  });

  // No candidates: default to the primary when available
  if(serverList.length == 0 && set.primary) {
    if(self.s.logger.isInfo()) self.s.logger.info(f('picked primary server [%s]', set.primary.name));
    return set.primary;
  } else if(serverList.length == 0) {
    return null
  }

  // We picked first server
  if(self.s.logger.isInfo()) self.s.logger.info(f('picked server [%s] with ping latency [%s]', serverList[0].name, serverList[0].time));

  // Advance the round-robin index over the remaining candidates
  self.s.index = (self.s.index + 1) % serverList.length;

  // Return the chosen server object from the set
  return set.get(serverList[self.s.index].name);
}
/**
 * Start of an operation
 * @method
 * @param {Server} server The server the operation is running against
 * @param {object} query The operation running
 * @param {Date} date The start time of the operation
 * @return {object}
 */
// Intentional no-op: this strategy measures latency via explicit pings
// (ha/connect), not by timing individual operations
Ping.prototype.startOperation = function(server, query, date) {
}
/**
 * End of an operation
 * @method
 * @param {Server} server The server the operation is running against
 * @param {error} err An error from the operation
 * @param {object} result The result from the operation
 * @param {Date} date The start time of the operation
 * @return {object}
 */
// Intentional no-op: see startOperation — per-operation timing is unused here
Ping.prototype.endOperation = function(server, err, result, date) {
}
/**
 * High availability process running: pings every known server in parallel and
 * records the round-trip time, then emits a 'ping' event on the topology.
 * Throttled by pingInterval — completes immediately when called too soon.
 * @method
 * @param {object} topology The topology instance to emit the 'ping' event on
 * @param {State} state The current replicaset state object
 * @param {resultCallback} callback The callback to return the result from the function
 * @return {object}
 */
Ping.prototype.ha = function(topology, state, callback) {
  var self = this;
  var servers = state.getAll();
  // Outstanding ping count; the sweep completes when it reaches zero
  var count = servers.length;

  // No servers return
  if(servers.length == 0) return callback(null, null);

  // Return if we have not yet reached the ping interval
  if(self.s.lastPing != null) {
    var diff = new Date().getTime() - self.s.lastPing.getTime();
    if(diff < self.s.pingInterval) return callback(null, null);
  }

  // Execute a single timed ismaster ping against one server
  var operation = function(_server) {
    var start = new Date();
    // Execute ping against server
    _server.command('system.$cmd', {ismaster:1}, function(err, r) {
      count = count - 1;
      // Record the measured round-trip time for this server
      var time = new Date().getTime() - start.getTime();
      self.s.pings[_server.name] = time;
      // Log info for debug
      if(self.s.logger.isDebug()) self.s.logger.debug(f('ha latency for server [%s] is [%s] ms', _server.name, time));

      // We are done with all the servers
      if(count == 0) {
        // Emit ping event (carries the last reply received, if any)
        topology.emit('ping', err, r ? r.result : null);
        // Update the last ping time
        self.s.lastPing = new Date();
        // Return
        callback(null, null);
      }
    });
  }

  // Fire off pings for all servers in parallel
  while(servers.length > 0) {
    operation(servers.shift());
  }
}
// Forget the recorded ping time for a server; a server with no entry reads
// as "never pinged" elsewhere in this strategy
var removeServer = function(self, server) {
  var pings = self.s.pings;
  if(server.name in pings) delete pings[server.name];
}
// A closed, errored or timed-out server's latency measurement is stale;
// all three connection events simply drop it from the ping table.

/**
 * Server connection closed / errored out / timed out
 * @method
 * @param {Server} server The server whose connection changed state
 */
['close', 'error', 'timeout'].forEach(function(eventName) {
  Ping.prototype[eventName] = function(server) {
    removeServer(this, server);
  };
});
/**
 * Server connection happened: time an ismaster round trip against the freshly
 * connected server and record its latency.
 * @method
 * @param {Server} server The server that connected
 * @param {resultCallback} callback The callback to return the result from the function
 */
Ping.prototype.connect = function(server, callback) {
  var self = this;
  // Stamp the start time before issuing the ping
  var started = new Date();

  // Execute ping against server
  server.command('system.$cmd', {ismaster:1}, function(err, r) {
    // Record the measured latency for this server
    var latency = new Date().getTime() - started.getTime();
    self.s.pings[server.name] = latency;

    // Log info for debug
    if(self.s.logger.isDebug()) self.s.logger.debug(f('connect latency for server [%s] is [%s] ms', server.name, latency));

    // Remember when we last pinged anything
    self.s.lastPing = new Date();

    // Done, return
    callback(null, null);
  });
}
/**
* This is a result from a readPreference strategy
*
* @callback readPreferenceResultCallback
* @param {error} error An error object. Set to null if no error present
* @param {Server} server The server picked by the strategy
*/
module.exports = Ping;

View File

@@ -0,0 +1,514 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, Query = require('../connection/commands').Query
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
// getLastError options that may be copied through from a write concern
var writeConcernFields = ['w', 'wtimeout', 'j', 'fsync'];

// Legacy wire-protocol support: emulates modern write commands using
// OP_INSERT/OP_UPDATE/OP_DELETE messages plus getLastError round trips
var LegacySupport = function() {}
//
// Legacy mass insert with ordered/unordered write-command emulation.
// Routes to executeOrdered unless options.ordered is false or the write
// concern is unacknowledged (w:0), which always runs unordered.
//
LegacySupport.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  ops = Array.isArray(ops) ? ops : [ops];

  // Legacy write batches are capped at 1000 documents
  if(ops.length > 1000) return callback(new MongoError("exceeded maximum write batch size of 1000"));

  // Ordered unless explicitly disabled; w:0 always runs unordered
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern || {w:1};
  var execute = (!ordered || writeConcern.w == 0) ? executeUnordered : executeOrdered;
  return execute('insert', Insert, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Legacy update with ordered/unordered write-command emulation
LegacySupport.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  ops = Array.isArray(ops) ? ops : [ops];

  // Ordered unless explicitly disabled; w:0 always runs unordered
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern || {w:1};
  var execute = (!ordered || writeConcern.w == 0) ? executeUnordered : executeOrdered;
  return execute('update', Update, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Legacy remove with ordered/unordered write-command emulation
LegacySupport.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  options = options || {};
  ops = Array.isArray(ops) ? ops : [ops];

  // Ordered unless explicitly disabled; w:0 always runs unordered
  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern || {w:1};
  var execute = (!ordered || writeConcern.w == 0) ? executeUnordered : executeOrdered;
  return execute('remove', Remove, ismaster, ns, bson, pool, callbacks, ops, options, callback);
}
// Fire-and-forget OP_KILL_CURSORS for a single cursor id; legacy kill
// cursors receives no server reply, so the callback fires immediately
LegacySupport.prototype.killCursor = function(bson, cursorId, connection, callback) {
  // Build the kill cursor message
  var command = new KillCursor(bson, [cursorId]);

  // Only write when we still have a live connection
  if(connection && connection.isConnected()) {
    connection.write(command.toBin());
  }

  // NOTE(review): this only rebinds the local parameter; the caller's
  // cursorId value is unaffected — confirm nothing relies on it
  cursorId = Long.ZERO;

  // Report completion immediately
  if(callback) callback(null, null);
}
// Fetch the next batch for an open cursor using a legacy OP_GET_MORE message.
// On success the returned documents and new cursorId are written into
// cursorState; on CursorNotFound the callback receives a MongoError.
LegacySupport.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
  // Create getMore command
  var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});

  // Handle the server reply
  var queryCallback = function(err, r) {
    if(err) return callback(err);

    // CursorNotFound response flag (bit 0) set: the cursor died server-side
    if((r.responseFlags & (1 << 0)) != 0) {
      return callback(new MongoError("cursor killed or timed out"), null);
    }

    // Feed the returned batch and the (possibly zero) cursor id back in
    cursorState.documents = r.documents;
    cursorState.cursorId = r.cursorId;

    // Return
    callback(null);
  }

  // Propagate raw mode on the callback so the reply is not deserialized
  if(raw) {
    queryCallback.raw = raw;
  }

  // Register the reply handler keyed by request id
  callbacks.register(getMore.requestId, queryCallback);
  // Write out the getMore command
  connection.write(getMore.toBin());
}
// Translate a cursor-establishing command into a legacy wire-protocol Query.
// Returns undefined when cursorState.cursorId is already set — the cursor
// exists server-side, so no query needs to be built to create it.
LegacySupport.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  // Establish type of command
  if(cmd.find) {
    return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
  } else if(cursorState.cursorId != null) {
    // Existing cursor: intentionally fall through with no query
  } else if(cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
  }
}
//
// Execute a find command: translates a {find: ..., query: ...} command into a
// legacy OP_QUERY message, mapping command options onto $-modifiers where
// required and computing numberToReturn from limit/batchSize.
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
  // BUG FIX: options was dereferenced (options.readPreference) before being
  // defaulted; normalize it first so a missing options object cannot throw
  options = options || {};

  // Normalize the read preference into a ReadPreference instance
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');

  // Set the optional batchSize
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  var numberToReturn = 0;

  // Unpack the limit and batchSize values
  if(cursorState.limit == 0) {
    numberToReturn = cursorState.batchSize;
  } else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
    numberToReturn = cursorState.limit;
  } else {
    numberToReturn = cursorState.batchSize;
  }

  var numberToSkip = cursorState.skip || 0;
  // Build actual find command
  var findCmd = {};
  // Tracks whether any $-modifier forced the $query wrapper form
  var usesSpecialModifier = false;

  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos' && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
    usesSpecialModifier = true;
  }

  // Add special modifiers to the query
  if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
  if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
  if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
  if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
  if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
  if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
  if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
  if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
  if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
  if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;

  // If we have explain, return a single document and close cursor
  if(cmd.explain) {
    numberToReturn = -1;
    usesSpecialModifier = true;
    findCmd['$explain'] = true;
  }

  // With modifiers the selector is wrapped under $query; otherwise it is sent bare
  if(usesSpecialModifier) {
    findCmd['$query'] = cmd.query;
  } else {
    findCmd = cmd.query;
  }

  // Build Query object
  var query = new Query(bson, ns, findCmd, {
    numberToSkip: numberToSkip, numberToReturn: numberToReturn
    , checkKeys: false, returnFieldSelector: cmd.fields
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Set up the option bits for wire protocol
  if(typeof cmd.tailable == 'boolean') query.tailable = cmd.tailable;
  if(typeof cmd.oplogReplay == 'boolean') query.oplogReplay = cmd.oplogReplay;
  if(typeof cmd.noCursorTimeout == 'boolean') query.noCursorTimeout = cmd.noCursorTimeout;
  if(typeof cmd.awaitData == 'boolean') query.awaitData = cmd.awaitData;
  if(typeof cmd.exhaust == 'boolean') query.exhaust = cmd.exhaust;
  if(typeof cmd.partial == 'boolean') query.partial = cmd.partial;

  // Return the query
  return query;
}
//
// Set up a command cursor: wraps an arbitrary command into a legacy OP_QUERY
// against the database's $cmd collection with a single returned document.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // BUG FIX: options was dereferenced (options.readPreference) before being
  // defaulted; normalize it first so a missing options object cannot throw
  options = options || {};

  // Normalize the read preference into a ReadPreference instance
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');

  // Shallow-copy the command so it can be decorated safely
  var finalCmd = {};
  for(var name in cmd) {
    finalCmd[name] = cmd[name];
  }

  // Build command namespace
  var parts = ns.split(/\./);

  // We have a Mongos topology, check if we need to add a readPreference
  if(topology.type == 'mongos' && readPreference) {
    finalCmd['$readPreference'] = readPreference.toJSON();
  }

  // Commands are issued against <db>.$cmd, returning exactly one document
  var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Return the query
  return query;
}
/**
 * Wrap a callback in the active Node domain, when one exists.
 * Hands the callback back untouched when there is no active domain
 * or no callback to wrap.
 * @ignore
 */
var bindToCurrentDomain = function(callback) {
  var domain = process.domain;
  // Nothing to bind to (or nothing to bind): return as-is
  if(domain == null || callback == null) return callback;
  return domain.bind(callback);
}
// An acknowledged write concern requires a getLastError round trip.
// Any of w/wtimeout set, j or fsync true, or a completely empty object
// (the default concern) counts as acknowledged.
var hasWriteConcern = function(writeConcern) {
  return !!(writeConcern.w
    || writeConcern.wtimeout
    || writeConcern.j == true
    || writeConcern.fsync == true
    || Object.keys(writeConcern).length == 0);
}
// Copy only the four recognized getLastError fields, skipping unset ones
var cloneWriteConcern = function(writeConcern) {
  var cloned = {};
  ['w', 'wtimeout', 'j', 'fsync'].forEach(function(key) {
    if(writeConcern[key] != null) cloned[key] = writeConcern[key];
  });
  return cloned;
}
//
// Aggregate up all the results: folds the per-op getLastError documents into
// a single modern-write-command-shaped result (ok/n/upserted/writeErrors/
// writeConcernError) wrapped in a CommandResult.
//
var aggregateWriteOperationResults = function(opType, ops, results, connection) {
  var finalResult = { ok: 1, n: 0 }

  // Map all the results coming back
  for(var i = 0; i < results.length; i++) {
    var result = results[i];
    var op = ops[i];

    // Lazily create the upserted list the first time an upsert is seen
    if((result.upserted || (result.updatedExisting == false)) && finalResult.upserted == null) {
      finalResult.upserted = [];
    }

    // Push the upserted document to the list of upserted values
    if(result.upserted) {
      finalResult.upserted.push({index: i, _id: result.upserted});
    }

    // We have an upsert where we passed in a _id: recover it from the op's query
    if(result.updatedExisting == false && result.n == 1 && result.upserted == null) {
      finalResult.upserted.push({index: i, _id: op.q._id});
    }

    // Successful legacy inserts report no n; count them as one document each
    if(result.ok == 1 && opType == 'insert' && result.err == null) {
      finalResult.n = finalResult.n + 1;
    }

    // We have a command error
    // NOTE(review): precedence here is (result != null && result.ok == 0)
    // || result.err || result.errmsg — confirm that is the intent
    if(result != null && result.ok == 0 || result.err || result.errmsg) {
      if(result.ok == 0) finalResult.ok = 0;
      finalResult.code = result.code;
      finalResult.errmsg = result.errmsg || result.err || result.errMsg;

      // Known write-error codes (duplicate key etc.) become per-index
      // writeErrors entries; anything else is treated as a writeConcernError
      if(result.code == 11000
        || result.code == 11001
        || result.code == 12582
        || result.code == 16544
        || result.code == 16538
        || result.code == 16542
        || result.code == 14
        || result.code == 13511) {
        if(finalResult.writeErrors == null) finalResult.writeErrors = [];
        finalResult.writeErrors.push({
            index: i
          , code: result.code
          , errmsg: result.errmsg || result.err || result.errMsg
        });
      } else {
        finalResult.writeConcernError = {
            code: result.code
          , errmsg: result.errmsg || result.err || result.errMsg
        }
      }
    } else if(typeof result.n == 'number') {
      finalResult.n += result.n;
    } else {
      finalResult.n += 1;
    }

    // Carry the most recent lastOp through to the aggregate result
    if(result != null && result.lastOp) finalResult.lastOp = result.lastOp;
  }

  // Return finalResult aggregated results
  return new CommandResult(finalResult, connection);
}
//
// Execute all inserts in an ordered manner: each op is written one at a time,
// followed by a getLastError round trip, recursing to the next op only on
// success and stopping at the first reported error.
//
var executeOrdered = function(opType ,command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Work on a copy so the original ops array stays intact for aggregation
  var _ops = ops.slice(0);
  // Bind to current domain
  callback = bindToCurrentDomain(callback);
  // Collect all the getLastErrors
  var getLastErrors = [];

  // Execute one operation, then recurse over the remaining list
  var executeOp = function(list, _callback) {
    // Get a pool connection
    var connection = pool.get();
    // No more items in the list: aggregate and finish
    if(list.length == 0) return _callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));

    // Get the first operation
    var doc = list.shift();

    // Create an insert command
    var op = new command(Query.getRequestId(), ismaster, bson, ns, [doc], options);
    // Write concern
    var optionWriteConcern = options.writeConcern || {w:1};
    // Final write concern
    var writeConcern = cloneWriteConcern(optionWriteConcern);

    // Get the db name
    var db = ns.split('.').shift();

    // Error out if no connection available
    if(connection == null)
      return _callback(new MongoError("no connection available"));

    try {
      // Execute the insert
      connection.write(op.toBin());

      // If write concern 0 don't fire getLastError
      // NOTE(review): with an unacknowledged concern no getLastError is sent,
      // so executeOp never recurses and remaining ops are not written — the
      // insert/update/remove entry points appear to route w:0 to
      // executeUnordered instead; confirm this path is unreachable with w:0
      if(hasWriteConcern(writeConcern)) {
        var getLastErrorCmd = {getlasterror: 1};
        // Merge all the fields
        for(var i = 0; i < writeConcernFields.length; i++) {
          if(writeConcern[writeConcernFields[i]] != null)
            getLastErrorCmd[writeConcernFields[i]] = writeConcern[writeConcernFields[i]];
        }

        // Create a getLastError command
        var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
        // Write the lastError message
        connection.write(getLastErrorOp.toBin());
        // Register the callback
        callbacks.register(getLastErrorOp.requestId, function(err, result) {
          if(err) return callback(err);
          // Get the document
          var doc = result.documents[0];
          // Save the getLastError document
          getLastErrors.push(doc);
          // If we have an error terminate
          if(doc.ok == 0 || doc.err || doc.errmsg) return callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
          // Execute the next op in the list
          executeOp(list, callback);
        });
      }
    } catch(err) {
      if(typeof err == 'string') err = new MongoError(err);
      // We have a serialization error, rewrite as a write error to have same behavior as modern
      // write commands
      getLastErrors.push({ ok: 1, errmsg: err.message, code: 14 });
      // Return due to an error
      return callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
    }
  }

  // Execute the operations
  executeOp(_ops, callback);
}
//
// Execute all writes in an unordered manner: every op is written up front and
// the getLastError results are collected by index until all have come back,
// then aggregated into a single result.
//
var executeUnordered = function(opType, command, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  // Bind to current domain
  callback = bindToCurrentDomain(callback);
  // Total operations to write
  var totalOps = ops.length;
  // Collect all the getLastErrors, indexed by op position
  var getLastErrors = [];

  // Write concern
  var optionWriteConcern = options.writeConcern || {w:1};
  // Final write concern
  var writeConcern = cloneWriteConcern(optionWriteConcern);

  // Execute all the operations
  for(var i = 0; i < ops.length; i++) {
    // Create an insert command
    var op = new command(Query.getRequestId(), ismaster, bson, ns, [ops[i]], options);
    // Get db name
    var db = ns.split('.').shift();

    // Get a pool connection
    var connection = pool.get();

    // Error out if no connection available
    // BUG FIX: previously invoked the undefined name _callback here, which
    // threw a ReferenceError instead of reporting the connection error
    if(connection == null)
      return callback(new MongoError("no connection available"));

    try {
      // Execute the insert
      connection.write(op.toBin());

      // If write concern 0 don't fire getLastError
      if(hasWriteConcern(writeConcern)) {
        var getLastErrorCmd = {getlasterror: 1};
        // Merge all the fields
        for(var j = 0; j < writeConcernFields.length; j++) {
          if(writeConcern[writeConcernFields[j]] != null)
            getLastErrorCmd[writeConcernFields[j]] = writeConcern[writeConcernFields[j]];
        }

        // Create a getLastError command
        var getLastErrorOp = new Query(bson, f("%s.$cmd", db), getLastErrorCmd, {numberToReturn: -1});
        // Write the lastError message
        connection.write(getLastErrorOp.toBin());

        // Give the result from getLastError the right index
        var callbackOp = function(_index) {
          return function(err, result) {
            // Update the number of operations executed
            totalOps = totalOps - 1;
            // Save the getLastError document
            getLastErrors[_index] = result.documents[0];
            // All replies received: aggregate and finish
            if(totalOps == 0) {
              callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
            }
          }
        }

        // Register the callback
        callbacks.register(getLastErrorOp.requestId, callbackOp(i));
      }
    } catch(err) {
      if(typeof err == 'string') err = new MongoError(err);
      // Update the number of operations executed
      totalOps = totalOps - 1;
      // We have a serialization error, rewrite as a write error to have same behavior as modern
      // write commands
      getLastErrors[i] = { ok: 1, errmsg: err.message, code: 14 };
      // Check if we are done
      if(totalOps == 0) {
        callback(null, aggregateWriteOperationResults(opType, ops, getLastErrors, connection));
      }
    }
  }

  // Unacknowledged writes get no getLastError replies; complete immediately
  if(writeConcern
    && writeConcern.w == 0 && callback) {
    callback(null, null);
  }
}
module.exports = LegacySupport;

View File

@@ -0,0 +1,250 @@
"use strict";
var Insert = require('./commands').Insert
, Update = require('./commands').Update
, Remove = require('./commands').Remove
, Query = require('../connection/commands').Query
, copy = require('../connection/utils').copy
, KillCursor = require('../connection/commands').KillCursor
, GetMore = require('../connection/commands').GetMore
, Query = require('../connection/commands').Query
, ReadPreference = require('../topologies/read_preference')
, f = require('util').format
, CommandResult = require('../topologies/command_result')
, MongoError = require('../error')
, Long = require('bson').Long;
var LegacySupport = function() {}
//
// Execute a write operation
//
// Translate a legacy write operation into a 2.6-style write command
// (insert/update/delete) and execute it against the database's $cmd
// collection via topology.command.
//
// type     - command name ('insert' | 'update' | 'delete')
// opsField - command field holding the ops ('documents' | 'updates' | 'deletes')
// callback - optional; may be passed in the options position
var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
  if(ops.length == 0) throw new MongoError("insert must contain at least one document");
  // Support the (topology, type, opsField, ns, ops, callback) call shape
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }
  // Split the namespace into database and collection parts
  var parts = ns.split(".");
  var dbName = parts.shift();
  var collectionName = parts.join('.');
  // Assemble the write command; the command-name key must be inserted first
  var writeCommand = {};
  writeCommand[type] = collectionName;
  writeCommand[opsField] = ops;
  writeCommand.ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
  writeCommand.writeConcern = options.writeConcern || {};
  // Serialization options: inserts get key validation
  var opts = type == 'insert' ? {checkKeys: true} : {};
  // Pass through function serialization when requested
  if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
  // Hand the command to the topology for execution
  topology.command(f("%s.$cmd", dbName), writeCommand, opts, callback);
}
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
// Insert documents by issuing a 2.6-style `insert` write command.
LegacySupport.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
}
// Update documents by issuing a 2.6-style `update` write command.
LegacySupport.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
}
// Remove documents by issuing a 2.6-style `delete` write command.
LegacySupport.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
  executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
}
// Fire-and-forget kill of a single server-side cursor. Writes an
// OP_KILL_CURSORS message if the connection is still usable, then always
// reports success via the optional callback.
LegacySupport.prototype.killCursor = function(bson, cursorId, connection, callback) {
  // Create a kill cursor command for this one cursor id
  var killCursor = new KillCursor(bson, [cursorId]);
  // Only write when the connection is up; otherwise skip silently
  if(connection && connection.isConnected()) connection.write(killCursor.toBin());
  // NOTE(review): this only rebinds the local parameter — the caller's cursor
  // id is not mutated; confirm callers reset their own cursor state.
  cursorId = Long.ZERO;
  // Return to caller; the kill is best-effort so no error is reported
  if(callback) callback(null, null);
}
// Fetch the next batch for an open cursor via OP_GET_MORE. The reply handler
// is registered before the message is written so the response cannot race the
// registration. On success cursorState.documents and cursorState.cursorId are
// updated in place and the callback is invoked as (null); on failure as (err).
LegacySupport.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
  // Create getMore command for the current cursor id
  var getMore = new GetMore(bson, ns, cursorState.cursorId, {numberToReturn: batchSize});
  // Reply handler
  var queryCallback = function(err, r) {
    if(err) return callback(err);
    // Bit 0 of responseFlags signals a dead cursor (killed or timed out)
    if((r.responseFlags & (1 << 0)) != 0) {
      return callback(new MongoError("cursor killed or timed out"), null);
    }
    // Store the batch and the (possibly zero) continuation cursor id
    cursorState.documents = r.documents;
    cursorState.cursorId = r.cursorId;
    // Return
    callback(null);
  }
  // A truthy raw flag asks the response layer to skip BSON deserialization
  if(raw) {
    queryCallback.raw = raw;
  }
  // Register the handler against this request id
  callbacks.register(getMore.requestId, queryCallback);
  // Write out the getMore command
  connection.write(getMore.toBin());
}
// Translate a command into the matching OP_QUERY message. Returns a Query
// instance, except when cursorState.cursorId is already set: then the cursor
// is live on the server and the function intentionally returns undefined
// (the caller should use getMore instead of issuing a new query).
LegacySupport.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  // Establish type of command
  if(cmd.find) {
    return setupClassicFind(bson, ns, cmd, cursorState, topology, options)
  } else if(cursorState.cursorId != null) {
    // Cursor already established server-side; nothing to build here
  } else if(cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
  }
}
//
// Execute a find command
//
// Build an OP_QUERY message for a classic (pre find-command) query.
// Modifier fields on cmd (sort, hint, maxTimeMS, ...) are mapped to their
// legacy $-prefixed keys, in which case the filter is wrapped in a $query
// envelope; otherwise the raw filter is sent as the query document.
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // Ensure we have at least some options
  options = options || {};
  // A batchSize on the command overrides the cursor state's value
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  var numberToReturn = 0;
  // Derive numberToReturn from limit/batchSize.
  // NOTE(review): the middle branch also selects the limit when
  // 0 < limit < batchSize — presumably so the server stops at the limit;
  // confirm intent before changing.
  if(cursorState.limit == 0) {
    numberToReturn = cursorState.batchSize;
  } else if(cursorState.limit < 0 || cursorState.limit < cursorState.batchSize || (cursorState.limit > 0 && cursorState.batchSize == 0)) {
    numberToReturn = cursorState.limit;
  } else {
    numberToReturn = cursorState.batchSize;
  }
  var numberToSkip = cursorState.skip || 0;
  // Build actual find command
  var findCmd = {};
  // Tracks whether the filter must be wrapped in a $query envelope
  var usesSpecialModifier = false;
  // On a mongos topology the read preference rides along in the query itself
  if(topology.type == 'mongos' && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
    usesSpecialModifier = true;
  }
  // Map each supported modifier onto its legacy wire-protocol key
  if(cmd.sort) findCmd['orderby'] = cmd.sort, usesSpecialModifier = true;
  if(cmd.hint) findCmd['$hint'] = cmd.hint, usesSpecialModifier = true;
  if(cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot, usesSpecialModifier = true;
  if(cmd.returnKey) findCmd['$returnKey'] = cmd.returnKey, usesSpecialModifier = true;
  if(cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan, usesSpecialModifier = true;
  if(cmd.min) findCmd['$min'] = cmd.min, usesSpecialModifier = true;
  if(cmd.max) findCmd['$max'] = cmd.max, usesSpecialModifier = true;
  if(cmd.showDiskLoc) findCmd['$showDiskLoc'] = cmd.showDiskLoc, usesSpecialModifier = true;
  if(cmd.comment) findCmd['$comment'] = cmd.comment, usesSpecialModifier = true;
  if(cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS, usesSpecialModifier = true;
  // If we have explain, return a single document and close cursor
  if(cmd.explain) {
    numberToReturn = -1;
    usesSpecialModifier = true;
    findCmd['$explain'] = true;
  }
  // Wrap the filter only when at least one modifier was set
  if(usesSpecialModifier) {
    findCmd['$query'] = cmd.query;
  } else {
    findCmd = cmd.query;
  }
  // Build Query object
  var query = new Query(bson, ns, findCmd, {
    numberToSkip: numberToSkip, numberToReturn: numberToReturn
    , checkKeys: false, returnFieldSelector: cmd.fields
  });
  // Mirror the read preference into the slaveOk wire flag
  query.slaveOk = readPreference.slaveOk();
  // Copy per-query wire flags only when explicitly provided as booleans
  if(typeof cmd.tailable == 'boolean') query.tailable = cmd.tailable;
  if(typeof cmd.oplogReplay == 'boolean') query.oplogReplay = cmd.oplogReplay;
  if(typeof cmd.noCursorTimeout == 'boolean') query.noCursorTimeout = cmd.noCursorTimeout;
  if(typeof cmd.awaitData == 'boolean') query.awaitData = cmd.awaitData;
  if(typeof cmd.exhaust == 'boolean') query.exhaust = cmd.exhaust;
  if(typeof cmd.partial == 'boolean') query.partial = cmd.partial;
  // Return the query
  return query;
}
//
// Set up a command cursor
//
// Wrap an arbitrary command document in an OP_QUERY against the database's
// $cmd collection, returning a single result document (numberToReturn: -1).
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Resolve and validate the read preference
  var readPreference = options.readPreference || new ReadPreference('primary');
  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
  // Guard against a missing options object (kept after the read preference
  // lookup to preserve the original call contract)
  options = options || {}
  // Shallow-copy the command so we can decorate it without mutating the input
  var finalCmd = {};
  for(var key in cmd) {
    finalCmd[key] = cmd[key];
  }
  // Split the namespace; only the database part is used for $cmd
  var nsParts = ns.split(/\./);
  // On a mongos topology the read preference rides along in the command itself
  if(topology.type == 'mongos' && readPreference) {
    finalCmd['$readPreference'] = readPreference.toJSON();
  }
  // Build the OP_QUERY wrapper returning exactly one document
  var query = new Query(bson, f('%s.$cmd', nsParts.shift()), finalCmd, {
    numberToSkip: 0, numberToReturn: -1
    , checkKeys: false
  });
  // Mirror the read preference into the slaveOk wire flag
  query.slaveOk = readPreference.slaveOk();
  // Hand the assembled query back to the caller
  return query;
}
/**
* @ignore
*/
// Bind the callback to the active process domain (if any) so that errors
// thrown asynchronously are routed to it; otherwise return the callback as-is.
var bindToCurrentDomain = function(callback) {
  var activeDomain = process.domain;
  // Bind only when both a domain and a callback exist
  if(activeDomain != null && callback != null) {
    return activeDomain.bind(callback);
  }
  return callback;
}
module.exports = LegacySupport;

View File

@@ -0,0 +1,335 @@
"use strict";
var MongoError = require('../error');
// Wire command operation ids
var OP_UPDATE = 2001;
var OP_INSERT = 2002;
var OP_DELETE = 2006;
// Models a legacy OP_INSERT wire message for one or more documents.
//   requestId - wire request id
//   ismaster  - server ismaster result (supplies the size limits)
//   bson      - BSON serializer
//   ns        - "db.collection" namespace
//   documents - non-empty array of documents to insert
//   options   - serializeFunctions / checkKeys / continueOnError toggles
var Insert = function(requestId, ismaster, bson, ns, documents, options) {
  // Mandatory arguments
  if(ns == null) throw new MongoError("ns must be specified for query");
  if(!Array.isArray(documents) || documents.length == 0) throw new MongoError("documents array must contain at least one document to insert");
  // The namespace travels as a cstring, so an embedded null is illegal
  if(ns.indexOf("\x00") != -1) {
    throw new MongoError("namespace cannot contain a null character");
  }
  // Internal state
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.documents = documents;
  this.ismaster = ismaster;
  // Normalize options
  var opts = options || {};
  // Only explicit booleans are honoured for these toggles
  // (=== true / !== false reproduce the original typeof-boolean defaults)
  this.serializeFunctions = opts.serializeFunctions === true;
  this.checkKeys = opts.checkKeys !== false;
  this.continueOnError = opts.continueOnError === true;
  // Wire flags: bit 0 = ContinueOnError
  this.flags = this.continueOnError ? 1 : 0;
}
// To Binary
Insert.prototype.toBin = function() {
// Calculate total length of the document
var length = 4 + Buffer.byteLength(this.ns) + 1 + (4 * 4);
// Calculate the size of all documents
for(var i = 0; i < this.documents.length; i++) {
var docsize = this.bson.calculateObjectSize(this.documents[i], this.serializeFunctions, true);
// Document is larger than maxBsonObjectSize, terminate serialization
if(docsize > this.ismaster.maxBsonObjectSize) {
throw new MongoError("Document exceeds maximum allowed bson size of " + this.ismaster.maxBsonObjectSize + " bytes");
}
// Add to total command size
length += docsize;
}
// Command is larger than maxMessageSizeBytes terminate serialization
if(length > this.ismaster.maxBsonObjectSize) {
throw new MongoError("Command exceeds maximum message size of " + this.ismaster.maxMessageSizeBytes + " bytes");
}
// Create command buffer
var buffer = new Buffer(length);
var index = 0;
// Write header length
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + 4;
// Write header requestId
buffer[index + 3] = (this.requestId >> 24) & 0xff;
buffer[index + 2] = (this.requestId >> 16) & 0xff;
buffer[index + 1] = (this.requestId >> 8) & 0xff;
buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// No flags
buffer[index + 3] = (0 >> 24) & 0xff;
buffer[index + 2] = (0 >> 16) & 0xff;
buffer[index + 1] = (0 >> 8) & 0xff;
buffer[index] = (0) & 0xff;
index = index + 4;
// Operation
buffer[index + 3] = (OP_INSERT >> 24) & 0xff;
buffer[index + 2] = (OP_INSERT >> 16) & 0xff;
buffer[index + 1] = (OP_INSERT >> 8) & 0xff;
buffer[index] = (OP_INSERT) & 0xff;
index = index + 4;
// Flags
buffer[index + 3] = (this.flags >> 24) & 0xff;
buffer[index + 2] = (this.flags >> 16) & 0xff;
buffer[index + 1] = (this.flags >> 8) & 0xff;
buffer[index] = (this.flags) & 0xff;
index = index + 4;
// Write collection name
index = index + buffer.write(this.ns, index, 'utf8') + 1;
buffer[index - 1] = 0;
// Write all the bson documents to the buffer at the index offset
for(var i = 0; i < this.documents.length; i++) {
// Serialize the entry
var newIndex = this.bson.serializeWithBufferAndIndex(this.documents[i], this.checkKeys, buffer, index, this.serializeFunctions);
var docSize = newIndex - index + 1;
// Write the doc size
buffer[index + 3] = (docSize >> 24) & 0xff;
buffer[index + 2] = (docSize >> 16) & 0xff;
buffer[index + 1] = (docSize >> 8) & 0xff;
buffer[index] = (docSize) & 0xff;
// Adjust index
index = index + docSize;
// Add terminating 0 for the object
buffer[index - 1] = 0;
}
return buffer;
}
// Models a legacy OP_UPDATE wire message built from a single update spec
// ({q, u, upsert, multi}) as produced by the write-command emulation layer.
var Update = function(requestId, ismaster, bson, ns, update, options) {
  // A namespace is mandatory
  if(ns == null) throw new MongoError("ns must be specified for query");
  // Normalize options and pull out the single update spec
  var opts = options || {};
  var spec = update[0];
  // Internal state
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.ismaster = ismaster;
  // Serialization toggles: only explicit booleans are honoured
  // (=== true reproduces the original typeof-boolean-with-false-default)
  this.serializeFunctions = opts.serializeFunctions === true;
  this.checkKeys = opts.checkKeys === true;
  // Unpack the update spec
  this.upsert = spec.upsert === true;
  this.multi = spec.multi === true;
  this.q = spec.q;
  this.u = spec.u;
  // Wire flags: bit 0 = Upsert, bit 1 = MultiUpdate
  this.flags = (this.upsert ? 1 : 0) | (this.multi ? 2 : 0);
}
// To Binary
Update.prototype.toBin = function() {
// Calculate total length of the document
var length = (4 * 4) + 4 + Buffer.byteLength(this.ns) + 1 + 4;
// Calculate the two object sizes
var qSize = this.bson.calculateObjectSize(this.q, this.serializeFunctions, true);
var uSize = this.bson.calculateObjectSize(this.u, this.serializeFunctions, true);
// Update the length
length = length + qSize + uSize;
// Create command buffer
var buffer = new Buffer(length);
var index = 0;
// Write header length
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + 4;
// Write header requestId
buffer[index + 3] = (this.requestId >> 24) & 0xff;
buffer[index + 2] = (this.requestId >> 16) & 0xff;
buffer[index + 1] = (this.requestId >> 8) & 0xff;
buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// No flags
buffer[index + 3] = (0 >> 24) & 0xff;
buffer[index + 2] = (0 >> 16) & 0xff;
buffer[index + 1] = (0 >> 8) & 0xff;
buffer[index] = (0) & 0xff;
index = index + 4;
// Operation
buffer[index + 3] = (OP_UPDATE >> 24) & 0xff;
buffer[index + 2] = (OP_UPDATE >> 16) & 0xff;
buffer[index + 1] = (OP_UPDATE >> 8) & 0xff;
buffer[index] = (OP_UPDATE) & 0xff;
index = index + 4;
// Write ZERO
buffer[index + 3] = (0 >> 24) & 0xff;
buffer[index + 2] = (0 >> 16) & 0xff;
buffer[index + 1] = (0 >> 8) & 0xff;
buffer[index] = (0) & 0xff;
index = index + 4;
// Write collection name
index = index + buffer.write(this.ns, index, 'utf8') + 1;
buffer[index - 1] = 0;
// Flags
buffer[index + 3] = (this.flags >> 24) & 0xff;
buffer[index + 2] = (this.flags >> 16) & 0xff;
buffer[index + 1] = (this.flags >> 8) & 0xff;
buffer[index] = (this.flags) & 0xff;
index = index + 4;
// Serialize the selector
var length = this.bson.serializeWithBufferAndIndex(this.q, this.checkKeys, buffer, index, this.serializeFunctions) - index + 1;
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + length;
// Serialize the update statement
length = this.bson.serializeWithBufferAndIndex(this.u, false, buffer, index, this.serializeFunctions) - index + 1;
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + length;
// Return the buffer
return buffer;
}
// Models a legacy OP_DELETE wire message built from a single remove spec
// ({q, limit}) as produced by the write-command emulation layer.
var Remove = function(requestId, ismaster, bson, ns, remove, options) {
  // A namespace is mandatory
  if(ns == null) throw new MongoError("ns must be specified for query");
  // Normalize options and pull out the single remove spec
  var opts = options || {};
  var spec = remove[0];
  // Internal state
  this.requestId = requestId;
  this.bson = bson;
  this.ns = ns;
  this.ismaster = ismaster;
  // Serialization toggles: only explicit booleans are honoured
  // (=== true reproduces the original typeof-boolean-with-false-default)
  this.serializeFunctions = opts.serializeFunctions === true;
  this.checkKeys = opts.checkKeys === true;
  // Unpack the remove spec; limit defaults to a single-document remove
  this.limit = typeof spec.limit == 'number' ? spec.limit : 1;
  this.q = spec.q;
  // Wire flags: bit 0 = SingleRemove
  this.flags = this.limit == 1 ? 1 : 0;
}
// To Binary
Remove.prototype.toBin = function() {
// Calculate total length of the document
var length = (4 * 4) + 4 + Buffer.byteLength(this.ns) + 1 + 4;
// Calculate the two object sizes
var qSize = this.bson.calculateObjectSize(this.q, this.serializeFunctions, true);
// Update the length
length = length + qSize;
// Create command buffer
var buffer = new Buffer(length);
var index = 0;
// Write header length
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + 4;
// Write header requestId
buffer[index + 3] = (this.requestId >> 24) & 0xff;
buffer[index + 2] = (this.requestId >> 16) & 0xff;
buffer[index + 1] = (this.requestId >> 8) & 0xff;
buffer[index] = (this.requestId) & 0xff;
index = index + 4;
// No flags
buffer[index + 3] = (0 >> 24) & 0xff;
buffer[index + 2] = (0 >> 16) & 0xff;
buffer[index + 1] = (0 >> 8) & 0xff;
buffer[index] = (0) & 0xff;
index = index + 4;
// Operation
buffer[index + 3] = (OP_DELETE >> 24) & 0xff;
buffer[index + 2] = (OP_DELETE >> 16) & 0xff;
buffer[index + 1] = (OP_DELETE >> 8) & 0xff;
buffer[index] = (OP_DELETE) & 0xff;
index = index + 4;
// Write ZERO
buffer[index + 3] = (0 >> 24) & 0xff;
buffer[index + 2] = (0 >> 16) & 0xff;
buffer[index + 1] = (0 >> 8) & 0xff;
buffer[index] = (0) & 0xff;
index = index + 4;
// Write collection name
index = index + buffer.write(this.ns, index, 'utf8') + 1;
buffer[index - 1] = 0;
// Write ZERO
buffer[index + 3] = (this.flags >> 24) & 0xff;
buffer[index + 2] = (this.flags >> 16) & 0xff;
buffer[index + 1] = (this.flags >> 8) & 0xff;
buffer[index] = (this.flags) & 0xff;
index = index + 4;
// Serialize the selector
var length = this.bson.serializeWithBufferAndIndex(this.q, this.checkKeys, buffer, index, this.serializeFunctions) - index + 1;
buffer[index + 3] = (length >> 24) & 0xff;
buffer[index + 2] = (length >> 16) & 0xff;
buffer[index + 1] = (length >> 8) & 0xff;
buffer[index] = (length) & 0xff;
index = index + length;
// Return the buffer
return buffer;
}
module.exports = {
Insert: Insert
, Update: Update
, Remove: Remove
}

View File

@@ -0,0 +1 @@
../mkdirp/bin/cmd.js

View File

@@ -0,0 +1 @@
../rimraf/bin.js

View File

@@ -0,0 +1,4 @@
language: node_js
node_js:
- 0.10 # stable 0.10 release line
- 0.12

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,69 @@
Javascript + C++ BSON parser
============================
This BSON parser is primarily meant to be used with the `mongodb` node.js driver.
However, wonderful tools such as `onejs` can package up a BSON parser that will work in the browser.
The current build is located in the `browser_build/bson.js` file.
A simple example of how to use BSON in the browser:
```html
<html>
<head>
<script src="https://raw.github.com/mongodb/js-bson/master/browser_build/bson.js">
</script>
</head>
<body onload="start();">
<script>
function start() {
var BSON = bson().BSON;
var Long = bson().Long;
var doc = {long: Long.fromNumber(100)}
// Serialize a document
var data = BSON.serialize(doc, false, true, false);
// Deserialize it again
var doc_2 = BSON.deserialize(data);
}
</script>
</body>
</html>
```
A simple example of how to use BSON in `node.js`:
```javascript
var bson = require("bson");
var BSON = bson.BSONPure.BSON;
var Long = bson.BSONPure.Long;
var doc = {long: Long.fromNumber(100)}
// Serialize a document
var data = BSON.serialize(doc, false, true, false);
console.log("data:", data);
// Deserialize the resulting Buffer
var doc_2 = BSON.deserialize(data);
console.log("doc_2:", doc_2);
```
The API consists of two simple methods to serialize/deserialize objects to/from BSON format:
* BSON.serialize(object, checkKeys, asBuffer, serializeFunctions)
* @param {Object} object the Javascript object to serialize.
* @param {Boolean} checkKeys the serializer will check if keys are valid.
* @param {Boolean} asBuffer return the serialized object as a Buffer object **(ignore)**.
* @param {Boolean} serializeFunctions serialize the javascript functions **(default:false)**
* @return {TypedArray/Array} returns a TypedArray or Array depending on what your browser supports
* BSON.deserialize(buffer, options, isArray)
* Options
* **evalFunctions** {Boolean, default:false}, evaluate functions in the BSON document scoped to the object deserialized.
* **cacheFunctions** {Boolean, default:false}, cache evaluated functions for reuse.
* **cacheFunctionsCrc32** {Boolean, default:false}, use a crc32 code for caching, otherwise use the string of the function.
* @param {TypedArray/Array} buffer a TypedArray/Array containing the BSON data
* @param {Object} [options] additional options used for the deserialization.
* @param {Boolean} [isArray] ignore used for recursive parsing.
* @return {Object} returns the deserialized Javascript Object.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,8 @@
{ "name" : "bson"
, "description" : "A bson parser for node.js and the browser"
, "main": "../lib/bson/bson"
, "directories" : { "lib" : "../lib/bson" }
, "engines" : { "node" : ">=0.6.0" }
, "licenses" : [ { "type" : "Apache License, Version 2.0"
, "url" : "http://www.apache.org/licenses/LICENSE-2.0" } ]
}

View File

@@ -0,0 +1,122 @@
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/kasperrt/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.33 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/kasperrt/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/zsh",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/kasperrt/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.10.33",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/kasperrt/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}

View File

@@ -0,0 +1,12 @@
gyp: binding.gyp not found (cwd: /var/www/server/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson) while trying to load binding.gyp
gyp ERR! configure error
gyp ERR! stack Error: `gyp` failed with exit code: 1
gyp ERR! stack at ChildProcess.onCpExit (/usr/lib/node_modules/npm/node_modules/node-gyp/lib/configure.js:343:16)
gyp ERR! stack at ChildProcess.emit (events.js:98:17)
gyp ERR! stack at Process.ChildProcess._handle.onexit (child_process.js:810:12)
gyp ERR! System Linux 3.13.11-03131106-generic
gyp ERR! command "node" "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js" "rebuild"
gyp ERR! cwd /var/www/server/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson
gyp ERR! node -v v0.10.33
gyp ERR! node-gyp -v v1.0.1
gyp ERR! not ok

View File

@@ -0,0 +1,344 @@
/**
 * Module dependencies.
 * @ignore
 */
// In the browser `window` exists and Buffer stays undefined; under node.js we
// pull Buffer in explicitly from the core 'buffer' module.
if(typeof window === 'undefined') {
var Buffer = require('buffer').Buffer; // TODO just use global Buffer
}
/**
 * A class representation of the BSON Binary type.
 *
 * Sub types
 *  - **BSON.BSON_BINARY_SUBTYPE_DEFAULT**, default BSON type.
 *  - **BSON.BSON_BINARY_SUBTYPE_FUNCTION**, BSON function type.
 *  - **BSON.BSON_BINARY_SUBTYPE_BYTE_ARRAY**, BSON byte array type.
 *  - **BSON.BSON_BINARY_SUBTYPE_UUID**, BSON uuid type.
 *  - **BSON.BSON_BINARY_SUBTYPE_MD5**, BSON md5 type.
 *  - **BSON.BSON_BINARY_SUBTYPE_USER_DEFINED**, BSON user defined type.
 *
 * @class
 * @param {(Buffer|string|Array|Uint8Array|Number)} [buffer] the binary data, or the
 *   subtype when a single number is passed.
 * @param {Number} [subType] the optional binary subtype.
 * @return {Binary}
 */
function Binary(buffer, subType) {
  if(!(this instanceof Binary)) return new Binary(buffer, subType);

  this._bsontype = 'Binary';

  // BUGFIX: the previous check used `buffer instanceof Number`, which is false
  // for primitive numbers, so `new Binary(5)` stored the number 5 itself as the
  // backing buffer (with an undefined position). Treat primitives and boxed
  // Number objects alike.
  var bufferIsNumber = typeof buffer == 'number' || buffer instanceof Number;

  if(bufferIsNumber) {
    // A single numeric argument is the subtype, not data.
    this.sub_type = buffer;
    this.position = 0;
  } else {
    this.sub_type = subType == null ? BSON_BINARY_SUBTYPE_DEFAULT : subType;
    this.position = 0;
  }

  if(buffer != null && !bufferIsNumber) {
    // Only accept String, Buffer, Uint8Array or Arrays
    if(typeof buffer == 'string') {
      // Different ways of writing the length of the string for the different types
      if(typeof Buffer != 'undefined') {
        this.buffer = new Buffer(buffer);
      } else if(typeof Uint8Array != 'undefined' || (Object.prototype.toString.call(buffer) == '[object Array]')) {
        this.buffer = writeStringToArray(buffer);
      } else {
        throw new Error("only String, Buffer, Uint8Array or Array accepted");
      }
    } else {
      this.buffer = buffer;
    }
    this.position = buffer.length;
  } else {
    // No data supplied: allocate an empty backing store of BUFFER_SIZE bytes.
    if(typeof Buffer != 'undefined') {
      this.buffer = new Buffer(Binary.BUFFER_SIZE);
    } else if(typeof Uint8Array != 'undefined'){
      this.buffer = new Uint8Array(new ArrayBuffer(Binary.BUFFER_SIZE));
    } else {
      this.buffer = new Array(Binary.BUFFER_SIZE);
    }
    // Set position to start of buffer
    this.position = 0;
  }
};
/**
 * Updates this binary with byte_value.
 *
 * @method
 * @param {(string|number|Array|Uint8Array)} byte_value a single byte to append:
 *   a one-character string, a number in [0, 255], or a one-element byte array.
 */
Binary.prototype.put = function put(byte_value) {
  // If it's a string (or array-like) it must contain exactly one element
  if(byte_value['length'] != null && typeof byte_value != 'number' && byte_value.length != 1) throw new Error("only accepts single character String, Uint8Array or Array");
  // BUGFIX: the original range check read
  //   `typeof byte_value != 'number' && byte_value < 0 || byte_value > 255`
  // which binds as `(A && B) || C`, so negative numbers slipped through
  // unvalidated. Validate the full unsigned byte range for numbers.
  if(typeof byte_value == 'number' && (byte_value < 0 || byte_value > 255)) throw new Error("only accepts number in a valid unsigned byte range 0-255");
  // Decode the byte value once
  var decoded_byte = null;
  if(typeof byte_value == 'string') {
    decoded_byte = byte_value.charCodeAt(0);
  } else if(byte_value['length'] != null) {
    decoded_byte = byte_value[0];
  } else {
    decoded_byte = byte_value;
  }
  if(this.buffer.length > this.position) {
    // Room left in the current backing store
    this.buffer[this.position++] = decoded_byte;
  } else {
    if(typeof Buffer != 'undefined' && Buffer.isBuffer(this.buffer)) {
      // Create additional overflow buffer (grow by BUFFER_SIZE)
      var buffer = new Buffer(Binary.BUFFER_SIZE + this.buffer.length);
      // Combine the two buffers together
      this.buffer.copy(buffer, 0, 0, this.buffer.length);
      this.buffer = buffer;
      this.buffer[this.position++] = decoded_byte;
    } else {
      var buffer = null;
      // Create a new buffer (typed or normal array)
      if(Object.prototype.toString.call(this.buffer) == '[object Uint8Array]') {
        buffer = new Uint8Array(new ArrayBuffer(Binary.BUFFER_SIZE + this.buffer.length));
      } else {
        buffer = new Array(Binary.BUFFER_SIZE + this.buffer.length);
      }
      // We need to copy all the content to the new array
      for(var i = 0; i < this.buffer.length; i++) {
        buffer[i] = this.buffer[i];
      }
      // Reassign the buffer
      this.buffer = buffer;
      // Write the byte
      this.buffer[this.position++] = decoded_byte;
    }
  }
};
/**
 * Writes a buffer or string to the binary.
 *
 * @method
 * @param {(Buffer|string|Array|Uint8Array)} string a string or buffer to be written to the Binary BSON object.
 * @param {number} [offset] offset at which to write the content (defaults to the current position).
 * @return {null}
 */
Binary.prototype.write = function write(string, offset) {
offset = typeof offset == 'number' ? offset : this.position;
// If the buffer is too small let's extend the buffer
if(this.buffer.length < offset + string.length) {
var buffer = null;
// If we are in node.js
if(typeof Buffer != 'undefined' && Buffer.isBuffer(this.buffer)) {
buffer = new Buffer(this.buffer.length + string.length);
this.buffer.copy(buffer, 0, 0, this.buffer.length);
} else if(Object.prototype.toString.call(this.buffer) == '[object Uint8Array]') {
// Create a new buffer
buffer = new Uint8Array(new ArrayBuffer(this.buffer.length + string.length))
// Copy the content (only bytes up to the current position are preserved)
for(var i = 0; i < this.position; i++) {
buffer[i] = this.buffer[i];
}
}
// NOTE(review): when the backing store is a plain Array neither branch above
// runs, so `buffer` stays null and the next line clobbers this.buffer —
// verify callers never grow an Array-backed Binary.
// Assign the new buffer
this.buffer = buffer;
}
// Buffer-to-Buffer: use the native copy
if(typeof Buffer != 'undefined' && Buffer.isBuffer(string) && Buffer.isBuffer(this.buffer)) {
string.copy(this.buffer, offset, 0, string.length);
this.position = (offset + string.length) > this.position ? (offset + string.length) : this.position;
// offset = string.length
} else if(typeof Buffer != 'undefined' && typeof string == 'string' && Buffer.isBuffer(this.buffer)) {
// String into a node Buffer: write one byte per character ('binary' encoding)
this.buffer.write(string, 'binary', offset);
this.position = (offset + string.length) > this.position ? (offset + string.length) : this.position;
// offset = string.length;
} else if(Object.prototype.toString.call(string) == '[object Uint8Array]'
|| Object.prototype.toString.call(string) == '[object Array]' && typeof string != 'string') {
// Byte-array source: copy element by element
for(var i = 0; i < string.length; i++) {
this.buffer[offset++] = string[i];
}
this.position = offset > this.position ? offset : this.position;
} else if(typeof string == 'string') {
// String into a non-Buffer backing store: copy raw char codes
for(var i = 0; i < string.length; i++) {
this.buffer[offset++] = string.charCodeAt(i);
}
this.position = offset > this.position ? offset : this.position;
}
};
/**
 * Reads **length** bytes starting at **position**.
 *
 * @method
 * @param {number} position read from the given position in the Binary.
 * @param {number} length the number of bytes to read (defaults to the current
 *   write position when missing or non-positive).
 * @return {Buffer|Uint8Array|Array}
 */
Binary.prototype.read = function read(position, length) {
  // Default to everything written so far when no positive length is given.
  var count = (length && length > 0) ? length : this.position;

  // Buffers, typed arrays and plain arrays all expose slice(); prefer it.
  if(this.buffer['slice']) {
    return this.buffer.slice(position, position + count);
  }

  // Fallback: copy byte-by-byte into a fresh container.
  var out = typeof Uint8Array != 'undefined'
    ? new Uint8Array(new ArrayBuffer(count))
    : new Array(count);
  for(var i = 0; i < count; i++) {
    out[i] = this.buffer[position + i];
  }
  return out;
};
/**
 * Returns the value of this binary as a string, or as the raw backing bytes
 * when asRaw is true.
 *
 * @method
 * @param {boolean} [asRaw] return the raw bytes instead of a binary string.
 * @return {string|Buffer|Uint8Array|Array}
 */
Binary.prototype.value = function value(asRaw) {
asRaw = asRaw == null ? false : asRaw;
// Optimize to serialize for the situation where the data == size of buffer
if(asRaw && typeof Buffer != 'undefined' && Buffer.isBuffer(this.buffer) && this.buffer.length == this.position)
return this.buffer;
// If it's a node.js buffer object
if(typeof Buffer != 'undefined' && Buffer.isBuffer(this.buffer)) {
return asRaw ? this.buffer.slice(0, this.position) : this.buffer.toString('binary', 0, this.position);
} else {
if(asRaw) {
// we support the slice command use it
if(this.buffer['slice'] != null) {
return this.buffer.slice(0, this.position);
} else {
// Create a new buffer to copy content to
var newBuffer = Object.prototype.toString.call(this.buffer) == '[object Uint8Array]' ? new Uint8Array(new ArrayBuffer(this.position)) : new Array(this.position);
// Copy content
for(var i = 0; i < this.position; i++) {
newBuffer[i] = this.buffer[i];
}
// Return the buffer
return newBuffer;
}
} else {
// Non-raw, non-Buffer backing store: build a binary string byte by byte
return convertArraytoUtf8BinaryString(this.buffer, 0, this.position);
}
}
};
/**
 * Length.
 *
 * @method
 * @return {number} the number of bytes written so far.
 */
Binary.prototype.length = function length() {
  return this.position;
};

/**
 * Base64 representation of the written bytes (empty string when no buffer).
 * @ignore
 */
Binary.prototype.toJSON = function() {
  if(this.buffer == null) return '';
  return this.buffer.toString('base64');
};

/**
 * String representation of the written bytes in the given encoding.
 * @ignore
 */
Binary.prototype.toString = function(format) {
  if(this.buffer == null) return '';
  return this.buffer.slice(0, this.position).toString(format);
};
/**
 * Binary default subtype
 * @ignore
 */
var BSON_BINARY_SUBTYPE_DEFAULT = 0;

/**
 * Copy the character codes of a string into a fresh byte container
 * (Uint8Array when available, otherwise a plain Array).
 * @ignore
 */
var writeStringToArray = function(data) {
  var length = data.length;
  var buffer = typeof Uint8Array != 'undefined'
    ? new Uint8Array(new ArrayBuffer(length))
    : new Array(length);
  for(var i = 0; i < length; i++) {
    buffer[i] = data.charCodeAt(i);
  }
  return buffer;
};
/**
 * Convert an Array or Uint8Array slice [startIndex, endIndex) to a binary
 * string by mapping each byte through String.fromCharCode.
 *
 * @ignore
 */
var convertArraytoUtf8BinaryString = function(byteArray, startIndex, endIndex) {
  var chars = [];
  for(var i = startIndex; i < endIndex; i++) {
    chars.push(String.fromCharCode(byteArray[i]));
  }
  return chars.join("");
};
// Default allocation size (in bytes) for a Binary created without data,
// and the growth increment used by put().
Binary.BUFFER_SIZE = 256;
/**
 * Default BSON type
 *
 * @classconstant SUBTYPE_DEFAULT
 **/
Binary.SUBTYPE_DEFAULT = 0;
/**
 * Function BSON type
 *
 * @classconstant SUBTYPE_FUNCTION
 **/
Binary.SUBTYPE_FUNCTION = 1;
/**
 * Byte Array BSON type
 *
 * @classconstant SUBTYPE_BYTE_ARRAY
 **/
Binary.SUBTYPE_BYTE_ARRAY = 2;
/**
 * OLD UUID BSON type
 *
 * @classconstant SUBTYPE_UUID_OLD
 **/
Binary.SUBTYPE_UUID_OLD = 3;
/**
 * UUID BSON type
 *
 * @classconstant SUBTYPE_UUID
 **/
Binary.SUBTYPE_UUID = 4;
/**
 * MD5 BSON type
 *
 * @classconstant SUBTYPE_MD5
 **/
Binary.SUBTYPE_MD5 = 5;
/**
 * User BSON type
 *
 * @classconstant SUBTYPE_USER_DEFINED
 **/
Binary.SUBTYPE_USER_DEFINED = 128;
/**
 * Expose.
 */
module.exports = Binary;
module.exports.Binary = Binary;

View File

@@ -0,0 +1,385 @@
/**
 * Binary Parser.
 * Jonas Raoni Soares Silva
 * http://jsfromhell.com/classes/binary-parser [v1.0]
 */

// Shorthand for building one-character strings from char codes.
var chr = String.fromCharCode;

// Precomputed powers of two: maxBits[i] === 2^i for i in [0, 63].
var maxBits = [];
for (var bit = 0; bit < 64; bit++) {
  maxBits[bit] = Math.pow(2, bit);
}

/**
 * Parser over raw binary strings (one character per byte).
 * @param {boolean} bigEndian read/write most-significant byte first.
 * @param {boolean} allowExceptions throw on warnings instead of continuing.
 */
function BinaryParser (bigEndian, allowExceptions) {
  if (!(this instanceof BinaryParser)) {
    return new BinaryParser(bigEndian, allowExceptions);
  }
  this.bigEndian = bigEndian;
  this.allowExceptions = allowExceptions;
}
// Report a codec problem: throws when `this.allowExceptions` is truthy,
// otherwise returns 1 so callers can continue.
// NOTE(review): this is attached statically and invoked as `this.warn(...)`
// from the static codec helpers, where `this` is the BinaryParser function
// itself — `allowExceptions` is then normally undefined, so no throw occurs;
// confirm whether instance-level behavior was intended.
BinaryParser.warn = function warn (msg) {
if (this.allowExceptions) {
throw new Error(msg);
}
return 1;
};
/**
 * Decode an IEEE-754 floating point value from a raw binary string.
 * @param {string} data raw bytes, one character per byte.
 * @param {number} precisionBits significand width (23 for float, 52 for double).
 * @param {number} exponentBits exponent width (8 for float, 11 for double).
 * @return {number} the decoded value (NaN/±Infinity for the special encodings).
 */
BinaryParser.decodeFloat = function decodeFloat (data, precisionBits, exponentBits) {
var b = new this.Buffer(this.bigEndian, data);
b.checkBuffer(precisionBits + exponentBits + 1);
// Exponent bias, sign bit and raw exponent field
var bias = maxBits[exponentBits - 1] - 1
, signal = b.readBits(precisionBits + exponentBits, 1)
, exponent = b.readBits(precisionBits, exponentBits)
, significand = 0
, divisor = 2
, curByte = b.buffer.length + (-precisionBits >> 3) - 1;
// Accumulate the significand bit by bit, most significant bit first
do {
for (var byteValue = b.buffer[ ++curByte ], startBit = precisionBits % 8 || 8, mask = 1 << startBit; mask >>= 1; ( byteValue & mask ) && ( significand += 1 / divisor ), divisor *= 2 );
} while (precisionBits -= startBit);
// All-ones exponent encodes NaN/±Infinity; zero exponent encodes subnormals
return exponent == ( bias << 1 ) + 1 ? significand ? NaN : signal ? -Infinity : +Infinity : ( 1 + signal * -2 ) * ( exponent || significand ? !exponent ? Math.pow( 2, -bias + 1 ) * significand : Math.pow( 2, exponent - bias ) * ( 1 + significand ) : 0 );
};
/**
 * Decode an integer from a raw binary string.
 * @param {string} data raw bytes.
 * @param {number} bits width of the integer in bits.
 * @param {boolean} signed interpret as two's complement.
 * @param {boolean} [forceBigEndian] override the instance byte order.
 * @return {number}
 */
BinaryParser.decodeInt = function decodeInt (data, bits, signed, forceBigEndian) {
var b = new this.Buffer(this.bigEndian || forceBigEndian, data)
, x = b.readBits(0, bits)
, max = maxBits[bits]; //max = Math.pow( 2, bits );
// Map the upper half of the unsigned range to negative values
return signed && x >= max / 2
? x - max
: x;
};
/**
 * Encode a number as an IEEE-754 floating point value in a raw binary string.
 * @param {number|string} data value to encode (run through parseFloat).
 * @param {number} precisionBits significand width (23 for float, 52 for double).
 * @param {number} exponentBits exponent width (8 for float, 11 for double).
 * @return {string} raw bytes, one character per byte, in instance byte order.
 */
BinaryParser.encodeFloat = function encodeFloat (data, precisionBits, exponentBits) {
// Exponent bias, representable exponent range, and special-value status
// (status is NaN/±Infinity when the input is one of those, else 0)
var bias = maxBits[exponentBits - 1] - 1
, minExp = -bias + 1
, maxExp = bias
, minUnnormExp = minExp - precisionBits
, n = parseFloat(data)
, status = isNaN(n) || n == -Infinity || n == +Infinity ? n : 0
, exp = 0
, len = 2 * bias + 1 + precisionBits + 3
, bin = new Array(len)
, signal = (n = status !== 0 ? 0 : n) < 0
, intPart = Math.floor(n = Math.abs(n))
, floatPart = n - intPart
, lastBit
, rounded
, result
, i
, j;
// Build the binary expansion of the integer and fractional parts into bin[]
for (i = len; i; bin[--i] = 0);
for (i = bias + 2; intPart && i; bin[--i] = intPart % 2, intPart = Math.floor(intPart / 2));
for (i = bias + 1; floatPart > 0 && i; (bin[++i] = ((floatPart *= 2) >= 1) - 0 ) && --floatPart);
for (i = -1; ++i < len && !bin[i];);
// Round to nearest at the last representable significand bit
if (bin[(lastBit = precisionBits - 1 + (i = (exp = bias + 1 - i) >= minExp && exp <= maxExp ? i + 1 : bias + 1 - (exp = minExp - 1))) + 1]) {
if (!(rounded = bin[lastBit])) {
for (j = lastBit + 2; !rounded && j < len; rounded = bin[j++]);
}
for (j = lastBit + 1; rounded && --j >= 0; (bin[j] = !bin[j] - 0) && (rounded = 0));
}
for (i = i - 2 < 0 ? -1 : i - 3; ++i < len && !bin[i];);
// Clamp the exponent; subnormals use the minimum exponent encoding
if ((exp = bias + 1 - i) >= minExp && exp <= maxExp) {
++i;
} else if (exp < minExp) {
exp != bias + 1 - len && exp < minUnnormExp && this.warn("encodeFloat::float underflow");
i = bias + 1 - (exp = minExp - 1);
}
// Overflow / NaN / ±Infinity use the all-ones exponent encoding
if (intPart || status !== 0) {
this.warn(intPart ? "encodeFloat::float overflow" : "encodeFloat::" + status);
exp = maxExp + 1;
i = bias + 2;
if (status == -Infinity) {
signal = 1;
} else if (isNaN(status)) {
bin[i] = 1;
}
}
// Assemble sign + exponent + significand bits, then pack into bytes.
// NOTE(review): `r` below is assigned without `var` and leaks into the
// enclosing/global scope — confirm before tightening.
for (n = Math.abs(exp + bias), j = exponentBits + 1, result = ""; --j; result = (n % 2) + result, n = n >>= 1);
for (n = 0, j = 0, i = (result = (signal ? "1" : "0") + result + bin.slice(i, i + precisionBits).join("")).length, r = []; i; j = (j + 1) % 8) {
n += (1 << j) * result.charAt(--i);
if (j == 7) {
r[r.length] = String.fromCharCode(n);
n = 0;
}
}
r[r.length] = n
? String.fromCharCode(n)
: "";
return (this.bigEndian ? r.reverse() : r).join("");
};
/**
 * Encode an integer as a raw binary string of ceil(bits/8) bytes.
 * Out-of-range values trigger warn() and encode as 0; negative inputs are
 * two's-complemented by adding 2^bits.
 * @param {number} data value to encode.
 * @param {number} bits target width in bits.
 * @param {boolean} signed whether negative inputs are allowed (range check only).
 * @param {boolean} [forceBigEndian] override the instance byte order.
 * @return {string}
 */
BinaryParser.encodeInt = function encodeInt (data, bits, signed, forceBigEndian) {
var max = maxBits[bits];
if (data >= max || data < -(max / 2)) {
this.warn("encodeInt::overflow");
data = 0;
}
if (data < 0) {
data += max;
}
// Emit bytes least-significant first
for (var r = []; data; r[r.length] = String.fromCharCode(data % 256), data = Math.floor(data / 256));
// Zero-pad up to the full byte width
for (bits = -(-bits >> 3) - r.length; bits--; r[r.length] = "\0");
return ((this.bigEndian || forceBigEndian) ? r.reverse() : r).join("");
};
// Fixed-width integer/float convenience wrappers around decodeInt/encodeInt
// and decodeFloat/encodeFloat.
//   to*   : raw binary string -> number
//   from* : number -> raw binary string
// Small/Byte = 8 bits, Short/Word = 16, Int/DWord = 32, Long/QWord = 64;
// signedness follows the original naming.
BinaryParser.toSmall = function (data) { return this.decodeInt(data, 8, true); };
BinaryParser.fromSmall = function (data) { return this.encodeInt(data, 8, true); };
BinaryParser.toByte = function (data) { return this.decodeInt(data, 8, false); };
BinaryParser.fromByte = function (data) { return this.encodeInt(data, 8, false); };
BinaryParser.toShort = function (data) { return this.decodeInt(data, 16, true); };
BinaryParser.fromShort = function (data) { return this.encodeInt(data, 16, true); };
BinaryParser.toWord = function (data) { return this.decodeInt(data, 16, false); };
BinaryParser.fromWord = function (data) { return this.encodeInt(data, 16, false); };
BinaryParser.toInt = function (data) { return this.decodeInt(data, 32, true); };
BinaryParser.fromInt = function (data) { return this.encodeInt(data, 32, true); };
BinaryParser.toLong = function (data) { return this.decodeInt(data, 64, true); };
BinaryParser.fromLong = function (data) { return this.encodeInt(data, 64, true); };
BinaryParser.toDWord = function (data) { return this.decodeInt(data, 32, false); };
BinaryParser.fromDWord = function (data) { return this.encodeInt(data, 32, false); };
BinaryParser.toQWord = function (data) { return this.decodeInt(data, 64, true); };
BinaryParser.fromQWord = function (data) { return this.encodeInt(data, 64, true); };
BinaryParser.toFloat = function (data) { return this.decodeFloat(data, 23, 8); };
BinaryParser.fromFloat = function (data) { return this.encodeFloat(data, 23, 8); };
BinaryParser.toDouble = function (data) { return this.decodeFloat(data, 52, 11); };
BinaryParser.fromDouble = function (data) { return this.encodeFloat(data, 52, 11); };
// Factor out the encode so it can be shared by add_header and push_int32
/**
 * Encode a 32-bit integer as four big-endian bytes.
 * BUGFIX: the previous divisors (0xffffff, 0xffff, 0xff) were off by one
 * (2^24 - 1 instead of 2^24, etc.), so values whose low bytes were all 0xff
 * — e.g. 0x00ffffff — produced an incorrect high byte. Unsigned shifts are
 * exact for the full 32-bit range.
 * @param {number} number value in [-2^31, 2^32).
 * @param {boolean} [asArray] return an array of four one-char strings
 *   instead of a concatenated string.
 * @return {string|Array}
 */
BinaryParser.encode_int32 = function encode_int32 (number, asArray) {
  // `>>> 0` maps negative 32-bit values to their unsigned equivalent,
  // matching the original `number + 0x100000000` adjustment.
  var unsigned = number >>> 0;
  var a = (unsigned >>> 24) & 0xff
    , b = (unsigned >>> 16) & 0xff
    , c = (unsigned >>> 8) & 0xff
    , d = unsigned & 0xff;
  return asArray ? [chr(a), chr(b), chr(c), chr(d)] : chr(a) + chr(b) + chr(c) + chr(d);
};
// Encode a 64-bit integer as eight big-endian bytes.
// NOTE(review): JavaScript bitwise operators truncate their operands to
// 32 bits, so the `unsigned &= 0xffffffffffffff`-style masks below do not
// behave as true 64-bit masks, and doubles cannot represent every 64-bit
// integer exactly — values above 2^32 will not round-trip reliably.
// Kept byte-identical for compatibility with existing serialized data.
BinaryParser.encode_int64 = function encode_int64 (number) {
var a, b, c, d, e, f, g, h, unsigned;
unsigned = (number < 0) ? (number + 0x10000000000000000) : number;
a = Math.floor(unsigned / 0xffffffffffffff);
unsigned &= 0xffffffffffffff;
b = Math.floor(unsigned / 0xffffffffffff);
unsigned &= 0xffffffffffff;
c = Math.floor(unsigned / 0xffffffffff);
unsigned &= 0xffffffffff;
d = Math.floor(unsigned / 0xffffffff);
unsigned &= 0xffffffff;
e = Math.floor(unsigned / 0xffffff);
unsigned &= 0xffffff;
f = Math.floor(unsigned / 0xffff);
unsigned &= 0xffff;
g = Math.floor(unsigned / 0xff);
unsigned &= 0xff;
h = Math.floor(unsigned);
return chr(a) + chr(b) + chr(c) + chr(d) + chr(e) + chr(f) + chr(g) + chr(h);
};
/**
 * UTF8 methods
 */
// Take a raw binary string and return a utf8 string.
// Decodes 1-, 2- and 3-byte sequences (code points up to U+FFFF);
// 4-byte sequences (astral plane) are not handled by this decoder.
BinaryParser.decode_utf8 = function decode_utf8 (binaryStr) {
var len = binaryStr.length
, decoded = ''
, i = 0
, c = 0
, c1 = 0
, c2 = 0
, c3;
while (i < len) {
c = binaryStr.charCodeAt(i);
if (c < 128) {
// 1-byte sequence: 0xxxxxxx
decoded += String.fromCharCode(c);
i++;
} else if ((c > 191) && (c < 224)) {
// 2-byte sequence: 110xxxxx 10xxxxxx
c2 = binaryStr.charCodeAt(i+1);
decoded += String.fromCharCode(((c & 31) << 6) | (c2 & 63));
i += 2;
} else {
// 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
c2 = binaryStr.charCodeAt(i+1);
c3 = binaryStr.charCodeAt(i+2);
decoded += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63));
i += 3;
}
}
return decoded;
};
// Encode a string as UTF-8 bytes (via the escape/encodeURIComponent trick)
// followed by a NUL terminator, i.e. a BSON-style cstring.
BinaryParser.encode_cstring = function encode_cstring (s) {
  var utf8Bytes = unescape(encodeURIComponent(s));
  return utf8Bytes + BinaryParser.fromByte(0);
};
// Take a utf8 string and return a raw binary string (one character per byte).
BinaryParser.encode_utf8 = function encode_utf8 (s) {
  var encoded = "";
  for (var n = 0, len = s.length; n < len; n++) {
    var c = s.charCodeAt(n);
    if (c < 128) {
      // 1-byte sequence: 0xxxxxxx
      encoded += String.fromCharCode(c);
    } else if (c < 2048) {
      // 2-byte sequence: 110xxxxx 10xxxxxx
      encoded += String.fromCharCode((c >> 6) | 192);
      encoded += String.fromCharCode((c & 63) | 128);
    } else {
      // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
      encoded += String.fromCharCode((c >> 12) | 224);
      encoded += String.fromCharCode(((c >> 6) & 63) | 128);
      encoded += String.fromCharCode((c & 63) | 128);
    }
  }
  return encoded;
};
/**
 * Debug helper: write each byte of `s` to stdout as two zero-padded hex
 * digits separated by spaces, followed by a blank line.
 * (The original conditional on `charCodeAt(i) < 32` had two byte-identical
 * branches — the test was dead code and has been collapsed.)
 * @param {string} s raw binary string.
 */
BinaryParser.hprint = function hprint (s) {
  var number;
  for (var i = 0, len = s.length; i < len; i++) {
    // Zero-pad single-digit hex values
    number = s.charCodeAt(i) <= 15
      ? "0" + s.charCodeAt(i).toString(16)
      : s.charCodeAt(i).toString(16);
    process.stdout.write(number + " ");
  }
  process.stdout.write("\n\n");
};
// Debug helper: log each byte of `s` in decimal via util.debug; printable
// bytes (code >= 32) are logged together with their character.
// NOTE(review): the inner function is named `hprint` — likely a copy-paste
// slip from the hex variant; harmless but confusing in stack traces.
BinaryParser.ilprint = function hprint (s) {
var number;
for (var i = 0, len = s.length; i < len; i++) {
if (s.charCodeAt(i) < 32) {
number = s.charCodeAt(i) <= 15
? "0" + s.charCodeAt(i).toString(10)
: s.charCodeAt(i).toString(10);
require('util').debug(number+' : ');
} else {
number = s.charCodeAt(i) <= 15
? "0" + s.charCodeAt(i).toString(10)
: s.charCodeAt(i).toString(10);
require('util').debug(number+' : '+ s.charAt(i));
}
}
};
// Debug helper: log each byte of `s` in hex via util.debug; printable
// bytes (code >= 32) are logged together with their character.
// NOTE(review): inner function name `hprint` shared with the other debug
// helpers — likely copy-paste, harmless.
BinaryParser.hlprint = function hprint (s) {
var number;
for (var i = 0, len = s.length; i < len; i++) {
if (s.charCodeAt(i) < 32) {
number = s.charCodeAt(i) <= 15
? "0" + s.charCodeAt(i).toString(16)
: s.charCodeAt(i).toString(16);
require('util').debug(number+' : ');
} else {
number = s.charCodeAt(i) <= 15
? "0" + s.charCodeAt(i).toString(16)
: s.charCodeAt(i).toString(16);
require('util').debug(number+' : '+ s.charAt(i));
}
}
};
/**
 * BinaryParser buffer constructor.
 * Wraps a raw binary string as an array of byte values for bit-level reads.
 * @param {boolean|number} bigEndian byte-order flag (falsy = little endian).
 * @param {string} buffer raw binary string loaded via setBuffer().
 */
function BinaryParserBuffer (bigEndian, buffer) {
this.bigEndian = bigEndian || 0;
this.buffer = [];
this.setBuffer(buffer);
};
/**
 * Load a raw binary string into this.buffer as an array of char codes.
 * Bytes are stored back-to-front (b[0] receives the last character), and for
 * big-endian buffers the array is then reversed, so readBits always operates
 * in a consistent order. Missing/empty data leaves the buffer untouched.
 * @param {string} data raw binary string.
 */
BinaryParserBuffer.prototype.setBuffer = function setBuffer (data) {
var l, i, b;
if (data) {
i = l = data.length;
b = this.buffer = new Array(l);
// Reverse copy: b[l - i] is computed before --i, so b[0] = last char code
for (; i; b[l - i] = data.charCodeAt(--i));
this.bigEndian && b.reverse();
}
};
// True when the buffer holds at least ceil(neededBits / 8) bytes.
BinaryParserBuffer.prototype.hasNeededBits = function hasNeededBits (neededBits) {
  var neededBytes = -(-neededBits >> 3); // ceiling division by 8
  return this.buffer.length >= neededBytes;
};

// Throw when fewer than neededBits bits are available in the buffer.
BinaryParserBuffer.prototype.checkBuffer = function checkBuffer (neededBits) {
  if (this.hasNeededBits(neededBits)) {
    return;
  }
  throw new Error("checkBuffer::missing bytes");
};
/**
 * Read `length` bits starting at bit offset `start` from the stored bytes.
 * Returns 0 for non-positive lengths or negative offsets; throws via
 * checkBuffer when the buffer is too short.
 * @param {number} start bit offset.
 * @param {number} length number of bits to read.
 * @return {number} the bits as an unsigned number.
 */
BinaryParserBuffer.prototype.readBits = function readBits (start, length) {
//shl fix: Henri Torgemane ~1996 (compressed by Jonas Raoni)
// Shift-left that keeps values above 2^31 exact by working in doubles
function shl (a, b) {
for (; b--; a = ((a %= 0x7fffffff + 1) & 0x40000000) == 0x40000000 ? a * 2 : (a - 0x40000000) * 2 + 0x7fffffff + 1);
return a;
}
if (start < 0 || length <= 0) {
return 0;
}
this.checkBuffer(start + length);
// Walk from the byte containing `start` toward the byte containing the last
// bit, merging partial bytes at both ends
var offsetLeft
, offsetRight = start % 8
, curByte = this.buffer.length - ( start >> 3 ) - 1
, lastByte = this.buffer.length + ( -( start + length ) >> 3 )
, diff = curByte - lastByte
, sum = ((this.buffer[ curByte ] >> offsetRight) & ((1 << (diff ? 8 - offsetRight : length)) - 1)) + (diff && (offsetLeft = (start + length) % 8) ? (this.buffer[lastByte++] & ((1 << offsetLeft) - 1)) << (diff-- << 3) - offsetRight : 0);
for(; diff; sum += shl(this.buffer[lastByte++], (diff-- << 3) - offsetRight));
return sum;
};
/**
 * Expose.
 */
// The buffer wrapper is reachable as BinaryParser.Buffer (used by the
// decode/encode helpers via `new this.Buffer(...)`).
BinaryParser.Buffer = BinaryParserBuffer;
exports.BinaryParser = BinaryParser;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,776 @@
// BSON serializer internals: per-type codec helpers and shared state.
var writeIEEE754 = require('./float_parser').writeIEEE754
, Long = require('./long').Long
, Double = require('./double').Double
, Timestamp = require('./timestamp').Timestamp
, ObjectID = require('./objectid').ObjectID
, Symbol = require('./symbol').Symbol
, Code = require('./code').Code
, MinKey = require('./min_key').MinKey
, MaxKey = require('./max_key').MaxKey
, DBRef = require('./db_ref').DBRef
, Binary = require('./binary').Binary
, BinaryParser = require('./binary_parser').BinaryParser;
// Max Document Buffer size
// NOTE(review): this single 16MB scratch buffer is shared by every
// serialize* helper in this module, so serialization is not reentrant
// across concurrent/nested calls — verify callers serialize one document
// at a time.
var buffer = new Buffer(1024 * 1024 * 16);
/**
 * Validate a BSON document key. Throws when the key contains a NUL byte, or
 * (unless dollarsAndDotsOk) starts with '$' or contains '.'. Empty keys are
 * accepted.
 * @param {string} key the document key to validate.
 * @param {boolean} [dollarsAndDotsOk] allow a '$' prefix and '.' characters.
 */
var checkKey = function checkKey (key, dollarsAndDotsOk) {
  if (!key.length) return;
  // Keys are serialized as NUL-terminated cstrings, so an embedded NUL byte
  // would silently truncate the key.
  if (key.indexOf("\x00") !== -1) {
    throw Error("key " + key + " must not contain null bytes");
  }
  if (dollarsAndDotsOk) return;
  if (key[0] === '$') {
    throw Error("key " + key + " must not start with '$'");
  }
  if (key.indexOf('.') !== -1) {
    throw Error("key " + key + " must not contain '.'");
  }
};
/**
 * Serialize a JS string into the shared module-level `buffer` at `index`.
 * Layout: type byte, NUL-terminated key, little-endian int32 byte length
 * (including the terminating NUL), utf8 bytes, NUL.
 * @param {string} key document key.
 * @param {string} value string value.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeString = function(key, value, index) {
// Encode String type
buffer[index++] = BSON.BSON_DATA_STRING;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name (skip one byte for the key's NUL terminator)
index = index + numberOfWrittenBytes + 1;
buffer[index - 1] = 0;
// Calculate size (utf8 byte length plus trailing NUL)
var size = Buffer.byteLength(value) + 1;
// Write the size of the string to buffer (little-endian int32)
buffer[index + 3] = (size >> 24) & 0xff;
buffer[index + 2] = (size >> 16) & 0xff;
buffer[index + 1] = (size >> 8) & 0xff;
buffer[index] = size & 0xff;
// Adjust the index
index = index + 4;
// Write the string
buffer.write(value, index, 'utf8');
// Update index
index = index + size - 1;
// Write zero (string terminator)
buffer[index++] = 0;
return index;
}
/**
 * Serialize a JS number into the shared module-level `buffer` at `index`.
 * Integers within int32 range become BSON int32; integers within the safe
 * JS integer range become double; larger integral values become int64;
 * all non-integers become double.
 * @param {string} key document key.
 * @param {number} value numeric value.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeNumber = function(key, value, index) {
// We have an integer value
if(Math.floor(value) === value && value >= BSON.JS_INT_MIN && value <= BSON.JS_INT_MAX) {
// If the value fits in 32 bits encode as int, if it fits in a double
// encode it as a double, otherwise long
if(value >= BSON.BSON_INT32_MIN && value <= BSON.BSON_INT32_MAX) {
// Set int type 32 bits or less
buffer[index++] = BSON.BSON_DATA_INT;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name (NUL-terminated key)
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
// Write the int value (little-endian int32)
buffer[index++] = value & 0xff;
buffer[index++] = (value >> 8) & 0xff;
buffer[index++] = (value >> 16) & 0xff;
buffer[index++] = (value >> 24) & 0xff;
} else if(value >= BSON.JS_INT_MIN && value <= BSON.JS_INT_MAX) {
// Encode as double
buffer[index++] = BSON.BSON_DATA_NUMBER;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
// Write float
writeIEEE754(buffer, value, index, 'little', 52, 8);
// Adjust index
index = index + 8;
} else {
// Set long type
buffer[index++] = BSON.BSON_DATA_LONG;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
var longVal = Long.fromNumber(value);
var lowBits = longVal.getLowBits();
var highBits = longVal.getHighBits();
// Encode low bits
buffer[index++] = lowBits & 0xff;
buffer[index++] = (lowBits >> 8) & 0xff;
buffer[index++] = (lowBits >> 16) & 0xff;
buffer[index++] = (lowBits >> 24) & 0xff;
// Encode high bits
buffer[index++] = highBits & 0xff;
buffer[index++] = (highBits >> 8) & 0xff;
buffer[index++] = (highBits >> 16) & 0xff;
buffer[index++] = (highBits >> 24) & 0xff;
}
} else {
// Encode as double
buffer[index++] = BSON.BSON_DATA_NUMBER;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
// Write float
writeIEEE754(buffer, value, index, 'little', 52, 8);
// Adjust index
index = index + 8;
}
return index;
}
/**
 * Serialize an undefined value into the shared module-level `buffer`.
 * Written as BSON null: a type byte plus the NUL-terminated key, no payload.
 * @param {string} key document key.
 * @param {undefined} value unused.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeUndefined = function(key, value, index) {
  buffer[index++] = BSON.BSON_DATA_NULL;
  // NUL-terminated key
  var written = buffer.write(key, index, 'utf8');
  index += written;
  buffer[index++] = 0;
  return index;
}
/**
 * Serialize a boolean into the shared module-level `buffer`.
 * Layout: type byte, NUL-terminated key, one payload byte (1 or 0).
 * @param {string} key document key.
 * @param {boolean} value boolean value.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeBoolean = function(key, value, index) {
  buffer[index++] = BSON.BSON_DATA_BOOLEAN;
  // NUL-terminated key
  var written = buffer.write(key, index, 'utf8');
  index += written;
  buffer[index++] = 0;
  // Payload: 1 for truthy, 0 for falsy
  buffer[index++] = value ? 1 : 0;
  return index;
}
/**
 * Serialize a Date into the shared module-level `buffer`.
 * Layout: type byte, NUL-terminated key, little-endian int64 of
 * milliseconds since the epoch (value.getTime()).
 * @param {string} key document key.
 * @param {Date} value date value.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeDate = function(key, value, index) {
// Write the type
buffer[index++] = BSON.BSON_DATA_DATE;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
// Write the date as a 64-bit millisecond count
var dateInMilis = Long.fromNumber(value.getTime());
var lowBits = dateInMilis.getLowBits();
var highBits = dateInMilis.getHighBits();
// Encode low bits
buffer[index++] = lowBits & 0xff;
buffer[index++] = (lowBits >> 8) & 0xff;
buffer[index++] = (lowBits >> 16) & 0xff;
buffer[index++] = (lowBits >> 24) & 0xff;
// Encode high bits
buffer[index++] = highBits & 0xff;
buffer[index++] = (highBits >> 8) & 0xff;
buffer[index++] = (highBits >> 16) & 0xff;
buffer[index++] = (highBits >> 24) & 0xff;
return index;
}
/**
 * Serialize a RegExp into the shared module-level `buffer`.
 * Layout: type byte, NUL-terminated key, NUL-terminated pattern source,
 * NUL-terminated option characters.
 * NOTE(review): the JS `global` flag is emitted as BSON option 's' — BSON has
 * no equivalent of `g`, so this mapping is historical; confirm before changing.
 * NOTE(review): a pattern source containing a NUL byte is not rejected here
 * and would truncate the cstring.
 * @param {string} key document key.
 * @param {RegExp} value regular expression value.
 * @param {number} index write offset into the shared buffer.
 * @return {number} the index just past the written element.
 */
var serializeRegExp = function(key, value, index) {
// Write the type
buffer[index++] = BSON.BSON_DATA_REGEXP;
// Number of written bytes
var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
// Encode the name
index = index + numberOfWrittenBytes;
buffer[index++] = 0;
// Write the regular expression string
buffer.write(value.source, index, 'utf8');
// Adjust the index
index = index + Buffer.byteLength(value.source);
// Write zero (pattern terminator)
buffer[index++] = 0x00;
// Write the parameters
if(value.global) buffer[index++] = 0x73; // s
if(value.ignoreCase) buffer[index++] = 0x69; // i
if(value.multiline) buffer[index++] = 0x6d; // m
// Add ending zero (options terminator)
buffer[index++] = 0x00;
return index;
}
// Serializes null, MinKey or MaxKey. All three types consist of only a type
// byte and the element name — they carry no value payload.
// Returns the index one past the last written byte.
var serializeMinMax = function(key, value, index) {
  // Choose the element type byte
  if(value === null) {
    buffer[index++] = BSON.BSON_DATA_NULL;
  } else if(value instanceof MinKey) {
    buffer[index++] = BSON.BSON_DATA_MIN_KEY;
  } else {
    buffer[index++] = BSON.BSON_DATA_MAX_KEY;
  }
  // Element name as a cstring
  var written = buffer.write(key, index, 'utf8');
  index += written;
  buffer[index++] = 0;
  return index;
}
// Serializes an ObjectID as BSON type 0x07: type byte, element name cstring,
// then the raw 12 identifier bytes.
// Returns the index one past the last written byte.
var serializeObjectId = function(key, value, index) {
  // Write the type
  buffer[index++] = BSON.BSON_DATA_OID;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Copy the 12 id bytes; binId is assumed to be a 12-element byte
  // container populated by the ObjectID class — TODO confirm.
  for(var j = 0; j < 12; j++) {
    buffer[index + j] = value.binId[j];
  }
  // Adjust index past the 12 id bytes
  index = index + 12;
  return index;
}
// Serializes a raw Node Buffer as BSON binary (type 0x05) with the default
// subtype 0x00: type byte, name cstring, int32 length, subtype byte, data.
// Returns the index one past the last written byte.
var serializeBuffer = function(key, value, index) {
  // Write the type
  buffer[index++] = BSON.BSON_DATA_BINARY;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Payload length in bytes
  var size = value.length;
  // Write the length as a little-endian int32
  buffer[index++] = size & 0xff;
  buffer[index++] = (size >> 8) & 0xff;
  buffer[index++] = (size >> 16) & 0xff;
  buffer[index++] = (size >> 24) & 0xff;
  // Write the default subtype (0x00, "generic binary")
  buffer[index++] = BSON.BSON_BINARY_SUBTYPE_DEFAULT;
  // Copy the content from the source buffer into the target
  value.copy(buffer, index, 0, size);
  // Adjust the index past the payload
  index = index + size;
  return index;
}
// Serializes a plain object or array as an embedded BSON document (0x03) or
// array (0x04). The sub-document's int32 length header is written by
// serializeInto itself, so this function only writes the type and the name.
// Fix: removed the dead `size` local that was computed but never used.
// Returns the index one past the last written byte.
var serializeObject = function(key, value, index, checkKeys, depth) {
  // Write the type: array vs document
  buffer[index++] = Array.isArray(value) ? BSON.BSON_DATA_ARRAY : BSON.BSON_DATA_OBJECT;
  // Write the element name as a cstring
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Recurse; serializeInto writes its own length header at `index`
  var endIndex = serializeInto(value, checkKeys, index, depth + 1);
  return endIndex;
}
// Serializes a Long (type 0x12) or Timestamp (type 0x11) as a little-endian
// 64-bit value split into two signed 32-bit halves.
// Returns the index one past the last written byte.
var serializeLong = function(key, value, index) {
  // Write the type, discriminating Long from Timestamp
  buffer[index++] = value instanceof Long ? BSON.BSON_DATA_LONG : BSON.BSON_DATA_TIMESTAMP;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Fetch the two 32-bit halves
  var lowBits = value.getLowBits();
  var highBits = value.getHighBits();
  // Encode low bits, little-endian
  buffer[index++] = lowBits & 0xff;
  buffer[index++] = (lowBits >> 8) & 0xff;
  buffer[index++] = (lowBits >> 16) & 0xff;
  buffer[index++] = (lowBits >> 24) & 0xff;
  // Encode high bits, little-endian
  buffer[index++] = highBits & 0xff;
  buffer[index++] = (highBits >> 8) & 0xff;
  buffer[index++] = (highBits >> 16) & 0xff;
  buffer[index++] = (highBits >> 24) & 0xff;
  return index;
}
// Serializes a Double wrapper as BSON double (type 0x01): an 8-byte
// little-endian IEEE 754 binary64 value.
// Returns the index one past the last written byte.
var serializeDouble = function(key, value, index) {
  // Encode as double
  buffer[index++] = BSON.BSON_DATA_NUMBER;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Write the 8-byte float (52-bit mantissa, little-endian)
  writeIEEE754(buffer, value, index, 'little', 52, 8);
  // Adjust index past the 8 payload bytes
  index = index + 8;
  return index;
}
// Serializes a Code value. With a non-empty scope it emits "code with scope"
// (type 0x0F): int32 total size, string code, embedded scope document.
// Without a scope it emits plain code (type 0x0D): just a BSON string.
// Returns the index one past the last written byte.
var serializeCode = function(key, value, index, checkKeys, depth) {
  if(value.scope != null && Object.keys(value.scope).length > 0) {
    // Write the type
    buffer[index++] = BSON.BSON_DATA_CODE_W_SCOPE;
    // Number of written bytes
    var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
    // Encode the name (cstring terminator follows)
    index = index + numberOfWrittenBytes;
    buffer[index++] = 0;
    // Starting index of the code_w_scope payload; its total size is
    // back-patched here once the scope document has been written.
    var startIndex = index;
    // Get the function source as a string
    var functionString = typeof value.code == 'string' ? value.code : value.code.toString();
    // String length including its trailing NUL
    var codeSize = Buffer.byteLength(functionString) + 1;
    // Skip the 4-byte total-size slot for now
    index = index + 4;
    // Write the code string length as a little-endian int32
    buffer[index] = codeSize & 0xff;
    buffer[index + 1] = (codeSize >> 8) & 0xff;
    buffer[index + 2] = (codeSize >> 16) & 0xff;
    buffer[index + 3] = (codeSize >> 24) & 0xff;
    // Write string into buffer
    buffer.write(functionString, index + 4, 'utf8');
    // Write the string's terminating NUL
    buffer[index + 4 + codeSize - 1] = 0;
    // Advance past length prefix and string
    index = index + codeSize + 4;
    // Serialize the scope as an embedded document
    var endIndex = serializeInto(value.scope, checkKeys, index, depth + 1)
    // Step back onto the scope document's trailing zero; it is rewritten
    // below, which also restores index to endIndex.
    index = endIndex - 1;
    // Total size of the code_w_scope payload
    var totalSize = endIndex - startIndex;
    // Back-patch the total size as a little-endian int32
    buffer[startIndex++] = totalSize & 0xff;
    buffer[startIndex++] = (totalSize >> 8) & 0xff;
    buffer[startIndex++] = (totalSize >> 16) & 0xff;
    buffer[startIndex++] = (totalSize >> 24) & 0xff;
    // Write trailing zero
    buffer[index++] = 0;
  } else {
    buffer[index++] = BSON.BSON_DATA_CODE;
    // Number of written bytes
    var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
    // Encode the name (cstring terminator follows)
    index = index + numberOfWrittenBytes;
    buffer[index++] = 0;
    // Function source as a string
    var functionString = value.code.toString();
    // String length including its trailing NUL
    var size = Buffer.byteLength(functionString) + 1;
    // Write the length as a little-endian int32
    buffer[index++] = size & 0xff;
    buffer[index++] = (size >> 8) & 0xff;
    buffer[index++] = (size >> 16) & 0xff;
    buffer[index++] = (size >> 24) & 0xff;
    // Write the string
    buffer.write(functionString, index, 'utf8');
    // Advance to the terminator position
    index = index + size - 1;
    // Write the terminating NUL
    buffer[index++] = 0;
  }
  return index;
}
// Serializes a Binary wrapper as BSON binary (type 0x05): type byte, name
// cstring, int32 length, subtype byte, then the payload bytes.
// Returns the index one past the last written byte.
var serializeBinary = function(key, value, index) {
  // Write the type
  buffer[index++] = BSON.BSON_DATA_BINARY;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Extract the underlying byte container
  var data = value.value(true);
  // Payload length = bytes actually written into the Binary
  var size = value.position;
  // Write the length as a little-endian int32
  buffer[index++] = size & 0xff;
  buffer[index++] = (size >> 8) & 0xff;
  buffer[index++] = (size >> 16) & 0xff;
  buffer[index++] = (size >> 24) & 0xff;
  // Write the subtype to the buffer
  buffer[index++] = value.sub_type;
  // Subtype 0x02 repeats the length as the payload's first 4 bytes.
  // NOTE(review): per the BSON spec the OUTER length for subtype 2 should be
  // inner length + 4; here both are written as `size` — confirm against the
  // spec / later js-bson versions before changing wire behavior.
  if(value.sub_type == Binary.SUBTYPE_BYTE_ARRAY) {
    buffer[index++] = size & 0xff;
    buffer[index++] = (size >> 8) & 0xff;
    buffer[index++] = (size >> 16) & 0xff;
    buffer[index++] = (size >> 24) & 0xff;
  }
  // Copy the payload bytes
  data.copy(buffer, index, 0, value.position);
  // Adjust the index past the payload
  index = index + value.position;
  return index;
}
// Serializes a Symbol wrapper as BSON symbol (type 0x0E): like a string —
// int32 length (including trailing NUL), utf8 bytes, NUL terminator.
// Returns the index one past the last written byte.
var serializeSymbol = function(key, value, index) {
  // Write the type
  buffer[index++] = BSON.BSON_DATA_SYMBOL;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  // Byte length of the symbol text including its trailing NUL
  var size = Buffer.byteLength(value.value) + 1;
  // Write the length as a little-endian int32
  buffer[index++] = size & 0xff;
  buffer[index++] = (size >> 8) & 0xff;
  buffer[index++] = (size >> 16) & 0xff;
  buffer[index++] = (size >> 24) & 0xff;
  // Write the string
  buffer.write(value.value, index, 'utf8');
  // Advance to the terminator position
  index = index + size - 1;
  // Write the terminating NUL
  buffer[index++] = 0x00;
  return index;
}
// Serializes a DBRef as an ordinary embedded document (type 0x03) holding
// $ref/$id and, when present, $db. The document's int32 size is back-patched
// after serialization.
// Returns the index one past the last written byte.
var serializeDBRef = function(key, value, index, depth) {
  // Write the type
  buffer[index++] = BSON.BSON_DATA_OBJECT;
  // Number of written bytes
  var numberOfWrittenBytes = buffer.write(key, index, 'utf8');
  // Encode the name (cstring terminator follows)
  index = index + numberOfWrittenBytes;
  buffer[index++] = 0;
  var startIndex = index;
  var endIndex;
  // Serialize the reference document, with $db only when a db was given
  if(null != value.db) {
    endIndex = serializeInto({
        '$ref': value.namespace
      , '$id' : value.oid
      , '$db' : value.db
    }, false, index, depth + 1);
  } else {
    endIndex = serializeInto({
        '$ref': value.namespace
      , '$id' : value.oid
    }, false, index, depth + 1);
  }
  // Size of the embedded document
  var size = endIndex - startIndex;
  // Back-patch the size as a little-endian int32 (serializeInto wrote it
  // too, but this keeps the historical behavior of writing it again here)
  buffer[startIndex++] = size & 0xff;
  buffer[startIndex++] = (size >> 8) & 0xff;
  buffer[startIndex++] = (size >> 16) & 0xff;
  buffer[startIndex++] = (size >> 24) & 0xff;
  // Return the index past the embedded document
  return endIndex;
}
// BSON serializer facade. Exposes the closure-level scratch buffer that all
// serialize* helpers above write into.
// NOTE(review): the scratch buffer is shared by all BSON instances created
// from this closure, so serialize() is not reentrant — confirm callers never
// serialize concurrently.
var BSON = function() {
  this.buffer = buffer;
}

// Serializes `object` into a newly allocated Buffer of exactly the right
// size and returns it. `index` optionally offsets the write inside the
// scratch buffer (defaults to 0).
BSON.prototype.serialize = function serialize(object, checkKeys, index) {
  // serializeInto returns the end index == total serialized size
  var finishedBuffer = new Buffer(serializeInto(object, checkKeys, index || 0, 0));
  // Copy the used prefix of the scratch buffer into the result
  this.buffer.copy(finishedBuffer, 0, 0, finishedBuffer.length);
  return finishedBuffer;
}
// Serializes `object` (plain object or array) into the shared closure
// `buffer`, starting at `startingIndex`. Emits the standard BSON document
// layout: int32 total size, the elements, then a trailing 0x00.
// Fix: added an explicit `=== null` branch in both dispatch chains. Without
// it, a null value (typeof null == 'object') fell through to the
// `['_bsontype']` property reads and threw a TypeError. serializeMinMax
// already handles null by emitting BSON null (0x0A).
// Returns the index one past the trailing zero (== startingIndex + size).
var serializeInto = function serializeInto(object, checkKeys, startingIndex, depth) {
  startingIndex = startingIndex || 0;
  // Reserve 4 bytes for the document length, back-patched at the end
  var index = startingIndex + 4;
  var self = this;
  // Arrays serialize as documents keyed by their stringified indices
  if(Array.isArray(object)) {
    for(var i = 0; i < object.length; i++) {
      var key = "" + i;
      var type = typeof object[i];
      // Validate the key unless it is one of the DBRef keys.
      // NOTE(review): passing !checkKeys here looks inverted — confirm
      // checkKey's signature before changing it.
      if(key != '$db' && key != '$ref' && key != '$id') {
        checkKey(key, !checkKeys);
      }
      // Dispatch on the value's JS type / BSON wrapper type
      if(type == 'string') {
        index = serializeString(key, object[i], index);
      } else if(type == 'number') {
        index = serializeNumber(key, object[i], index);
      } else if(type == 'undefined') {
        index = serializeUndefined(key, object[i], index);
      } else if(type == 'boolean') {
        index = serializeBoolean(key, object[i], index);
      } else if(object[i] === null) {
        // Null guard: must run before any '_bsontype' property access
        index = serializeMinMax(key, object[i], index);
      } else if(object[i] instanceof Date) {
        index = serializeDate(key, object[i], index);
      } else if(object[i] instanceof RegExp || Object.prototype.toString.call(object[i]) === '[object RegExp]') {
        index = serializeRegExp(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'MinKey' || object[i]['_bsontype'] == 'MaxKey') {
        index = serializeMinMax(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'ObjectID') {
        index = serializeObjectId(key, object[i], index);
      } else if(Buffer.isBuffer(object[i])) {
        index = serializeBuffer(key, object[i], index);
      } else if(type == 'object' && object[i]['_bsontype'] == null) {
        index = serializeObject(key, object[i], index, checkKeys, depth);
      } else if(object[i]['_bsontype'] == 'Long' || object[i]['_bsontype'] == 'Timestamp') {
        index = serializeLong(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'Double') {
        index = serializeDouble(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'Code') {
        index = serializeCode(key, object[i], index, checkKeys, depth);
      } else if(object[i]['_bsontype'] == 'Binary') {
        index = serializeBinary(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'Symbol') {
        index = serializeSymbol(key, object[i], index);
      } else if(object[i]['_bsontype'] == 'DBRef') {
        index = serializeDBRef(key, object[i], index, depth);
      }
    }
  } else {
    var keys = Object.keys(object);
    for(var i = 0; i < keys.length; i++) {
      var key = keys[i];
      var type = typeof object[key];
      // Validate the key unless it is one of the DBRef keys
      if(key != '$db' && key != '$ref' && key != '$id') {
        checkKey(key, !checkKeys);
      }
      // Dispatch on the value's JS type / BSON wrapper type
      if(type == 'string') {
        index = serializeString(key, object[key], index);
      } else if(type == 'number') {
        index = serializeNumber(key, object[key], index);
      } else if(type == 'undefined') {
        index = serializeUndefined(key, object[key], index);
      } else if(type == 'boolean') {
        index = serializeBoolean(key, object[key], index);
      } else if(object[key] === null) {
        // Null guard: must run before any '_bsontype' property access
        index = serializeMinMax(key, object[key], index);
      } else if(object[key] instanceof Date) {
        index = serializeDate(key, object[key], index);
      } else if(object[key] instanceof RegExp || Object.prototype.toString.call(object[key]) === '[object RegExp]') {
        index = serializeRegExp(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'MinKey' || object[key]['_bsontype'] == 'MaxKey') {
        index = serializeMinMax(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'ObjectID') {
        index = serializeObjectId(key, object[key], index);
      } else if(Buffer.isBuffer(object[key])) {
        index = serializeBuffer(key, object[key], index);
      } else if(type == 'object' && object[key]['_bsontype'] == null) {
        index = serializeObject(key, object[key], index, checkKeys, depth);
      } else if(object[key]['_bsontype'] == 'Long' || object[key]['_bsontype'] == 'Timestamp') {
        index = serializeLong(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'Double') {
        index = serializeDouble(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'Code') {
        index = serializeCode(key, object[key], index, checkKeys, depth);
      } else if(object[key]['_bsontype'] == 'Binary') {
        index = serializeBinary(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'Symbol') {
        index = serializeSymbol(key, object[key], index);
      } else if(object[key]['_bsontype'] == 'DBRef') {
        index = serializeDBRef(key, object[key], index, depth);
      }
    }
  }
  // Final padding byte for object
  buffer[index++] = 0x00;
  // Total document size, including the length prefix and trailing zero
  var size = index - startingIndex;
  // Back-patch the size as a little-endian int32
  buffer[startingIndex++] = size & 0xff;
  buffer[startingIndex++] = (size >> 8) & 0xff;
  buffer[startingIndex++] = (size >> 16) & 0xff;
  buffer[startingIndex++] = (size >> 24) & 0xff;
  return index;
}
/**
 * Numeric limits and BSON element-type byte constants used by the
 * serializer functions above.
 * @ignore
 * @api private
 */
// BSON MAX VALUES (32/64-bit signed integer ranges)
BSON.BSON_INT32_MAX = 0x7FFFFFFF;
BSON.BSON_INT32_MIN = -0x80000000;
BSON.BSON_INT64_MAX = Math.pow(2, 63) - 1;
BSON.BSON_INT64_MIN = -Math.pow(2, 63);
// JS MAX PRECISE VALUES
BSON.JS_INT_MAX = 0x20000000000000;  // Any integer up to 2^53 can be precisely represented by a double.
BSON.JS_INT_MIN = -0x20000000000000;  // Any integer down to -2^53 can be precisely represented by a double.
// Internal long versions of the same precise-integer bounds
var JS_INT_MAX_LONG = Long.fromNumber(0x20000000000000);  // Any integer up to 2^53 can be precisely represented by a double.
var JS_INT_MIN_LONG = Long.fromNumber(-0x20000000000000);  // Any integer down to -2^53 can be precisely represented by a double.

/**
 * Number BSON Type
 *
 * @classconstant BSON_DATA_NUMBER
 **/
BSON.BSON_DATA_NUMBER = 1;
/**
 * String BSON Type
 *
 * @classconstant BSON_DATA_STRING
 **/
BSON.BSON_DATA_STRING = 2;
/**
 * Object BSON Type
 *
 * @classconstant BSON_DATA_OBJECT
 **/
BSON.BSON_DATA_OBJECT = 3;
/**
 * Array BSON Type
 *
 * @classconstant BSON_DATA_ARRAY
 **/
BSON.BSON_DATA_ARRAY = 4;
/**
 * Binary BSON Type
 *
 * @classconstant BSON_DATA_BINARY
 **/
BSON.BSON_DATA_BINARY = 5;
/**
 * ObjectID BSON Type
 *
 * @classconstant BSON_DATA_OID
 **/
BSON.BSON_DATA_OID = 7;
/**
 * Boolean BSON Type
 *
 * @classconstant BSON_DATA_BOOLEAN
 **/
BSON.BSON_DATA_BOOLEAN = 8;
/**
 * Date BSON Type
 *
 * @classconstant BSON_DATA_DATE
 **/
BSON.BSON_DATA_DATE = 9;
/**
 * null BSON Type
 *
 * @classconstant BSON_DATA_NULL
 **/
BSON.BSON_DATA_NULL = 10;
/**
 * RegExp BSON Type
 *
 * @classconstant BSON_DATA_REGEXP
 **/
BSON.BSON_DATA_REGEXP = 11;
/**
 * Code BSON Type
 *
 * @classconstant BSON_DATA_CODE
 **/
BSON.BSON_DATA_CODE = 13;
/**
 * Symbol BSON Type
 *
 * @classconstant BSON_DATA_SYMBOL
 **/
BSON.BSON_DATA_SYMBOL = 14;
/**
 * Code with Scope BSON Type
 *
 * @classconstant BSON_DATA_CODE_W_SCOPE
 **/
BSON.BSON_DATA_CODE_W_SCOPE = 15;
/**
 * 32 bit Integer BSON Type
 *
 * @classconstant BSON_DATA_INT
 **/
BSON.BSON_DATA_INT = 16;
/**
 * Timestamp BSON Type
 *
 * @classconstant BSON_DATA_TIMESTAMP
 **/
BSON.BSON_DATA_TIMESTAMP = 17;
/**
 * Long BSON Type
 *
 * @classconstant BSON_DATA_LONG
 **/
BSON.BSON_DATA_LONG = 18;
/**
 * MinKey BSON Type
 *
 * @classconstant BSON_DATA_MIN_KEY
 **/
BSON.BSON_DATA_MIN_KEY = 0xff;
/**
 * MaxKey BSON Type
 *
 * @classconstant BSON_DATA_MAX_KEY
 **/
BSON.BSON_DATA_MAX_KEY = 0x7f;
/**
 * Binary Default Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_DEFAULT
 **/
BSON.BSON_BINARY_SUBTYPE_DEFAULT = 0;
/**
 * Binary Function Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_FUNCTION
 **/
BSON.BSON_BINARY_SUBTYPE_FUNCTION = 1;
/**
 * Binary Byte Array Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_BYTE_ARRAY
 **/
BSON.BSON_BINARY_SUBTYPE_BYTE_ARRAY = 2;
/**
 * Binary UUID Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_UUID
 **/
BSON.BSON_BINARY_SUBTYPE_UUID = 3;
/**
 * Binary MD5 Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_MD5
 **/
BSON.BSON_BINARY_SUBTYPE_MD5 = 4;
/**
 * Binary User Defined Type
 *
 * @classconstant BSON_BINARY_SUBTYPE_USER_DEFINED
 **/
BSON.BSON_BINARY_SUBTYPE_USER_DEFINED = 128;
// Return BSON
exports.BSON = BSON;

View File

@@ -0,0 +1,24 @@
/**
* A class representation of the BSON Code type.
*
* @class
* @param {(string|function)} code a string or function.
* @param {Object} [scope] an optional scope for the function.
* @return {Code}
*/
var Code = function Code(code, scope) {
if(!(this instanceof Code)) return new Code(code, scope);
this._bsontype = 'Code';
this.code = code;
this.scope = scope == null ? {} : scope;
};
/**
* @ignore
*/
Code.prototype.toJSON = function() {
return {scope:this.scope, code:this.code};
}
module.exports = Code;
module.exports.Code = Code;

View File

@@ -0,0 +1,32 @@
/**
* A class representation of the BSON DBRef type.
*
* @class
* @param {string} namespace the collection name.
* @param {ObjectID} oid the reference ObjectID.
* @param {string} [db] optional db name, if omitted the reference is local to the current db.
* @return {DBRef}
*/
function DBRef(namespace, oid, db) {
if(!(this instanceof DBRef)) return new DBRef(namespace, oid, db);
this._bsontype = 'DBRef';
this.namespace = namespace;
this.oid = oid;
this.db = db;
};
/**
* @ignore
* @api private
*/
DBRef.prototype.toJSON = function() {
return {
'$ref':this.namespace,
'$id':this.oid,
'$db':this.db == null ? '' : this.db
};
}
module.exports = DBRef;
module.exports.DBRef = DBRef;

View File

@@ -0,0 +1,33 @@
/**
* A class representation of the BSON Double type.
*
* @class
* @param {number} value the number we want to represent as a double.
* @return {Double}
*/
function Double(value) {
if(!(this instanceof Double)) return new Double(value);
this._bsontype = 'Double';
this.value = value;
}
/**
* Access the number value.
*
* @method
* @return {number} returns the wrapped double number.
*/
Double.prototype.valueOf = function() {
return this.value;
};
/**
* @ignore
*/
Double.prototype.toJSON = function() {
return this.value;
}
module.exports = Double;
module.exports.Double = Double;

View File

@@ -0,0 +1,121 @@
// Copyright (c) 2008, Fair Oaks Labs, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Fair Oaks Labs, Inc. nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Modifications to writeIEEE754 to support negative zeroes made by Brian White
// Decodes an IEEE 754 floating-point number from `buffer` at `offset`.
// `endian` is 'big' or 'little', `mLen` the mantissa bit count (52 for
// binary64) and `nBytes` the total byte count (8 for binary64). Handles
// subnormals, signed zero, NaN and +/-Infinity.
var readIEEE754 = function(buffer, offset, endian, mLen, nBytes) {
  var isBE = (endian === 'big');
  var eLen = nBytes * 8 - mLen - 1;    // exponent width in bits
  var eMax = (1 << eLen) - 1;
  var eBias = eMax >> 1;
  var pos = isBE ? 0 : (nBytes - 1);   // byte cursor into the value
  var step = isBE ? 1 : -1;            // cursor direction
  var s = buffer[offset + pos];        // first byte holds sign + exponent top
  pos += step;
  var nBits = -7;
  var e = s & ((1 << (-nBits)) - 1);
  s >>= (-nBits);
  nBits += eLen;
  // Accumulate the remaining exponent bytes
  while (nBits > 0) {
    e = e * 256 + buffer[offset + pos];
    pos += step;
    nBits -= 8;
  }
  var m = e & ((1 << (-nBits)) - 1);
  e >>= (-nBits);
  nBits += mLen;
  // Accumulate the mantissa bytes
  while (nBits > 0) {
    m = m * 256 + buffer[offset + pos];
    pos += step;
    nBits -= 8;
  }
  if (e === 0) {
    // Subnormal number
    e = 1 - eBias;
  } else if (e === eMax) {
    // All-ones exponent: NaN or signed infinity
    return m ? NaN : ((s ? -1 : 1) * Infinity);
  } else {
    // Normal number: restore the implicit leading mantissa bit
    m = m + Math.pow(2, mLen);
    e = e - eBias;
  }
  return (s ? -1 : 1) * m * Math.pow(2, e - mLen);
};
// Encodes `value` as an IEEE 754 floating-point number into `buffer` at
// `offset`. `endian` is 'big' or 'little', `mLen` the mantissa bit count
// (52 for binary64), `nBytes` the total byte count (8 for binary64).
// Handles NaN, +/-Infinity, subnormals and negative zero (see header note:
// negative-zero support was added by Brian White).
var writeIEEE754 = function(buffer, value, offset, endian, mLen, nBytes) {
  var e, m, c,
      bBE = (endian === 'big'),
      eLen = nBytes * 8 - mLen - 1,  // exponent width in bits
      eMax = (1 << eLen) - 1,
      eBias = eMax >> 1,
      // rounding term for binary32; zero for other widths
      rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0),
      i = bBE ? (nBytes-1) : 0,      // byte cursor (filled low-to-high)
      d = bBE ? -1 : 1,              // cursor direction
      // sign bit; `1 / value < 0` distinguishes -0 from +0
      s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0;
  value = Math.abs(value);
  if (isNaN(value) || value === Infinity) {
    // NaN gets a non-zero mantissa, Infinity a zero mantissa
    m = isNaN(value) ? 1 : 0;
    e = eMax;
  } else {
    // Derive the unbiased exponent and normalize the mantissa
    e = Math.floor(Math.log(value) / Math.LN2);
    if (value * (c = Math.pow(2, -e)) < 1) {
      e--;
      c *= 2;
    }
    // Apply the rounding term before extracting mantissa bits
    if (e+eBias >= 1) {
      value += rt / c;
    } else {
      value += rt * Math.pow(2, 1 - eBias);
    }
    if (value * c >= 2) {
      e++;
      c /= 2;
    }
    if (e + eBias >= eMax) {
      // Overflow: encode as infinity
      m = 0;
      e = eMax;
    } else if (e + eBias >= 1) {
      // Normal number: drop the implicit leading bit
      m = (value * c - 1) * Math.pow(2, mLen);
      e = e + eBias;
    } else {
      // Subnormal number
      m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen);
      e = 0;
    }
  }
  // Emit mantissa bytes
  for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8);
  // Pack the remaining mantissa bits together with the exponent
  e = (e << mLen) | m;
  eLen += mLen;
  // Emit exponent bytes
  for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8);
  // Set the sign bit in the last-written (most significant) byte
  buffer[offset + i - d] |= s * 128;
};

exports.readIEEE754 = readIEEE754;
exports.writeIEEE754 = writeIEEE754;

View File

@@ -0,0 +1,81 @@
// Expose the pure-JS parser always, and the native bson-ext parser when the
// optional dependency is installed; a missing bson-ext is silently ignored.
try {
  exports.BSONPure = require('./bson');
  exports.BSONNative = require('bson-ext');
} catch(err) {
  // do nothing — bson-ext is optional
}

// Re-export every symbol from each BSON type module at the package root.
// Note: the entries already start with './', so `'./' + path` yields paths
// like '././binary' — redundant but still resolved correctly by require.
[ './binary_parser'
  , './binary'
  , './code'
  , './db_ref'
  , './double'
  , './max_key'
  , './min_key'
  , './objectid'
  , './symbol'
  , './timestamp'
  , './long'].forEach(function (path) {
    var module = require('./' + path);
    for (var i in module) {
      exports[i] = module[i];
    }
});
// Exports all the classes for the PURE JS BSON Parser.
// Returns a fresh map of every symbol from the type modules plus the pure
// JS BSON parser itself.
exports.pure = function() {
  var classes = {};
  // Map all the classes ('././bson' is redundant but resolves correctly)
  [ './binary_parser'
    , './binary'
    , './code'
    , './db_ref'
    , './double'
    , './max_key'
    , './min_key'
    , './objectid'
    , './symbol'
    , './timestamp'
    , './long'
    , '././bson'].forEach(function (path) {
      var module = require('./' + path);
      for (var i in module) {
        classes[i] = module[i];
      }
  });
  // Return classes list
  return classes;
}

// Exports all the classes for the NATIVE JS BSON Parser.
// Falls back to the pure-JS class map when bson-ext cannot be loaded.
exports.native = function() {
  var classes = {};
  // Map all the classes
  [ './binary_parser'
    , './binary'
    , './code'
    , './db_ref'
    , './double'
    , './max_key'
    , './min_key'
    , './objectid'
    , './symbol'
    , './timestamp'
    , './long'
  ].forEach(function (path) {
    var module = require('./' + path);
    for (var i in module) {
      classes[i] = module[i];
    }
  });
  // Catch error and return the pure-JS classes when bson-ext is missing
  try {
    classes['BSON'] = require('bson-ext')
  } catch(err) {
    return exports.pure();
  }
  // Return classes list
  return classes;
}

View File

@@ -0,0 +1,856 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright 2009 Google Inc. All Rights Reserved
/**
* Defines a Long class for representing a 64-bit two's-complement
* integer value, which faithfully simulates the behavior of a Java "Long". This
* implementation is derived from LongLib in GWT.
*
* Constructs a 64-bit two's-complement integer, given its low and high 32-bit
* values as *signed* integers. See the from* functions below for more
* convenient ways of constructing Longs.
*
* The internal representation of a Long is the two given signed, 32-bit values.
* We use 32-bit pieces because these are the size of integers on which
* Javascript performs bit-operations. For operations like addition and
* multiplication, we split each number into 16-bit pieces, which can easily be
* multiplied within Javascript's floating-point representation without overflow
* or change in sign.
*
* In the algorithms below, we frequently reduce the negative case to the
* positive case by negating the input(s) and then post-processing the result.
* Note that we must ALWAYS check specially whether those values are MIN_VALUE
* (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
* a positive number, it overflows back into a negative). Not handling this
* case would often result in infinite recursion.
*
* @class
* @param {number} low the low (signed) 32 bits of the Long.
* @param {number} high the high (signed) 32 bits of the Long.
* @return {Long}
*/
// Constructs a 64-bit two's-complement integer from its low and high signed
// 32-bit halves (see the class comment above for the full contract).
function Long(low, high) {
  // Allow construction without `new`
  if(!(this instanceof Long)) return new Long(low, high);
  this._bsontype = 'Long';
  // `| 0` coerces each half into a signed 32-bit integer.
  this.low_ = low | 0;   // low (signed) 32 bits
  this.high_ = high | 0; // high (signed) 32 bits
};
/**
 * Return the int value.
 *
 * @method
 * @return {number} the value, assuming it is a 32-bit integer.
 */
Long.prototype.toInt = function() {
  // The low half already holds the signed 32-bit value.
  return this.low_;
};

/**
 * Return the Number value.
 *
 * @method
 * @return {number} the closest floating-point representation to this value.
 */
Long.prototype.toNumber = function() {
  // high half scaled by Long.TWO_PWR_32_DBL_ (presumably 2^32 — defined
  // elsewhere in this module) plus the unsigned low half.
  return this.high_ * Long.TWO_PWR_32_DBL_ +
         this.getLowBitsUnsigned();
};

/**
 * Return the JSON value.
 *
 * @method
 * @return {string} the JSON representation (decimal string).
 */
Long.prototype.toJSON = function() {
  return this.toString();
}
/**
 * Return the String value.
 *
 * @method
 * @param {number} [opt_radix] the radix in which the text should be written
 *     (2..36, default 10).
 * @return {string} the textual representation of this value.
 */
Long.prototype.toString = function(opt_radix) {
  var radix = opt_radix || 10;
  if (radix < 2 || 36 < radix) {
    throw Error('radix out of range: ' + radix);
  }
  if (this.isZero()) {
    return '0';
  }
  if (this.isNegative()) {
    if (this.equals(Long.MIN_VALUE)) {
      // MIN_VALUE cannot be negated (-MIN_VALUE == MIN_VALUE), so we remove
      // the bottom-most digit in this base and then recurse to do the rest.
      var radixLong = Long.fromNumber(radix);
      var div = this.div(radixLong);
      var rem = div.multiply(radixLong).subtract(this);
      return div.toString(radix) + rem.toInt().toString(radix);
    } else {
      return '-' + this.negate().toString(radix);
    }
  }
  // Do several (6) digits each time through the loop, so as to
  // minimize the calls to the very expensive emulated div.
  var radixToPower = Long.fromNumber(Math.pow(radix, 6));
  var rem = this;
  var result = '';
  while (true) {
    var remDiv = rem.div(radixToPower);
    // The 6-digit chunk for this iteration fits in a plain int.
    var intval = rem.subtract(remDiv.multiply(radixToPower)).toInt();
    var digits = intval.toString(radix);
    rem = remDiv;
    if (rem.isZero()) {
      return digits + result;
    } else {
      // Zero-pad interior chunks to exactly 6 digits.
      while (digits.length < 6) {
        digits = '0' + digits;
      }
      result = '' + digits + result;
    }
  }
};
/**
 * Return the high 32-bits value.
 *
 * @method
 * @return {number} the high 32-bits as a signed value.
 */
Long.prototype.getHighBits = function() {
  return this.high_;
};

/**
 * Return the low 32-bits value.
 *
 * @method
 * @return {number} the low 32-bits as a signed value.
 */
Long.prototype.getLowBits = function() {
  return this.low_;
};

/**
 * Return the low unsigned 32-bits value.
 *
 * @method
 * @return {number} the low 32-bits as an unsigned value.
 */
Long.prototype.getLowBitsUnsigned = function() {
  // A negative signed low half is shifted into [2^31, 2^32) by adding 2^32.
  return (this.low_ >= 0) ?
      this.low_ : Long.TWO_PWR_32_DBL_ + this.low_;
};
/**
 * Returns the number of bits needed to represent the absolute value of this Long.
 *
 * @method
 * @return {number} Returns the number of bits needed to represent the absolute value of this Long.
 */
Long.prototype.getNumBitsAbs = function() {
  if (this.isNegative()) {
    if (this.equals(Long.MIN_VALUE)) {
      // |MIN_VALUE| = 2^63, which needs all 64 bits.
      return 64;
    } else {
      return this.negate().getNumBitsAbs();
    }
  } else {
    // Scan the most significant non-zero half for its highest set bit.
    var val = this.high_ != 0 ? this.high_ : this.low_;
    for (var bit = 31; bit > 0; bit--) {
      if ((val & (1 << bit)) != 0) {
        break;
      }
    }
    // +33 accounts for the 32 low bits when the high half is non-zero.
    return this.high_ != 0 ? bit + 33 : bit + 1;
  }
};
/**
 * Return whether this value is zero.
 *
 * @method
 * @return {boolean} whether this value is zero.
 */
Long.prototype.isZero = function() {
  // Zero iff both halves are zero.
  return this.low_ === 0 && this.high_ === 0;
};

/**
 * Return whether this value is negative.
 *
 * @method
 * @return {boolean} whether this value is negative.
 */
Long.prototype.isNegative = function() {
  // The sign lives in the high half (two's complement).
  return this.high_ < 0;
};

/**
 * Return whether this value is odd.
 *
 * @method
 * @return {boolean} whether this value is odd.
 */
Long.prototype.isOdd = function() {
  // Parity is determined by the lowest bit of the low half.
  return (this.low_ & 1) === 1;
};
/**
 * Return whether this Long equals the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long equals the other.
 */
Long.prototype.equals = function(other) {
  return this.high_ === other.high_ && this.low_ === other.low_;
};

/**
 * Return whether this Long does not equal the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long does not equal the other.
 */
Long.prototype.notEquals = function(other) {
  return !this.equals(other);
};

/**
 * Return whether this Long is less than the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long is less than the other.
 */
Long.prototype.lessThan = function(other) {
  return this.compare(other) < 0;
};

/**
 * Return whether this Long is less than or equal to the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long is less than or equal to the other.
 */
Long.prototype.lessThanOrEqual = function(other) {
  return this.compare(other) <= 0;
};

/**
 * Return whether this Long is greater than the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long is greater than the other.
 */
Long.prototype.greaterThan = function(other) {
  return this.compare(other) > 0;
};

/**
 * Return whether this Long is greater than or equal to the other.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {boolean} whether this Long is greater than or equal to the other.
 */
Long.prototype.greaterThanOrEqual = function(other) {
  return this.compare(other) >= 0;
};
/**
 * Compares this Long with the given one.
 *
 * @method
 * @param {Long} other Long to compare against.
 * @return {number} 0 if they are the same, 1 if this is greater, and -1 if
 *     the given one is greater.
 */
Long.prototype.compare = function(other) {
  if (this.equals(other)) {
    return 0;
  }
  var thisNeg = this.isNegative();
  var otherNeg = other.isNegative();
  // Different signs decide immediately
  if (thisNeg && !otherNeg) {
    return -1;
  }
  if (!thisNeg && otherNeg) {
    return 1;
  }
  // at this point, the signs are the same, so subtraction will not overflow
  if (this.subtract(other).isNegative()) {
    return -1;
  } else {
    return 1;
  }
};
/**
 * The negation of this value.
 *
 * @method
 * @return {Long} the negation of this value. Note -MIN_VALUE overflows back
 *     to MIN_VALUE (2^63 is not representable), matching Java semantics.
 */
Long.prototype.negate = function() {
  if (this.equals(Long.MIN_VALUE)) {
    return Long.MIN_VALUE;
  } else {
    // Two's-complement negation: ~x + 1
    return this.not().add(Long.ONE);
  }
};
/**
 * Returns the sum of this and the given Long.
 *
 * @method
 * @param {Long} other Long to add to this one.
 * @return {Long} the sum of this and the given Long.
 */
Long.prototype.add = function(other) {
  // Divide each number into 4 chunks of 16 bits, and then sum the chunks.
  // 16-bit pieces keep every intermediate sum exactly representable.
  var a48 = this.high_ >>> 16;
  var a32 = this.high_ & 0xFFFF;
  var a16 = this.low_ >>> 16;
  var a00 = this.low_ & 0xFFFF;
  var b48 = other.high_ >>> 16;
  var b32 = other.high_ & 0xFFFF;
  var b16 = other.low_ >>> 16;
  var b00 = other.low_ & 0xFFFF;
  var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
  // Add chunk-wise from least significant up, propagating the carry
  c00 += a00 + b00;
  c16 += c00 >>> 16;
  c00 &= 0xFFFF;
  c16 += a16 + b16;
  c32 += c16 >>> 16;
  c16 &= 0xFFFF;
  c32 += a32 + b32;
  c48 += c32 >>> 16;
  c32 &= 0xFFFF;
  c48 += a48 + b48;
  // The final carry out of bit 63 is discarded (wrap-around overflow)
  c48 &= 0xFFFF;
  return Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
 * Returns the difference of this and the given Long.
 *
 * @method
 * @param {Long} other Long to subtract from this.
 * @return {Long} the difference of this and the given Long.
 */
Long.prototype.subtract = function(other) {
  // a - b == a + (-b); negate() handles MIN_VALUE via two's-complement wrap.
  var negated = other.negate();
  return this.add(negated);
};
/**
 * Returns the product of this and the given Long.
 *
 * @method
 * @param {Long} other Long to multiply with this.
 * @return {Long} the product of this and the other.
 */
Long.prototype.multiply = function(other) {
// Zero short-circuits.
if (this.isZero()) {
return Long.ZERO;
} else if (other.isZero()) {
return Long.ZERO;
}
// MIN_VALUE cannot be negated, so handle it explicitly: MIN_VALUE * x is
// MIN_VALUE when x is odd and ZERO when x is even (mod 2^64).
if (this.equals(Long.MIN_VALUE)) {
return other.isOdd() ? Long.MIN_VALUE : Long.ZERO;
} else if (other.equals(Long.MIN_VALUE)) {
return this.isOdd() ? Long.MIN_VALUE : Long.ZERO;
}
// Reduce to the non-negative case, fixing up the sign afterwards.
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().multiply(other.negate());
} else {
return this.negate().multiply(other).negate();
}
} else if (other.isNegative()) {
return this.multiply(other.negate()).negate();
}
// If both Longs are small, use float multiplication
// (both < 2^24 means the product < 2^48, exact in a double).
if (this.lessThan(Long.TWO_PWR_24_) &&
other.lessThan(Long.TWO_PWR_24_)) {
return Long.fromNumber(this.toNumber() * other.toNumber());
}
// Divide each Long into 4 chunks of 16 bits, and then add up 4x4 products.
// We can skip products that would overflow.
var a48 = this.high_ >>> 16;
var a32 = this.high_ & 0xFFFF;
var a16 = this.low_ >>> 16;
var a00 = this.low_ & 0xFFFF;
var b48 = other.high_ >>> 16;
var b32 = other.high_ & 0xFFFF;
var b16 = other.low_ >>> 16;
var b00 = other.low_ & 0xFFFF;
var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
// Schoolbook multiplication over 16-bit limbs, carrying after each
// partial product so intermediates stay exactly representable.
c00 += a00 * b00;
c16 += c00 >>> 16;
c00 &= 0xFFFF;
c16 += a16 * b00;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c16 += a00 * b16;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c32 += a32 * b00;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a16 * b16;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a00 * b32;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
c48 &= 0xFFFF;
return Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
 * Returns this Long divided by the given one (truncated division).
 *
 * @method
 * @param {Long} other Long by which to divide.
 * @return {Long} this Long divided by the given one.
 */
Long.prototype.div = function(other) {
if (other.isZero()) {
throw Error('division by zero');
} else if (this.isZero()) {
return Long.ZERO;
}
if (this.equals(Long.MIN_VALUE)) {
if (other.equals(Long.ONE) ||
other.equals(Long.NEG_ONE)) {
return Long.MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE
} else if (other.equals(Long.MIN_VALUE)) {
return Long.ONE;
} else {
// At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
var halfThis = this.shiftRight(1);
var approx = halfThis.div(other).shiftLeft(1);
if (approx.equals(Long.ZERO)) {
return other.isNegative() ? Long.ONE : Long.NEG_ONE;
} else {
// Correct the halved estimate with the remainder's quotient.
var rem = this.subtract(other.multiply(approx));
var result = approx.add(rem.div(other));
return result;
}
}
} else if (other.equals(Long.MIN_VALUE)) {
return Long.ZERO;
}
// Reduce to the non-negative case, fixing up the sign afterwards.
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().div(other.negate());
} else {
return this.negate().div(other).negate();
}
} else if (other.isNegative()) {
return this.div(other.negate()).negate();
}
// Repeat the following until the remainder is less than other: find a
// floating-point that approximates remainder / other *from below*, add this
// into the result, and subtract it from the remainder. It is critical that
// the approximate value is less than or equal to the real value so that the
// remainder never becomes negative.
var res = Long.ZERO;
var rem = this;
while (rem.greaterThanOrEqual(other)) {
// Approximate the result of division. This may be a little greater or
// smaller than the actual value.
var approx = Math.max(1, Math.floor(rem.toNumber() / other.toNumber()));
// We will tweak the approximate result by changing it in the 48-th digit or
// the smallest non-fractional digit, whichever is larger.
var log2 = Math.ceil(Math.log(approx) / Math.LN2);
var delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
// Decrease the approximation until it is smaller than the remainder. Note
// that if it is too large, the product overflows and is negative.
var approxRes = Long.fromNumber(approx);
var approxRem = approxRes.multiply(other);
while (approxRem.isNegative() || approxRem.greaterThan(rem)) {
approx -= delta;
approxRes = Long.fromNumber(approx);
approxRem = approxRes.multiply(other);
}
// We know the answer can't be zero... and actually, zero would cause
// infinite recursion since we would make no progress.
if (approxRes.isZero()) {
approxRes = Long.ONE;
}
res = res.add(approxRes);
rem = rem.subtract(approxRem);
}
return res;
};
/**
 * Returns this Long modulo the given one.
 *
 * @method
 * @param {Long} other Long by which to mod.
 * @return {Long} this Long modulo the given one.
 */
Long.prototype.modulo = function(other) {
  // Truncated-division remainder: this - trunc(this / other) * other.
  var quotient = this.div(other);
  return this.subtract(quotient.multiply(other));
};
/**
 * The bitwise-NOT of this value.
 *
 * @method
 * @return {Long} the bitwise-NOT of this value.
 */
Long.prototype.not = function() {
  return Long.fromBits(~this.low_, ~this.high_);
};

/**
 * Returns the bitwise-AND of this Long and the given one.
 *
 * @method
 * @param {Long} other the Long with which to AND.
 * @return {Long} the bitwise-AND of this and the other.
 */
Long.prototype.and = function(other) {
  var low = this.low_ & other.low_;
  var high = this.high_ & other.high_;
  return Long.fromBits(low, high);
};

/**
 * Returns the bitwise-OR of this Long and the given one.
 *
 * @method
 * @param {Long} other the Long with which to OR.
 * @return {Long} the bitwise-OR of this and the other.
 */
Long.prototype.or = function(other) {
  var low = this.low_ | other.low_;
  var high = this.high_ | other.high_;
  return Long.fromBits(low, high);
};

/**
 * Returns the bitwise-XOR of this Long and the given one.
 *
 * @method
 * @param {Long} other the Long with which to XOR.
 * @return {Long} the bitwise-XOR of this and the other.
 */
Long.prototype.xor = function(other) {
  var low = this.low_ ^ other.low_;
  var high = this.high_ ^ other.high_;
  return Long.fromBits(low, high);
};
/**
 * Returns this Long with bits shifted to the left by the given amount.
 *
 * @method
 * @param {number} numBits the number of bits by which to shift (taken mod 64).
 * @return {Long} this shifted to the left by the given amount.
 */
Long.prototype.shiftLeft = function(numBits) {
  numBits &= 63;
  if (numBits === 0) return this;
  var low = this.low_;
  if (numBits < 32) {
    // Bits shifted out of the low word carry into the high word.
    var high = this.high_;
    return Long.fromBits(
      low << numBits,
      (high << numBits) | (low >>> (32 - numBits)));
  }
  // Shifts of 32..63: the shifted low word becomes the high word.
  return Long.fromBits(0, low << (numBits - 32));
};

/**
 * Returns this Long with bits shifted to the right by the given amount
 * (arithmetic shift: the sign bit fills the vacated high bits).
 *
 * @method
 * @param {number} numBits the number of bits by which to shift (taken mod 64).
 * @return {Long} this shifted to the right by the given amount.
 */
Long.prototype.shiftRight = function(numBits) {
  numBits &= 63;
  if (numBits === 0) return this;
  var high = this.high_;
  if (numBits < 32) {
    // Bits shifted out of the high word carry into the low word.
    var low = this.low_;
    return Long.fromBits(
      (low >>> numBits) | (high << (32 - numBits)),
      high >> numBits);
  }
  // Shifts of 32..63: high word slides into the low word; sign fills the rest.
  return Long.fromBits(
    high >> (numBits - 32),
    high >= 0 ? 0 : -1);
};
/**
 * Returns this Long with bits shifted to the right by the given amount, with zeros placed into the new leading bits (logical/unsigned shift).
 *
 * @method
 * @param {number} numBits the number of bits by which to shift.
 * @return {Long} this shifted to the right by the given amount, with zeros placed into the new leading bits.
 */
Long.prototype.shiftRightUnsigned = function(numBits) {
numBits &= 63;
if (numBits == 0) {
return this;
} else {
var high = this.high_;
if (numBits < 32) {
var low = this.low_;
// Bits shifted out of the high word carry into the low word;
// the high word is zero-filled (>>> rather than >>).
return Long.fromBits(
(low >>> numBits) | (high << (32 - numBits)),
high >>> numBits);
} else if (numBits == 32) {
// Exactly 32: the high word becomes the low word, high word zeroed.
return Long.fromBits(high, 0);
} else {
return Long.fromBits(high >>> (numBits - 32), 0);
}
}
};
/**
 * Returns a Long representing the given (32-bit) integer value.
 *
 * @method
 * @param {number} value the 32-bit integer in question.
 * @return {Long} the corresponding Long value.
 */
Long.fromInt = function(value) {
  // Small values (-128..127) are interned in INT_CACHE_.
  var cacheable = -128 <= value && value < 128;
  if (cacheable) {
    var cached = Long.INT_CACHE_[value];
    if (cached) return cached;
  }
  var obj = new Long(value | 0, value < 0 ? -1 : 0);
  if (cacheable) Long.INT_CACHE_[value] = obj;
  return obj;
};

/**
 * Returns a Long representing the given value, provided that it is a finite number. Otherwise, zero is returned.
 *
 * @method
 * @param {number} value the number in question.
 * @return {Long} the corresponding Long value.
 */
Long.fromNumber = function(value) {
  if (isNaN(value) || !isFinite(value)) return Long.ZERO;
  // Clamp values outside the signed 64-bit range.
  if (value <= -Long.TWO_PWR_63_DBL_) return Long.MIN_VALUE;
  if (value + 1 >= Long.TWO_PWR_63_DBL_) return Long.MAX_VALUE;
  // Reduce the negative case to the positive one.
  if (value < 0) return Long.fromNumber(-value).negate();
  // Split into low and high 32-bit halves.
  return new Long(
    (value % Long.TWO_PWR_32_DBL_) | 0,
    (value / Long.TWO_PWR_32_DBL_) | 0);
};
/**
 * Returns a Long representing the 64-bit integer that comes by concatenating the given high and low bits. Each is assumed to use 32 bits.
 *
 * @method
 * @param {number} lowBits the low 32-bits.
 * @param {number} highBits the high 32-bits.
 * @return {Long} the corresponding Long value.
 */
Long.fromBits = function(lowBits, highBits) {
return new Long(lowBits, highBits);
};
/**
 * Returns a Long representation of the given string, written using the given radix.
 *
 * @method
 * @param {string} str the textual representation of the Long.
 * @param {number} opt_radix the radix in which the text is written (2..36, default 10).
 * @return {Long} the corresponding Long value.
 */
Long.fromString = function(str, opt_radix) {
if (str.length == 0) {
throw Error('number format error: empty string');
}
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
// A leading '-' reduces to the positive case; any other '-' is an error.
if (str.charAt(0) == '-') {
return Long.fromString(str.substring(1), radix).negate();
} else if (str.indexOf('-') >= 0) {
throw Error('number format error: interior "-" character: ' + str);
}
// Do several (8) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = Long.fromNumber(Math.pow(radix, 8));
var result = Long.ZERO;
for (var i = 0; i < str.length; i += 8) {
var size = Math.min(8, str.length - i);
// parseInt on at most 8 digits of radix <= 36 stays exactly
// representable in a double.
var value = parseInt(str.substring(i, i + size), radix);
if (size < 8) {
// Final short chunk: scale by radix^size rather than radix^8.
var power = Long.fromNumber(Math.pow(radix, size));
result = result.multiply(power).add(Long.fromNumber(value));
} else {
result = result.multiply(radixToPower);
result = result.add(Long.fromNumber(value));
}
}
return result;
};
// NOTE: Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the
// from* methods on which they depend.
/**
 * A cache of the Long representations of small integer values.
 * @type {Object}
 * @ignore
 */
Long.INT_CACHE_ = {};
// NOTE: the compiler should inline these constant values below and then remove
// these variables, so there should be no runtime penalty for these.
/**
 * Number used repeated below in calculations. This must appear before the
 * first call to any from* function below.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_16_DBL_ = 1 << 16;
/**
 * 2^24.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_24_DBL_ = 1 << 24;
/**
 * 2^32 (built by multiplication: 1 << 32 would overflow 32-bit shifts).
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_32_DBL_ = Long.TWO_PWR_16_DBL_ * Long.TWO_PWR_16_DBL_;
/**
 * 2^31.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_31_DBL_ = Long.TWO_PWR_32_DBL_ / 2;
/**
 * 2^48.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_48_DBL_ = Long.TWO_PWR_32_DBL_ * Long.TWO_PWR_16_DBL_;
/**
 * 2^64.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_64_DBL_ = Long.TWO_PWR_32_DBL_ * Long.TWO_PWR_32_DBL_;
/**
 * 2^63.
 * @type {number}
 * @ignore
 */
Long.TWO_PWR_63_DBL_ = Long.TWO_PWR_64_DBL_ / 2;
/** @type {Long} */
Long.ZERO = Long.fromInt(0);
/** @type {Long} */
Long.ONE = Long.fromInt(1);
/** @type {Long} */
Long.NEG_ONE = Long.fromInt(-1);
// 0x7FFFFFFFFFFFFFFF: all bits set except the sign bit.
/** @type {Long} */
Long.MAX_VALUE =
Long.fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0);
// 0x8000000000000000: only the sign bit set.
/** @type {Long} */
Long.MIN_VALUE = Long.fromBits(0, 0x80000000 | 0);
/**
 * Threshold below which multiply() can use exact float math.
 * @type {Long}
 * @ignore
 */
Long.TWO_PWR_24_ = Long.fromInt(1 << 24);
/**
 * Expose.
 */
module.exports = Long;
module.exports.Long = Long;

View File

@@ -0,0 +1,14 @@
/**
* A class representation of the BSON MaxKey type.
*
* @class
* @return {MaxKey} A MaxKey instance
*/
function MaxKey() {
if(!(this instanceof MaxKey)) return new MaxKey();
this._bsontype = 'MaxKey';
}
module.exports = MaxKey;
module.exports.MaxKey = MaxKey;

View File

@@ -0,0 +1,14 @@
/**
* A class representation of the BSON MinKey type.
*
* @class
* @return {MinKey} A MinKey instance
*/
function MinKey() {
if(!(this instanceof MinKey)) return new MinKey();
this._bsontype = 'MinKey';
}
module.exports = MinKey;
module.exports.MinKey = MinKey;

View File

@@ -0,0 +1,274 @@
/**
 * Module dependencies.
 * @ignore
 */
var BinaryParser = require('./binary_parser').BinaryParser;

/**
 * Machine id.
 *
 * Create a random 3-byte value (i.e. unique for this
 * process). Other drivers use a md5 of the machine id here, but
 * that would mean an async call to gethostname, so we don't bother.
 *
 * Math.floor is used instead of parseInt(..., 10): parseInt coerces its
 * argument to a string first, and for very small random values that string
 * is in exponential notation (e.g. "8.4e-2"), which parseInt misreads.
 * @ignore
 */
var MACHINE_ID = Math.floor(Math.random() * 0xFFFFFF);

// Regular expression that checks for hex value
var checkForHexRegExp = new RegExp("^[0-9a-fA-F]{24}$");
/**
 * Create a new ObjectID instance
 *
 * @class
 * @param {(string|number)} id Can be a 24 byte hex string, 12 byte binary string or a Number.
 * @property {number} generationTime The generation time of this ObjectId instance
 * @return {ObjectID} instance of ObjectID.
 */
var ObjectID = function ObjectID(id) {
if(!(this instanceof ObjectID)) return new ObjectID(id);
// Passing an existing ObjectID through is a no-op.
if((id instanceof ObjectID)) return id;
this._bsontype = 'ObjectID';
var __id = null; // (unused; hex cache lives on this.__id below)
var valid = ObjectID.isValid(id);
// Throw an error if it's not a valid setup
if(!valid && id != null){
throw new Error("Argument passed in must be a single String of 12 bytes or a string of 24 hex characters");
} else if(valid && typeof id == 'string' && id.length == 24) {
// Returning an object from a constructor overrides `this`, so the
// instance built by createFromHexString is what callers receive.
return ObjectID.createFromHexString(id);
} else if(id == null || typeof id == 'number') {
// convert to 12 byte binary string
this.id = this.generate(id);
} else if(id != null && id.length === 12) {
// assume 12 byte string
this.id = id;
}
// Optionally precompute and cache the hex form on the instance.
if(ObjectID.cacheHexString) this.__id = this.toHexString();
};
// Allow usage of ObjectId as well as ObjectID
var ObjectId = ObjectID;
// Precomputed hex table enables speedy hex string conversion
// (maps each byte value 0-255 to its zero-padded two-char hex form).
var hexTable = [];
for (var i = 0; i < 256; i++) {
hexTable[i] = (i <= 15 ? '0' : '') + i.toString(16);
}
/**
 * Return the ObjectID id as a 24 byte hex string representation
 *
 * @method
 * @return {string} return the 24 byte hex string representation.
 */
ObjectID.prototype.toHexString = function() {
  if (ObjectID.cacheHexString && this.__id) return this.__id;
  // Map each byte of the 12-byte binary id through the precomputed table.
  var parts = [];
  for (var idx = 0; idx < this.id.length; idx++) {
    parts.push(hexTable[this.id.charCodeAt(idx)]);
  }
  var hex = parts.join('');
  if (ObjectID.cacheHexString) this.__id = hex;
  return hex;
};
/**
 * Update the ObjectID index used in generating new ObjectID's on the driver
 *
 * @method
 * @return {number} returns next index value.
 * @ignore
 */
ObjectID.prototype.get_inc = function() {
  // Advance the shared counter, wrapping at 3 bytes (0xFFFFFF).
  ObjectID.index = (ObjectID.index + 1) % 0xFFFFFF;
  return ObjectID.index;
};

/**
 * camelCase alias for get_inc.
 *
 * @method
 * @return {number} returns next index value.
 * @ignore
 */
ObjectID.prototype.getInc = function() {
  return this.get_inc();
};
/**
 * Generate a 12 byte id string used in ObjectID's
 *
 * Layout: 4-byte timestamp (seconds), 3-byte machine id, 2-byte pid,
 * 3-byte incrementing counter.
 *
 * @method
 * @param {number} [time] optional parameter allowing to pass in a second based timestamp.
 * @return {string} return the 12 byte id binary string.
 */
ObjectID.prototype.generate = function(time) {
if ('number' != typeof time) {
// Default to the current time in whole seconds.
time = parseInt(Date.now()/1000,10);
}
var time4Bytes = BinaryParser.encodeInt(time, 32, true, true);
/* for time-based ObjectID the bytes following the time will be zeroed */
var machine3Bytes = BinaryParser.encodeInt(MACHINE_ID, 24, false);
// In the browser there is no process object; fall back to a random pid.
var pid2Bytes = BinaryParser.fromShort(typeof process === 'undefined' ? Math.floor(Math.random() * 100000) : process.pid % 0xFFFF);
var index3Bytes = BinaryParser.encodeInt(this.get_inc(), 24, false, true);
return time4Bytes + machine3Bytes + pid2Bytes + index3Bytes;
};
/**
 * Converts the id into a 24 character hex string for printing.
 *
 * @return {String} return the 24 byte hex string representation.
 * @ignore
 */
ObjectID.prototype.toString = function() {
  return this.toHexString();
};

/**
 * Converts to a string representation of this Id.
 * Kept as an alias of toString so console inspection stays in sync.
 *
 * @return {String} return the 24 byte hex string representation.
 * @ignore
 */
ObjectID.prototype.inspect = ObjectID.prototype.toString;

/**
 * Converts to its JSON representation (the 24 byte hex string).
 *
 * @return {String} return the 24 byte hex string representation.
 * @ignore
 */
ObjectID.prototype.toJSON = function() {
  return this.toHexString();
};
/**
 * Compares the equality of this ObjectID with `otherID`.
 *
 * @method
 * @param {object} otherID ObjectID instance (or 24-char hex string) to compare against.
 * @return {boolean} the result of comparing two ObjectID's
 */
ObjectID.prototype.equals = function equals (otherID) {
if(otherID == null) return false;
// Accept either an ObjectID-like value (anything exposing toHexString)
// or a hex string. NOTE(review): a malformed string argument makes
// createFromHexString throw rather than returning false — confirm intended.
var id = (otherID instanceof ObjectID || otherID.toHexString)
? otherID.id
: ObjectID.createFromHexString(otherID).id;
return this.id === id;
}
/**
 * Returns the generation date (accurate up to the second) that this ID was generated.
 *
 * @method
 * @return {date} the generation date
 */
ObjectID.prototype.getTimestamp = function() {
var timestamp = new Date();
// The first 4 bytes of the id hold the creation time in seconds.
timestamp.setTime(Math.floor(BinaryParser.decodeInt(this.id.substring(0,4), 32, true, true)) * 1000);
return timestamp;
}
/**
 * Start the shared increment counter at a random 3-byte offset.
 *
 * Math.floor is used instead of parseInt(..., 10): parseInt coerces its
 * argument to a string first, and very small random values stringify in
 * exponential notation, which parseInt misreads.
 * @ignore
 */
ObjectID.index = Math.floor(Math.random() * 0xFFFFFF);

/**
 * Primary-key factory: returns a fresh ObjectID.
 * @ignore
 */
ObjectID.createPk = function createPk () {
  return new ObjectID();
};
/**
 * Creates an ObjectID from a second based number, with the rest of the ObjectID zeroed out. Used for comparisons or sorting the ObjectID.
 *
 * @method
 * @param {number} time an integer number representing a number of seconds.
 * @return {ObjectID} return the created ObjectID
 */
ObjectID.createFromTime = function createFromTime (time) {
// 4 timestamp bytes followed by 8 zero bytes.
var id = BinaryParser.encodeInt(time, 32, true, true) +
BinaryParser.encodeInt(0, 64, true, true);
return new ObjectID(id);
};
/**
 * Creates an ObjectID from a hex string representation of an ObjectID.
 *
 * @method
 * @param {string} hexString create a ObjectID from a passed in 24 byte hexstring.
 * @return {ObjectID} return the created ObjectID
 */
ObjectID.createFromHexString = function createFromHexString (hexString) {
// Throw an error if it's not a valid setup
if(typeof hexString === 'undefined' || hexString != null && hexString.length != 24)
throw new Error("Argument passed in must be a single String of 12 bytes or a string of 24 hex characters");
var len = hexString.length;
if(len > 12*2) {
throw new Error('Id cannot be longer than 12 bytes');
}
var result = ''
, string
, number;
// Decode two hex characters at a time into one binary byte.
for (var index = 0; index < len; index += 2) {
string = hexString.substr(index, 2);
number = parseInt(string, 16);
result += BinaryParser.fromByte(number);
}
// NOTE(review): the second argument is ignored by the ObjectID constructor.
return new ObjectID(result, hexString);
};
/**
 * Checks if a value is a valid bson ObjectId
 *
 * @method
 * @return {boolean} return true if the value is a valid bson ObjectId, return false otherwise.
 */
ObjectID.isValid = function isValid(id) {
  if (id == null) return false;
  // Numbers are always accepted (they seed a generated id).
  if (typeof id == 'number') return true;
  // A 12-byte binary string (or any 12-length value) is accepted as-is.
  if (id.length == 12) return true;
  // 24 characters must be valid hex when the value is a string.
  if (id.length == 24) {
    return typeof id == 'string' ? checkForHexRegExp.test(id) : true;
  }
  return false;
};
/**
 * @ignore
 */
Object.defineProperty(ObjectID.prototype, "generationTime", {
enumerable: true
// Reads the creation time (seconds since epoch) out of the first 4 bytes.
, get: function () {
return Math.floor(BinaryParser.decodeInt(this.id.substring(0,4), 32, true, true));
}
// Overwrites the 4 timestamp bytes, keeping the remaining 8 bytes intact.
, set: function (value) {
var value = BinaryParser.encodeInt(value, 32, true, true);
this.id = value + this.id.substr(4);
// delete this.__id;
this.toHexString();
}
});
/**
 * Expose.
 */
module.exports = ObjectID;
module.exports.ObjectID = ObjectID;
module.exports.ObjectId = ObjectID;

View File

@@ -0,0 +1,47 @@
/**
* A class representation of the BSON Symbol type.
*
* @class
* @deprecated
* @param {string} value the string representing the symbol.
* @return {Symbol}
*/
function Symbol(value) {
if(!(this instanceof Symbol)) return new Symbol(value);
this._bsontype = 'Symbol';
this.value = value;
}
/**
* Access the wrapped string value.
*
* @method
* @return {String} returns the wrapped string.
*/
Symbol.prototype.valueOf = function() {
return this.value;
};
/**
* @ignore
*/
Symbol.prototype.toString = function() {
return this.value;
}
/**
* @ignore
*/
Symbol.prototype.inspect = function() {
return this.value;
}
/**
* @ignore
*/
Symbol.prototype.toJSON = function() {
return this.value;
}
module.exports = Symbol;
module.exports.Symbol = Symbol;

View File

@@ -0,0 +1,856 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright 2009 Google Inc. All Rights Reserved
/**
* This type is for INTERNAL use in MongoDB only and should not be used in applications.
* The appropriate corresponding type is the JavaScript Date type.
*
* Defines a Timestamp class for representing a 64-bit two's-complement
* integer value, which faithfully simulates the behavior of a Java "Timestamp". This
* implementation is derived from TimestampLib in GWT.
*
* Constructs a 64-bit two's-complement integer, given its low and high 32-bit
* values as *signed* integers. See the from* functions below for more
* convenient ways of constructing Timestamps.
*
* The internal representation of a Timestamp is the two given signed, 32-bit values.
* We use 32-bit pieces because these are the size of integers on which
* Javascript performs bit-operations. For operations like addition and
* multiplication, we split each number into 16-bit pieces, which can easily be
* multiplied within Javascript's floating-point representation without overflow
* or change in sign.
*
* In the algorithms below, we frequently reduce the negative case to the
* positive case by negating the input(s) and then post-processing the result.
* Note that we must ALWAYS check specially whether those values are MIN_VALUE
* (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
* a positive number, it overflows back into a negative). Not handling this
* case would often result in infinite recursion.
*
* @class
* @param {number} low the low (signed) 32 bits of the Timestamp.
* @param {number} high the high (signed) 32 bits of the Timestamp.
*/
function Timestamp(low, high) {
  // Support construction without `new`.
  if (!(this instanceof Timestamp)) return new Timestamp(low, high);
  this._bsontype = 'Timestamp';
  // Coerce both halves into signed 32-bit integers.
  this.low_ = low | 0;   // low (signed) 32 bits
  this.high_ = high | 0; // high (signed) 32 bits
};
/**
 * Return the int value.
 *
 * @return {number} the value, assuming it is a 32-bit integer.
 */
Timestamp.prototype.toInt = function() {
return this.low_;
};
/**
 * Return the Number value.
 *
 * @method
 * @return {number} the closest floating-point representation to this value.
 */
Timestamp.prototype.toNumber = function() {
// high * 2^32 + unsigned(low); may lose precision beyond 2^53.
return this.high_ * Timestamp.TWO_PWR_32_DBL_ +
this.getLowBitsUnsigned();
};
/**
 * Return the JSON value.
 *
 * @method
 * @return {string} the JSON representation.
 */
Timestamp.prototype.toJSON = function() {
// Serialized as a base-10 string so full 64-bit precision survives.
return this.toString();
}
/**
 * Return the String value.
 *
 * @method
 * @param {number} [opt_radix] the radix in which the text should be written (2..36, default 10).
 * @return {string} the textual representation of this value.
 */
Timestamp.prototype.toString = function(opt_radix) {
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
if (this.isZero()) {
return '0';
}
if (this.isNegative()) {
if (this.equals(Timestamp.MIN_VALUE)) {
// We need to change the Timestamp value before it can be negated, so we remove
// the bottom-most digit in this base and then recurse to do the rest.
var radixTimestamp = Timestamp.fromNumber(radix);
var div = this.div(radixTimestamp);
var rem = div.multiply(radixTimestamp).subtract(this);
return div.toString(radix) + rem.toInt().toString(radix);
} else {
return '-' + this.negate().toString(radix);
}
}
// Do several (6) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = Timestamp.fromNumber(Math.pow(radix, 6));
var rem = this;
var result = '';
while (true) {
var remDiv = rem.div(radixToPower);
// The chunk of up to 6 digits fits in a 32-bit int.
var intval = rem.subtract(remDiv.multiply(radixToPower)).toInt();
var digits = intval.toString(radix);
rem = remDiv;
if (rem.isZero()) {
return digits + result;
} else {
// Interior chunks must be zero-padded to exactly 6 digits.
while (digits.length < 6) {
digits = '0' + digits;
}
result = '' + digits + result;
}
}
};
/**
 * Return the high 32-bits value.
 *
 * @method
 * @return {number} the high 32-bits as a signed value.
 */
Timestamp.prototype.getHighBits = function() {
return this.high_;
};
/**
 * Return the low 32-bits value.
 *
 * @method
 * @return {number} the low 32-bits as a signed value.
 */
Timestamp.prototype.getLowBits = function() {
return this.low_;
};
/**
 * Return the low unsigned 32-bits value.
 *
 * @method
 * @return {number} the low 32-bits as an unsigned value.
 */
Timestamp.prototype.getLowBitsUnsigned = function() {
// Add 2^32 to map a negative signed word to its unsigned equivalent.
return (this.low_ >= 0) ?
this.low_ : Timestamp.TWO_PWR_32_DBL_ + this.low_;
};
/**
 * Returns the number of bits needed to represent the absolute value of this Timestamp.
 *
 * @method
 * @return {number} Returns the number of bits needed to represent the absolute value of this Timestamp.
 */
Timestamp.prototype.getNumBitsAbs = function() {
if (this.isNegative()) {
if (this.equals(Timestamp.MIN_VALUE)) {
return 64;
} else {
return this.negate().getNumBitsAbs();
}
} else {
// Scan the most significant non-zero word for its top set bit.
var val = this.high_ != 0 ? this.high_ : this.low_;
for (var bit = 31; bit > 0; bit--) {
if ((val & (1 << bit)) != 0) {
break;
}
}
return this.high_ != 0 ? bit + 33 : bit + 1;
}
};
/**
 * Return whether this value is zero.
 *
 * @method
 * @return {boolean} whether this value is zero.
 */
Timestamp.prototype.isZero = function() {
return this.high_ == 0 && this.low_ == 0;
};
/**
 * Return whether this value is negative.
 *
 * @method
 * @return {boolean} whether this value is negative.
 */
Timestamp.prototype.isNegative = function() {
// The sign lives in the top bit of the high word.
return this.high_ < 0;
};
/**
 * Return whether this value is odd.
 *
 * @method
 * @return {boolean} whether this value is odd.
 */
Timestamp.prototype.isOdd = function() {
return (this.low_ & 1) == 1;
};
/**
 * Return whether this Timestamp equals the other
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp equals the other
 */
Timestamp.prototype.equals = function(other) {
return (this.high_ == other.high_) && (this.low_ == other.low_);
};
/**
 * Return whether this Timestamp does not equal the other.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp does not equal the other.
 */
Timestamp.prototype.notEquals = function(other) {
return (this.high_ != other.high_) || (this.low_ != other.low_);
};
/**
 * Return whether this Timestamp is less than the other.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp is less than the other.
 */
Timestamp.prototype.lessThan = function(other) {
return this.compare(other) < 0;
};
/**
 * Return whether this Timestamp is less than or equal to the other.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp is less than or equal to the other.
 */
Timestamp.prototype.lessThanOrEqual = function(other) {
return this.compare(other) <= 0;
};
/**
 * Return whether this Timestamp is greater than the other.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp is greater than the other.
 */
Timestamp.prototype.greaterThan = function(other) {
return this.compare(other) > 0;
};
/**
 * Return whether this Timestamp is greater than or equal to the other.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {boolean} whether this Timestamp is greater than or equal to the other.
 */
Timestamp.prototype.greaterThanOrEqual = function(other) {
return this.compare(other) >= 0;
};
/**
 * Compares this Timestamp with the given one.
 *
 * @method
 * @param {Timestamp} other Timestamp to compare against.
 * @return {number} 0 if they are the same, 1 if this is greater, and -1 if the given one is greater.
 */
Timestamp.prototype.compare = function(other) {
  if (this.equals(other)) return 0;
  var thisNeg = this.isNegative();
  var otherNeg = other.isNegative();
  // Differing signs decide immediately.
  if (thisNeg !== otherNeg) return thisNeg ? -1 : 1;
  // Same sign: subtraction cannot overflow, so its sign decides.
  return this.subtract(other).isNegative() ? -1 : 1;
};

/**
 * The negation of this value.
 *
 * @method
 * @return {Timestamp} the negation of this value.
 */
Timestamp.prototype.negate = function() {
  // Two's-complement wrap-around: -MIN_VALUE overflows back to MIN_VALUE.
  return this.equals(Timestamp.MIN_VALUE)
    ? Timestamp.MIN_VALUE
    : this.not().add(Timestamp.ONE);
};
/**
 * Returns the sum of this and the given Timestamp.
 *
 * @method
 * @param {Timestamp} other Timestamp to add to this one.
 * @return {Timestamp} the sum of this and the given Timestamp.
 */
Timestamp.prototype.add = function(other) {
// Divide each number into 4 chunks of 16 bits, and then sum the chunks.
// 16-bit limbs keep every partial sum exactly representable in a double.
var a48 = this.high_ >>> 16;
var a32 = this.high_ & 0xFFFF;
var a16 = this.low_ >>> 16;
var a00 = this.low_ & 0xFFFF;
var b48 = other.high_ >>> 16;
var b32 = other.high_ & 0xFFFF;
var b16 = other.low_ >>> 16;
var b00 = other.low_ & 0xFFFF;
var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
// Sum each 16-bit column from least to most significant, carrying the
// overflow bits (>>> 16) into the next column before masking.
c00 += a00 + b00;
c16 += c00 >>> 16;
c00 &= 0xFFFF;
c16 += a16 + b16;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c32 += a32 + b32;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c48 += a48 + b48;
// Carry out of the top column is discarded: 64-bit wrap-around.
c48 &= 0xFFFF;
return Timestamp.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
 * Returns the difference of this and the given Timestamp.
 *
 * @method
 * @param {Timestamp} other Timestamp to subtract from this.
 * @return {Timestamp} the difference of this and the given Timestamp.
 */
Timestamp.prototype.subtract = function(other) {
// a - b == a + (-b).
return this.add(other.negate());
};
/**
 * Returns the product of this and the given Timestamp.
 *
 * @method
 * @param {Timestamp} other Timestamp to multiply with this.
 * @return {Timestamp} the product of this and the other.
 */
Timestamp.prototype.multiply = function(other) {
// Zero short-circuits.
if (this.isZero()) {
return Timestamp.ZERO;
} else if (other.isZero()) {
return Timestamp.ZERO;
}
// MIN_VALUE cannot be negated: MIN_VALUE * x is MIN_VALUE when x is odd
// and ZERO when x is even (mod 2^64).
if (this.equals(Timestamp.MIN_VALUE)) {
return other.isOdd() ? Timestamp.MIN_VALUE : Timestamp.ZERO;
} else if (other.equals(Timestamp.MIN_VALUE)) {
return this.isOdd() ? Timestamp.MIN_VALUE : Timestamp.ZERO;
}
// Reduce to the non-negative case, fixing up the sign afterwards.
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().multiply(other.negate());
} else {
return this.negate().multiply(other).negate();
}
} else if (other.isNegative()) {
return this.multiply(other.negate()).negate();
}
// If both Timestamps are small, use float multiplication
// (both < 2^24 means the product < 2^48, exact in a double).
if (this.lessThan(Timestamp.TWO_PWR_24_) &&
other.lessThan(Timestamp.TWO_PWR_24_)) {
return Timestamp.fromNumber(this.toNumber() * other.toNumber());
}
// Divide each Timestamp into 4 chunks of 16 bits, and then add up 4x4 products.
// We can skip products that would overflow.
var a48 = this.high_ >>> 16;
var a32 = this.high_ & 0xFFFF;
var a16 = this.low_ >>> 16;
var a00 = this.low_ & 0xFFFF;
var b48 = other.high_ >>> 16;
var b32 = other.high_ & 0xFFFF;
var b16 = other.low_ >>> 16;
var b00 = other.low_ & 0xFFFF;
var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
// Schoolbook multiplication over 16-bit limbs with carry after each
// partial product.
c00 += a00 * b00;
c16 += c00 >>> 16;
c00 &= 0xFFFF;
c16 += a16 * b00;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c16 += a00 * b16;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c32 += a32 * b00;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a16 * b16;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a00 * b32;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
c48 &= 0xFFFF;
return Timestamp.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
 * Returns this Timestamp divided by the given one (truncated division).
 *
 * Division is performed by repeatedly approximating the quotient with
 * floating-point division and correcting the approximation downward
 * until the running remainder stays non-negative.
 *
 * @method
 * @param {Timestamp} other Timestamp by which to divide.
 * @return {Timestamp} this Timestamp divided by the given one.
 */
Timestamp.prototype.div = function(other) {
if (other.isZero()) {
throw Error('division by zero');
} else if (this.isZero()) {
return Timestamp.ZERO;
}
// MIN_VALUE cannot be negated, so handle it explicitly on either side.
if (this.equals(Timestamp.MIN_VALUE)) {
if (other.equals(Timestamp.ONE) ||
other.equals(Timestamp.NEG_ONE)) {
return Timestamp.MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE
} else if (other.equals(Timestamp.MIN_VALUE)) {
return Timestamp.ONE;
} else {
// At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
var halfThis = this.shiftRight(1);
var approx = halfThis.div(other).shiftLeft(1);
if (approx.equals(Timestamp.ZERO)) {
return other.isNegative() ? Timestamp.ONE : Timestamp.NEG_ONE;
} else {
// Refine: add the quotient of the remainder to the doubled estimate.
var rem = this.subtract(other.multiply(approx));
var result = approx.add(rem.div(other));
return result;
}
}
} else if (other.equals(Timestamp.MIN_VALUE)) {
// |this| < |MIN_VALUE| here, so the quotient truncates to zero.
return Timestamp.ZERO;
}
// Reduce to the non-negative case by negating operands as needed.
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().div(other.negate());
} else {
return this.negate().div(other).negate();
}
} else if (other.isNegative()) {
return this.div(other.negate()).negate();
}
// Repeat the following until the remainder is less than other: find a
// floating-point that approximates remainder / other *from below*, add this
// into the result, and subtract it from the remainder. It is critical that
// the approximate value is less than or equal to the real value so that the
// remainder never becomes negative.
var res = Timestamp.ZERO;
var rem = this;
while (rem.greaterThanOrEqual(other)) {
// Approximate the result of division. This may be a little greater or
// smaller than the actual value.
var approx = Math.max(1, Math.floor(rem.toNumber() / other.toNumber()));
// We will tweak the approximate result by changing it in the 48-th digit or
// the smallest non-fractional digit, whichever is larger.
var log2 = Math.ceil(Math.log(approx) / Math.LN2);
var delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
// Decrease the approximation until it is smaller than the remainder. Note
// that if it is too large, the product overflows and is negative.
var approxRes = Timestamp.fromNumber(approx);
var approxRem = approxRes.multiply(other);
while (approxRem.isNegative() || approxRem.greaterThan(rem)) {
approx -= delta;
approxRes = Timestamp.fromNumber(approx);
approxRem = approxRes.multiply(other);
}
// We know the answer can't be zero... and actually, zero would cause
// infinite recursion since we would make no progress.
if (approxRes.isZero()) {
approxRes = Timestamp.ONE;
}
res = res.add(approxRes);
rem = rem.subtract(approxRem);
}
return res;
};
/**
 * Computes this Timestamp modulo the given one.
 *
 * @method
 * @param {Timestamp} other Timestamp by which to mod.
 * @return {Timestamp} this Timestamp modulo the given one.
 */
Timestamp.prototype.modulo = function(other) {
  // a mod b == a - (a div b) * b
  var quotient = this.div(other);
  return this.subtract(quotient.multiply(other));
};
/**
 * Computes the bitwise-NOT of this value.
 *
 * @method
 * @return {Timestamp} the bitwise-NOT of this value.
 */
Timestamp.prototype.not = function() {
  // Invert each 32-bit half independently.
  var invertedLow = ~this.low_;
  var invertedHigh = ~this.high_;
  return Timestamp.fromBits(invertedLow, invertedHigh);
};
/**
 * Computes the bitwise-AND of this Timestamp and the given one.
 *
 * @method
 * @param {Timestamp} other the Timestamp with which to AND.
 * @return {Timestamp} the bitwise-AND of this and the other.
 */
Timestamp.prototype.and = function(other) {
  // AND each 32-bit half independently.
  var lowBits = this.low_ & other.low_;
  var highBits = this.high_ & other.high_;
  return Timestamp.fromBits(lowBits, highBits);
};
/**
 * Computes the bitwise-OR of this Timestamp and the given one.
 *
 * @method
 * @param {Timestamp} other the Timestamp with which to OR.
 * @return {Timestamp} the bitwise-OR of this and the other.
 */
Timestamp.prototype.or = function(other) {
  // OR each 32-bit half independently.
  var lowBits = this.low_ | other.low_;
  var highBits = this.high_ | other.high_;
  return Timestamp.fromBits(lowBits, highBits);
};
/**
 * Computes the bitwise-XOR of this Timestamp and the given one.
 *
 * @method
 * @param {Timestamp} other the Timestamp with which to XOR.
 * @return {Timestamp} the bitwise-XOR of this and the other.
 */
Timestamp.prototype.xor = function(other) {
  // XOR each 32-bit half independently.
  var lowBits = this.low_ ^ other.low_;
  var highBits = this.high_ ^ other.high_;
  return Timestamp.fromBits(lowBits, highBits);
};
/**
 * Shifts this Timestamp's bits to the left by the given amount.
 *
 * @method
 * @param {number} numBits the number of bits by which to shift.
 * @return {Timestamp} this shifted to the left by the given amount.
 */
Timestamp.prototype.shiftLeft = function(numBits) {
  // Only the low 6 bits of the shift amount matter (shift is mod 64).
  numBits &= 63;
  if (numBits === 0) {
    return this;
  }
  var low = this.low_;
  if (numBits >= 32) {
    // The entire low word moves into the high word; low becomes zero.
    return Timestamp.fromBits(0, low << (numBits - 32));
  }
  // Bits shifted out of the low word carry into the high word.
  var high = this.high_;
  var carried = low >>> (32 - numBits);
  return Timestamp.fromBits(low << numBits, (high << numBits) | carried);
};
/**
 * Shifts this Timestamp's bits to the right by the given amount
 * (arithmetic shift: the sign bit is replicated into the vacated bits).
 *
 * @method
 * @param {number} numBits the number of bits by which to shift.
 * @return {Timestamp} this shifted to the right by the given amount.
 */
Timestamp.prototype.shiftRight = function(numBits) {
  // Only the low 6 bits of the shift amount matter (shift is mod 64).
  numBits &= 63;
  if (numBits === 0) {
    return this;
  }
  var high = this.high_;
  if (numBits >= 32) {
    // Only sign-extended high bits remain; the new high word is all sign.
    return Timestamp.fromBits(high >> (numBits - 32), high >= 0 ? 0 : -1);
  }
  // Bits shifted out of the high word carry into the low word.
  var low = this.low_;
  var carried = high << (32 - numBits);
  return Timestamp.fromBits((low >>> numBits) | carried, high >> numBits);
};
/**
 * Returns this Timestamp with bits shifted to the right by the given amount, with zeros placed into the new leading bits (logical shift, no sign extension).
 *
 * @method
 * @param {number} numBits the number of bits by which to shift.
 * @return {Timestamp} this shifted to the right by the given amount, with zeros placed into the new leading bits.
 */
Timestamp.prototype.shiftRightUnsigned = function(numBits) {
// Only the low 6 bits of the shift amount are significant (shift mod 64).
numBits &= 63;
if (numBits == 0) {
return this;
} else {
var high = this.high_;
if (numBits < 32) {
var low = this.low_;
// Bits shifted out of the high word carry into the low word.
return Timestamp.fromBits(
(low >>> numBits) | (high << (32 - numBits)),
high >>> numBits);
} else if (numBits == 32) {
// Special-cased because in JavaScript `x >>> 32` is a no-op.
return Timestamp.fromBits(high, 0);
} else {
return Timestamp.fromBits(high >>> (numBits - 32), 0);
}
}
};
/**
 * Builds a Timestamp representing the given (32-bit) integer value.
 * Small values (-128..127) are cached and reused.
 *
 * @method
 * @param {number} value the 32-bit integer in question.
 * @return {Timestamp} the corresponding Timestamp value.
 */
Timestamp.fromInt = function(value) {
  // Test the cacheable range once, up front.
  var cacheable = -128 <= value && value < 128;
  if (cacheable) {
    var cached = Timestamp.INT_CACHE_[value];
    if (cached) {
      return cached;
    }
  }
  // Sign-extend the 32-bit value into the high word.
  var obj = new Timestamp(value | 0, value < 0 ? -1 : 0);
  if (cacheable) {
    Timestamp.INT_CACHE_[value] = obj;
  }
  return obj;
};
/**
 * Builds a Timestamp representing the given value, provided that it is a
 * finite number. Otherwise, zero is returned. Values outside the signed
 * 64-bit range are clamped to MIN_VALUE / MAX_VALUE.
 *
 * @method
 * @param {number} value the number in question.
 * @return {Timestamp} the corresponding Timestamp value.
 */
Timestamp.fromNumber = function(value) {
  // NaN and +/-Infinity map to zero.
  if (isNaN(value) || !isFinite(value)) {
    return Timestamp.ZERO;
  }
  // Clamp values outside the representable signed 64-bit range.
  if (value <= -Timestamp.TWO_PWR_63_DBL_) {
    return Timestamp.MIN_VALUE;
  }
  if (value + 1 >= Timestamp.TWO_PWR_63_DBL_) {
    return Timestamp.MAX_VALUE;
  }
  // Handle negatives via the positive case.
  if (value < 0) {
    return Timestamp.fromNumber(-value).negate();
  }
  // Split the non-negative value into 32-bit low and high words.
  var lowBits = (value % Timestamp.TWO_PWR_32_DBL_) | 0;
  var highBits = (value / Timestamp.TWO_PWR_32_DBL_) | 0;
  return new Timestamp(lowBits, highBits);
};
/**
 * Returns a Timestamp representing the 64-bit integer that comes by concatenating the given high and low bits. Each is assumed to use 32 bits.
 *
 * @method
 * @param {number} lowBits the low 32-bits.
 * @param {number} highBits the high 32-bits.
 * @return {Timestamp} the corresponding Timestamp value.
 */
Timestamp.fromBits = function(lowBits, highBits) {
// Thin wrapper around the constructor; no caching or validation.
return new Timestamp(lowBits, highBits);
};
/**
 * Parses a Timestamp from the given string, written in the given radix.
 *
 * @method
 * @param {string} str the textual representation of the Timestamp.
 * @param {number} opt_radix the radix in which the text is written
 *     (2..36, defaulting to 10).
 * @return {Timestamp} the corresponding Timestamp value.
 */
Timestamp.fromString = function(str, opt_radix) {
  if (str.length == 0) {
    throw Error('number format error: empty string');
  }
  var radix = opt_radix || 10;
  if (radix < 2 || 36 < radix) {
    throw Error('radix out of range: ' + radix);
  }
  // A leading minus sign parses the rest and negates it.
  if (str.charAt(0) == '-') {
    return Timestamp.fromString(str.substring(1), radix).negate();
  }
  if (str.indexOf('-') >= 0) {
    throw Error('number format error: interior "-" character: ' + str);
  }
  // Consume 8 digits per iteration to minimize calls to the very
  // expensive emulated multiply/add operations.
  var radixToPower = Timestamp.fromNumber(Math.pow(radix, 8));
  var result = Timestamp.ZERO;
  var length = str.length;
  var pos = 0;
  while (pos < length) {
    var size = Math.min(8, length - pos);
    var chunk = parseInt(str.substring(pos, pos + size), radix);
    // A short trailing chunk shifts by radix^size instead of radix^8.
    var shift = (size < 8)
        ? Timestamp.fromNumber(Math.pow(radix, size))
        : radixToPower;
    result = result.multiply(shift).add(Timestamp.fromNumber(chunk));
    pos += size;
  }
  return result;
};
// NOTE: Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the
// from* methods on which they depend.
/**
 * A cache of the Timestamp representations of small integer values
 * (-128..127), populated lazily by Timestamp.fromInt.
 * @type {Object}
 * @ignore
 */
Timestamp.INT_CACHE_ = {};
// NOTE: the compiler should inline these constant values below and then remove
// these variables, so there should be no runtime penalty for these.
/**
 * Number used repeated below in calculations. This must appear before the
 * first call to any from* function below.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_16_DBL_ = 1 << 16;
/**
 * 2^24, the largest power of two below the small-multiply threshold.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_24_DBL_ = 1 << 24;
/**
 * 2^32, used to split doubles into 32-bit low/high words.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_32_DBL_ = Timestamp.TWO_PWR_16_DBL_ * Timestamp.TWO_PWR_16_DBL_;
/**
 * 2^31.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_31_DBL_ = Timestamp.TWO_PWR_32_DBL_ / 2;
/**
 * 2^48.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_48_DBL_ = Timestamp.TWO_PWR_32_DBL_ * Timestamp.TWO_PWR_16_DBL_;
/**
 * 2^64.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_64_DBL_ = Timestamp.TWO_PWR_32_DBL_ * Timestamp.TWO_PWR_32_DBL_;
/**
 * 2^63, the magnitude bound of the signed 64-bit range.
 * @type {number}
 * @ignore
 */
Timestamp.TWO_PWR_63_DBL_ = Timestamp.TWO_PWR_64_DBL_ / 2;
/** @type {Timestamp} */
Timestamp.ZERO = Timestamp.fromInt(0);
/** @type {Timestamp} */
Timestamp.ONE = Timestamp.fromInt(1);
/** @type {Timestamp} */
Timestamp.NEG_ONE = Timestamp.fromInt(-1);
/** @type {Timestamp} */
Timestamp.MAX_VALUE =
Timestamp.fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0);
/** @type {Timestamp} */
Timestamp.MIN_VALUE = Timestamp.fromBits(0, 0x80000000 | 0);
/**
 * Threshold below which multiply can fall back to float arithmetic.
 * @type {Timestamp}
 * @ignore
 */
Timestamp.TWO_PWR_24_ = Timestamp.fromInt(1 << 24);
/**
 * Expose.
 */
module.exports = Timestamp;
module.exports.Timestamp = Timestamp;

View File

@@ -0,0 +1,45 @@
# Logs
logs
*.log
# Runtime data
pids
*.pid
*.seed
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Compiled binary addons (http://nodejs.org/api/addons.html)
build/Release
# Dependency directory
# Commenting this out is preferred by some people, see
# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git-
node_modules
# Users Environment Variables
.lock-wscript
.DS_Store
*.swp
*.seed
.project
.settings
./data
node_modules/
output
build
.bin
npm-debug.log
builderror.log
bson.sublime-project
bson.sublime-workspace

View File

@@ -0,0 +1,3 @@
0.1.0 2015-03-26
-----------------
- First push to npm; cleaned up the project, leaving only the C++ source and test harnesses.

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,16 @@
NODE = node
NPM = npm
NODEUNIT = node_modules/nodeunit/bin/nodeunit
all: clean node_gyp
test: clean node_gyp
npm test
node_gyp: clean
node-gyp configure build
clean:
node-gyp clean
.PHONY: all

View File

@@ -0,0 +1,4 @@
Javascript + C++ BSON parser
============================
This module contains the C++ BSON parser only and is not meant to be used in isolation from the `bson` NPM module. It lives in its own module so it can be an optional dependency for the `bson` module.

View File

@@ -0,0 +1,18 @@
{
'targets': [
{
'target_name': 'bson',
'sources': [ 'ext/bson.cc' ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'include_dirs': [ '<!(node -e "require(\'nan\')")' ],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
}
}]
]
}
]
}

View File

@@ -0,0 +1,332 @@
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := ..
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= .
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= Release
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
CC.target ?= $(CC)
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= $(CXX)
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= $(LINK)
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
#
# Note: flock is used to serialize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an environment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= flock $(builddir)/linker.lock $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= gcc
CFLAGS.host ?=
CXX.host ?= g++
CXXFLAGS.host ?=
LINK.host ?= $(CXX.host)
LDFLAGS.host ?=
AR.host ?= ar
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),?,$1)
unreplace_spaces = $(subst ?,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters.
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp -af "$<" "$@"
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%s\n' '$(call escape_quotes,$(1))'
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain ? instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\
for p in $(POSTBUILDS); do\
eval $$p;\
E=$$?;\
if [ $$E -ne 0 ]; then\
break;\
fi;\
done;\
if [ $$E -ne 0 ]; then\
rm -rf "$@";\
exit $$E;\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains ? for
# spaces already and dirx strips the ? characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word 1,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "all" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: all
all:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
TOOLSET := target
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
ifeq ($(strip $(foreach prefix,$(NO_LOAD),\
$(findstring $(join ^,$(prefix)),\
$(join ^,bson.target.mk)))),)
include bson.target.mk
endif
quiet_cmd_regen_makefile = ACTION Regenerating $@
cmd_regen_makefile = cd $(srcdir); /usr/lib/node_modules/npm/node_modules/node-gyp/gyp/gyp_main.py -fmake --ignore-environment "--toplevel-dir=." -I/var/www/server/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/node_modules/bson-ext/build/config.gypi -I/usr/lib/node_modules/npm/node_modules/node-gyp/addon.gypi -I/home/kasperrt/.node-gyp/0.10.33/common.gypi "--depth=." "-Goutput_dir=." "--generator-output=build" "-Dlibrary=shared_library" "-Dvisibility=default" "-Dnode_root_dir=/home/kasperrt/.node-gyp/0.10.33" "-Dmodule_root_dir=/var/www/server/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/node_modules/bson-ext" binding.gyp
Makefile: $(srcdir)/../../../../../../../../../../../home/kasperrt/.node-gyp/0.10.33/common.gypi $(srcdir)/build/config.gypi $(srcdir)/binding.gyp $(srcdir)/../../../../../../../../../../../usr/lib/node_modules/npm/node_modules/node-gyp/addon.gypi
$(call do_cmd,regen_makefile)
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif

View File

@@ -0,0 +1 @@
cmd_Release/bson.node := rm -rf "Release/bson.node" && cp -af "Release/obj.target/bson.node" "Release/bson.node"

View File

@@ -0,0 +1 @@
cmd_Release/obj.target/bson.node := flock ./Release/linker.lock g++ -shared -pthread -rdynamic -m64 -Wl,-soname=bson.node -o Release/obj.target/bson.node -Wl,--start-group Release/obj.target/bson/ext/bson.o -Wl,--end-group

View File

@@ -0,0 +1,33 @@
cmd_Release/obj.target/bson/ext/bson.o := g++ '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-DBUILDING_NODE_EXTENSION' -I/home/kasperrt/.node-gyp/0.10.33/src -I/home/kasperrt/.node-gyp/0.10.33/deps/uv/include -I/home/kasperrt/.node-gyp/0.10.33/deps/v8/include -I../node_modules/nan -fPIC -Wall -Wextra -Wno-unused-parameter -pthread -m64 -O2 -fno-strict-aliasing -fno-tree-vrp -fno-omit-frame-pointer -fno-rtti -MMD -MF ./Release/.deps/Release/obj.target/bson/ext/bson.o.d.raw -c -o Release/obj.target/bson/ext/bson.o ../ext/bson.cc
Release/obj.target/bson/ext/bson.o: ../ext/bson.cc \
/home/kasperrt/.node-gyp/0.10.33/deps/v8/include/v8.h \
/home/kasperrt/.node-gyp/0.10.33/deps/v8/include/v8stdint.h \
/home/kasperrt/.node-gyp/0.10.33/src/node.h \
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv.h \
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/uv-unix.h \
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/ngx-queue.h \
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/uv-linux.h \
/home/kasperrt/.node-gyp/0.10.33/src/node_object_wrap.h \
/home/kasperrt/.node-gyp/0.10.33/src/node.h \
/home/kasperrt/.node-gyp/0.10.33/src/node_version.h \
/home/kasperrt/.node-gyp/0.10.33/src/node_buffer.h ../ext/bson.h \
/home/kasperrt/.node-gyp/0.10.33/src/node_object_wrap.h \
../node_modules/nan/nan.h ../node_modules/nan/nan_new.h \
../node_modules/nan/nan_implementation_pre_12_inl.h
../ext/bson.cc:
/home/kasperrt/.node-gyp/0.10.33/deps/v8/include/v8.h:
/home/kasperrt/.node-gyp/0.10.33/deps/v8/include/v8stdint.h:
/home/kasperrt/.node-gyp/0.10.33/src/node.h:
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv.h:
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/uv-unix.h:
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/ngx-queue.h:
/home/kasperrt/.node-gyp/0.10.33/deps/uv/include/uv-private/uv-linux.h:
/home/kasperrt/.node-gyp/0.10.33/src/node_object_wrap.h:
/home/kasperrt/.node-gyp/0.10.33/src/node.h:
/home/kasperrt/.node-gyp/0.10.33/src/node_version.h:
/home/kasperrt/.node-gyp/0.10.33/src/node_buffer.h:
../ext/bson.h:
/home/kasperrt/.node-gyp/0.10.33/src/node_object_wrap.h:
../node_modules/nan/nan.h:
../node_modules/nan/nan_new.h:
../node_modules/nan/nan_implementation_pre_12_inl.h:

View File

@@ -0,0 +1,6 @@
# This file is generated by gyp; do not edit.
export builddir_name ?= ./build/.
.PHONY: all
all:
$(MAKE) bson

View File

@@ -0,0 +1,130 @@
# This file is generated by gyp; do not edit.
TOOLSET := target
TARGET := bson
DEFS_Debug := \
'-D_LARGEFILE_SOURCE' \
'-D_FILE_OFFSET_BITS=64' \
'-DBUILDING_NODE_EXTENSION' \
'-DDEBUG' \
'-D_DEBUG'
# Flags passed to all source files.
CFLAGS_Debug := \
-fPIC \
-Wall \
-Wextra \
-Wno-unused-parameter \
-pthread \
-m64 \
-g \
-O0
# Flags passed to only C files.
CFLAGS_C_Debug :=
# Flags passed to only C++ files.
CFLAGS_CC_Debug := \
-fno-rtti
INCS_Debug := \
-I/home/kasperrt/.node-gyp/0.10.33/src \
-I/home/kasperrt/.node-gyp/0.10.33/deps/uv/include \
-I/home/kasperrt/.node-gyp/0.10.33/deps/v8/include \
-I$(srcdir)/node_modules/nan
DEFS_Release := \
'-D_LARGEFILE_SOURCE' \
'-D_FILE_OFFSET_BITS=64' \
'-DBUILDING_NODE_EXTENSION'
# Flags passed to all source files.
CFLAGS_Release := \
-fPIC \
-Wall \
-Wextra \
-Wno-unused-parameter \
-pthread \
-m64 \
-O2 \
-fno-strict-aliasing \
-fno-tree-vrp \
-fno-omit-frame-pointer
# Flags passed to only C files.
CFLAGS_C_Release :=
# Flags passed to only C++ files.
CFLAGS_CC_Release := \
-fno-rtti
INCS_Release := \
-I/home/kasperrt/.node-gyp/0.10.33/src \
-I/home/kasperrt/.node-gyp/0.10.33/deps/uv/include \
-I/home/kasperrt/.node-gyp/0.10.33/deps/v8/include \
-I$(srcdir)/node_modules/nan
OBJS := \
$(obj).target/$(TARGET)/ext/bson.o
# Add to the list of files we specially track dependencies for.
all_deps += $(OBJS)
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.
$(OBJS): TOOLSET := $(TOOLSET)
$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE))
$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE))
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# End of this set of suffix rules
### Rules for final target.
LDFLAGS_Debug := \
-pthread \
-rdynamic \
-m64
LDFLAGS_Release := \
-pthread \
-rdynamic \
-m64
LIBS :=
$(obj).target/bson.node: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))
$(obj).target/bson.node: LIBS := $(LIBS)
$(obj).target/bson.node: TOOLSET := $(TOOLSET)
$(obj).target/bson.node: $(OBJS) FORCE_DO_CMD
$(call do_cmd,solink_module)
all_deps += $(obj).target/bson.node
# Add target alias
.PHONY: bson
bson: $(builddir)/bson.node
# Copy this to the executable output path.
$(builddir)/bson.node: TOOLSET := $(TOOLSET)
$(builddir)/bson.node: $(obj).target/bson.node FORCE_DO_CMD
$(call do_cmd,copy)
all_deps += $(builddir)/bson.node
# Short alias for building this executable.
.PHONY: bson.node
bson.node: $(obj).target/bson.node $(builddir)/bson.node
# Add executable to "all" target.
.PHONY: all
all: $(builddir)/bson.node

View File

@@ -0,0 +1,122 @@
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/kasperrt/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.33 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/kasperrt/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/zsh",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/kasperrt/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.10.33",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/kasperrt/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}

Some files were not shown because too many files have changed in this diff Show More