Mirror of https://github.com/KevinMidboe/zoff.git, synced 2026-01-01 15:16:17 +00:00
Started on node.js + socket.io + MongoDB on the backend for more responsiveness
347 server/node_modules/mongodb/lib/admin.js (generated, vendored, executable file)
@@ -0,0 +1,347 @@
"use strict";

var toError = require('./utils').toError;

/**
 * @fileOverview The **Admin** class is an internal class that allows convenient access to
 * the admin functionality and commands for MongoDB.
 *
 * **ADMIN Cannot directly be instantiated**
 * @example
 * var MongoClient = require('mongodb').MongoClient,
 *   test = require('assert');
 * // Connection url
 * var url = 'mongodb://localhost:27017/test';
 * // Connect using MongoClient
 * MongoClient.connect(url, function(err, db) {
 *   // Use the admin database for the operation
 *   var adminDb = db.admin();
 *
 *   // List all the available databases
 *   adminDb.listDatabases(function(err, dbs) {
 *     test.equal(null, err);
 *     test.ok(dbs.databases.length > 0);
 *     db.close();
 *   });
 * });
 */

/**
 * Create a new Admin instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @return {Admin} an Admin instance.
 */
var Admin = function(db, topology) {
  if(!(this instanceof Admin)) return new Admin(db, topology);
  var self = this;

  // Internal state
  this.s = {
      db: db
    , topology: topology
  }
}

/**
 * The callback format for results
 * @callback Admin~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {object} result The result object if the command was executed successfully.
 */

/**
 * Execute a command
 * @method
 * @param {object} command The command hash
 * @param {object} [options=null] Optional settings.
 * @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.command = function(command, options, callback) {
  var args = Array.prototype.slice.call(arguments, 1);
  callback = args.pop();
  options = args.length ? args.shift() : {};

  // Execute a command
  this.s.db.executeDbAdminCommand(command, options, function(err, doc) {
    return callback != null ? callback(err, doc) : null;
  });
}

/**
 * Retrieve the server build information for the current
 * instance of the db client
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.buildInfo = function(callback) {
  this.serverInfo(callback);
}

/**
 * Retrieve the server information for the current
 * instance of the db client
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.serverInfo = function(callback) {
  this.s.db.executeDbAdminCommand({buildinfo:1}, function(err, doc) {
    if(err != null) return callback(err, null);
    return callback(null, doc);
  });
}

/**
 * Retrieve this db's server status.
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.serverStatus = function(callback) {
  var self = this;

  this.s.db.executeDbAdminCommand({serverStatus: 1}, function(err, doc) {
    if(err == null && doc.ok === 1) {
      callback(null, doc);
    } else {
      if(err) return callback(err, false);
      return callback(toError(doc), false);
    }
  });
};

/**
 * Retrieve the current profiling level for MongoDB
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.profilingLevel = function(callback) {
  var self = this;

  this.s.db.executeDbAdminCommand({profile:-1}, function(err, doc) {
    if(err == null && doc.ok === 1) {
      var was = doc.was;
      if(was == 0) return callback(null, "off");
      if(was == 1) return callback(null, "slow_only");
      if(was == 2) return callback(null, "all");
      return callback(new Error("Error: illegal profiling level value " + was), null);
    } else {
      err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
    }
  });
};

/**
 * Ping the MongoDB server and retrieve results
 *
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.ping = function(options, callback) {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.db.executeDbAdminCommand({ping: 1}, args.pop());
}

/**
 * Authenticate a user against the server.
 * @method
 * @param {string} username The username.
 * @param {string} [password] The password.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.authenticate = function(username, password, callback) {
  this.s.db.authenticate(username, password, {authdb: 'admin'}, function(err, doc) {
    return callback(err, doc);
  })
}

/**
 * Log out the user from the server, fire off on all connections, and remove all auth info
 * @method
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.logout = function(callback) {
  this.s.db.logout({authdb: 'admin'}, function(err, doc) {
    return callback(err, doc);
  })
}

/**
 * Add a user to the database.
 * @method
 * @param {string} username The username.
 * @param {string} password The password.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {object} [options.customData=null] Custom data associated with the user (only MongoDB 2.6 or higher)
 * @param {object[]} [options.roles=null] Roles associated with the created user (only MongoDB 2.6 or higher)
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.addUser = function(username, password, options, callback) {
  var args = Array.prototype.slice.call(arguments, 2);
  callback = args.pop();
  options = args.length ? args.shift() : {};
  // Set the db name to admin
  options.dbName = 'admin';
  // Add user
  this.s.db.addUser(username, password, options, function(err, doc) {
    return callback(err, doc);
  })
}

/**
 * Remove a user from a database
 * @method
 * @param {string} username The username.
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {Admin~resultCallback} callback The command result callback
 * @return {null}
 */
Admin.prototype.removeUser = function(username, options, callback) {
  var self = this;
  var args = Array.prototype.slice.call(arguments, 1);
  callback = args.pop();
  options = args.length ? args.shift() : {};
  options.dbName = 'admin';

  this.s.db.removeUser(username, options, function(err, doc) {
    return callback(err, doc);
  })
}

/**
 * Set the current profiling level of MongoDB
 *
 * @param {string} level The new profiling level (off, slow_only, all).
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.setProfilingLevel = function(level, callback) {
  var self = this;
  var command = {};
  var profile = 0;

  if(level == "off") {
    profile = 0;
  } else if(level == "slow_only") {
    profile = 1;
  } else if(level == "all") {
    profile = 2;
  } else {
    return callback(new Error("Error: illegal profiling level value " + level));
  }

  // Set up the profile number
  command['profile'] = profile;

  this.s.db.executeDbAdminCommand(command, function(err, doc) {
    if(err == null && doc.ok === 1)
      return callback(null, level);
    return err != null ? callback(err, null) : callback(new Error("Error with profile command"), null);
  });
};
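
// Usage sketch (not part of the vendored file): round-tripping the profiling
// level with the two methods above. Assumes `db` is an open connection from
// MongoClient.connect, as in the @example at the top of this file.
//
//   var adminDb = db.admin();
//   adminDb.setProfilingLevel("slow_only", function(err, level) {
//     // the callback echoes the string level that was set (profile: 1)
//     adminDb.profilingLevel(function(err, current) {
//       console.log(current); // "off" (0), "slow_only" (1) or "all" (2)
//     });
//   });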

/**
 * Retrieve the current profiling information for MongoDB
 *
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.profilingInfo = function(callback) {
  try {
    this.s.topology.cursor("admin.system.profile", { find: 'system.profile', query: {}}, {}).toArray(callback);
  } catch (err) {
    return callback(err, null);
  }
};

/**
 * Validate an existing collection
 *
 * @param {string} collectionName The name of the collection to validate.
 * @param {object} [options=null] Optional settings.
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.validateCollection = function(collectionName, options, callback) {
  var args = Array.prototype.slice.call(arguments, 1);
  callback = args.pop();
  options = args.length ? args.shift() : {};

  var self = this;
  var command = {validate: collectionName};
  var keys = Object.keys(options);

  // Decorate command with extra options
  for(var i = 0; i < keys.length; i++) {
    if(options.hasOwnProperty(keys[i])) {
      command[keys[i]] = options[keys[i]];
    }
  }

  this.s.db.command(command, function(err, doc) {
    if(err != null) return callback(err, null);

    if(doc.ok === 0)
      return callback(new Error("Error with validate command"), null);
    if(doc.result != null && doc.result.constructor != String)
      return callback(new Error("Error with validation data"), null);
    if(doc.result != null && doc.result.match(/exception|corrupt/) != null)
      return callback(new Error("Error: invalid collection " + collectionName), null);
    if(doc.valid != null && !doc.valid)
      return callback(new Error("Error: invalid collection " + collectionName), null);

    return callback(null, doc);
  });
};

/**
 * List the available databases
 *
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.listDatabases = function(callback) {
  this.s.db.executeDbAdminCommand({listDatabases:1}, {}, function(err, doc) {
    if(err != null) return callback(err, null);
    return callback(null, doc);
  });
}

/**
 * Get ReplicaSet status
 *
 * @param {Admin~resultCallback} callback The command result callback.
 * @return {null}
 */
Admin.prototype.replSetGetStatus = function(callback) {
  var self = this;

  this.s.db.executeDbAdminCommand({replSetGetStatus:1}, function(err, doc) {
    if(err == null && doc.ok === 1)
      return callback(null, doc);
    if(err) return callback(err, false);
    return callback(toError(doc), false);
  });
};

module.exports = Admin;
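
A minimal usage sketch for the class above; the connection boilerplate follows the @example in the file header, and a reachable mongod on localhost is assumed:

var MongoClient = require('mongodb').MongoClient;

MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
  if(err) throw err;
  var adminDb = db.admin();

  // serverStatus yields either the status document or, via toError,
  // an error built from a non-ok server response
  adminDb.serverStatus(function(err, status) {
    if(err) throw err;
    console.log(status.host, status.uptime);
    db.close();
  });
});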
397 server/node_modules/mongodb/lib/aggregation_cursor.js (generated, vendored, executable file)
@@ -0,0 +1,397 @@
"use strict";

var inherits = require('util').inherits
  , f = require('util').format
  , toError = require('./utils').toError
  , getSingleProperty = require('./utils').getSingleProperty
  , formattedOrderClause = require('./utils').formattedOrderClause
  , handleCallback = require('./utils').handleCallback
  , Logger = require('mongodb-core').Logger
  , EventEmitter = require('events').EventEmitter
  , ReadPreference = require('./read_preference')
  , MongoError = require('mongodb-core').MongoError
  , Readable = require('stream').Readable || require('readable-stream').Readable
  // , CoreCursor = require('mongodb-core').Cursor
  , CoreCursor = require('./cursor')
  , Query = require('mongodb-core').Query
  , CoreReadPreference = require('mongodb-core').ReadPreference;

/**
 * @fileOverview The **AggregationCursor** class is an internal class that embodies an aggregation cursor on MongoDB,
 * allowing for iteration over the results returned from the underlying query. It supports
 * one-by-one document iteration, conversion to an array, and can be iterated as a Node 0.10.X
 * or higher stream.
 *
 * **AGGREGATIONCURSOR Cannot directly be instantiated**
 * @example
 * var MongoClient = require('mongodb').MongoClient,
 *   test = require('assert');
 * // Connection url
 * var url = 'mongodb://localhost:27017/test';
 * // Connect using MongoClient
 * MongoClient.connect(url, function(err, db) {
 *   // Create a collection we want to drop later
 *   var col = db.collection('createIndexExample1');
 *   // Insert a bunch of documents
 *   col.insert([{a:1, b:1}
 *     , {a:2, b:2}, {a:3, b:3}
 *     , {a:4, b:4}], {w:1}, function(err, result) {
 *     test.equal(null, err);
 *
 *     // Aggregate all the documents through a cursor
 *     col.aggregate([], {cursor: {}}).toArray(function(err, items) {
 *       test.equal(null, err);
 *       test.equal(4, items.length);
 *       db.close();
 *     });
 *   });
 * });
 */

/**
 * Namespace provided by the stream module.
 * @external Readable
 */

/**
 * Creates a new Aggregation Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @extends external:Readable
 * @fires AggregationCursor#data
 * @fires AggregationCursor#end
 * @fires AggregationCursor#close
 * @fires AggregationCursor#readable
 * @return {AggregationCursor} an AggregationCursor instance.
 */
var AggregationCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var self = this;
  var state = AggregationCursor.INIT;
  var streamOptions = {};

  // MaxTimeMS
  var maxTimeMS = null;

  // Set up
  Readable.call(this, {objectMode: true});

  // Internal state
  this.s = {
    // MaxTimeMS
      maxTimeMS: maxTimeMS
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology Options
    , topologyOptions: topologyOptions
  }
}

/**
 * AggregationCursor stream data event, fired for each document in the cursor.
 *
 * @event AggregationCursor#data
 * @type {object}
 */

/**
 * AggregationCursor stream end event
 *
 * @event AggregationCursor#end
 * @type {null}
 */

/**
 * AggregationCursor stream close event
 *
 * @event AggregationCursor#close
 * @type {null}
 */

/**
 * AggregationCursor stream readable event
 *
 * @event AggregationCursor#readable
 * @type {null}
 */

// // Extend the Cursor
// inherits(AggregationCursor, CoreCursor);

// Inherit from Readable
inherits(AggregationCursor, Readable);

// Extend the Cursor
for(var name in CoreCursor.prototype) {
  AggregationCursor.prototype[name] = CoreCursor.prototype[name];
}

/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.batchSize = function(value) {
  if(this.s.state == AggregationCursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("batchSize requires an integer");
  if(this.s.cmd.cursor) this.s.cmd.cursor.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}

/**
 * Add a geoNear stage to the aggregation pipeline
 * @method
 * @param {object} document The geoNear stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.geoNear = function(document) {
  this.s.cmd.pipeline.push({$geoNear: document});
  return this;
}

/**
 * Add a group stage to the aggregation pipeline
 * @method
 * @param {object} document The group stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.group = function(document) {
  this.s.cmd.pipeline.push({$group: document});
  return this;
}

/**
 * Add a limit stage to the aggregation pipeline
 * @method
 * @param {number} value The stage limit value.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.limit = function(value) {
  this.s.cmd.pipeline.push({$limit: value});
  return this;
}

/**
 * Add a match stage to the aggregation pipeline
 * @method
 * @param {object} document The match stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.match = function(document) {
  this.s.cmd.pipeline.push({$match: document});
  return this;
}

/**
 * Set the maxTimeMS option on the aggregation command
 * @method
 * @param {number} value The maxTimeMS value in milliseconds.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.maxTimeMS = function(value) {
  if(this.s.topology.lastIsMaster().minWireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  return this;
}

/**
 * Add an out stage to the aggregation pipeline
 * @method
 * @param {string} destination The destination collection name.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.out = function(destination) {
  this.s.cmd.pipeline.push({$out: destination});
  return this;
}

/**
 * Add a project stage to the aggregation pipeline
 * @method
 * @param {object} document The project stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.project = function(document) {
  this.s.cmd.pipeline.push({$project: document});
  return this;
}

/**
 * Add a redact stage to the aggregation pipeline
 * @method
 * @param {object} document The redact stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.redact = function(document) {
  this.s.cmd.pipeline.push({$redact: document});
  return this;
}

/**
 * Add a skip stage to the aggregation pipeline
 * @method
 * @param {number} value The stage skip value.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.skip = function(value) {
  this.s.cmd.pipeline.push({$skip: value});
  return this;
}

/**
 * Add a sort stage to the aggregation pipeline
 * @method
 * @param {object} document The sort stage document.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.sort = function(document) {
  this.s.cmd.pipeline.push({$sort: document});
  return this;
}

/**
 * Add an unwind stage to the aggregation pipeline
 * @method
 * @param {string} field The unwind field name.
 * @return {AggregationCursor}
 */
AggregationCursor.prototype.unwind = function(field) {
  this.s.cmd.pipeline.push({$unwind: field});
  return this;
}
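
// Usage sketch (not part of the vendored file): the stage methods above are a
// thin builder over this.s.cmd.pipeline, so the two calls below build the same
// command. Assumes `col` is a Collection whose aggregate() returns this cursor.
//
//   col.aggregate([{$match: {a: {$gt: 1}}}, {$group: {_id: '$b', n: {$sum: 1}}}], {cursor: {}});
//
//   col.aggregate([], {cursor: {}})
//     .match({a: {$gt: 1}})
//     .group({_id: '$b', n: {$sum: 1}});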

AggregationCursor.prototype.get = AggregationCursor.prototype.toArray;

/**
 * Get the next available document from the cursor; returns null if no more documents are available.
 * @function AggregationCursor.prototype.next
 * @param {AggregationCursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * Set the new batchSize of the cursor
 * @function AggregationCursor.prototype.setBatchSize
 * @param {number} value The new batchSize for the cursor
 * @return {null}
 */

/**
 * Get the batchSize of the cursor
 * @function AggregationCursor.prototype.batchSize
 * @param {number} value The current batchSize for the cursor
 * @return {null}
 */

/**
 * The callback format for results
 * @callback AggregationCursor~toArrayResultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {object[]} documents All the documents that satisfy the cursor.
 */

/**
 * Returns an array of documents. The caller is responsible for making sure that there
 * is enough memory to store the results. Note that the array contains only partial
 * results when this cursor had been previously accessed. In that case,
 * cursor.rewind() can be used to reset the cursor.
 * @method AggregationCursor.prototype.toArray
 * @param {AggregationCursor~toArrayResultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * The callback format for results
 * @callback AggregationCursor~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {(object|null)} result The result object if the command was executed successfully.
 */

/**
 * Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
 * not all of the elements will be iterated if this cursor had been previously accessed.
 * In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
 * **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
 * at any given time if batch size is specified. Otherwise, the caller is responsible
 * for making sure that the entire result can fit in memory.
 * @method AggregationCursor.prototype.each
 * @param {AggregationCursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * Close the cursor, sending a KillCursor command and emitting close.
 * @method AggregationCursor.prototype.close
 * @param {AggregationCursor~resultCallback} [callback] The result callback.
 * @return {null}
 */

/**
 * Is the cursor closed
 * @method AggregationCursor.prototype.isClosed
 * @return {boolean}
 */

/**
 * Execute the explain for the cursor
 * @method AggregationCursor.prototype.explain
 * @param {AggregationCursor~resultCallback} [callback] The result callback.
 * @return {null}
 */

/**
 * Clone the cursor
 * @function AggregationCursor.prototype.clone
 * @return {AggregationCursor}
 */

/**
 * Resets the cursor
 * @function AggregationCursor.prototype.rewind
 * @return {AggregationCursor}
 */

/**
 * The callback format for the forEach iterator method
 * @callback AggregationCursor~iteratorCallback
 * @param {Object} doc An emitted document for the iterator
 */

/**
 * The callback error format for the forEach iterator method
 * @callback AggregationCursor~endCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 */

/*
 * Iterates over all the documents for this cursor using the iterator, callback pattern.
 * @method AggregationCursor.prototype.forEach
 * @param {AggregationCursor~iteratorCallback} iterator The iteration callback.
 * @param {AggregationCursor~endCallback} callback The end callback.
 * @throws {MongoError}
 * @return {null}
 */

AggregationCursor.INIT = 0;
AggregationCursor.OPEN = 1;
AggregationCursor.CLOSED = 2;

module.exports = AggregationCursor;
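
Because the class inherits from Readable and fires the events documented above, results can also be consumed as a stream. A sketch, assuming `col` is an existing Collection and `db` the open connection:

var cursor = col.aggregate([{$match: {}}], {cursor: {batchSize: 100}});

cursor.on('data', function(doc) {
  console.log(doc);   // fired once per result document
});

cursor.on('end', function() {
  db.close();         // no more documents
});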
393 server/node_modules/mongodb/lib/bulk/common.js (generated, vendored, executable file)
@@ -0,0 +1,393 @@
"use strict";

var utils = require('../utils');

// Error codes
var UNKNOWN_ERROR = 8;
var INVALID_BSON_ERROR = 22;
var WRITE_CONCERN_ERROR = 64;
var MULTIPLE_ERROR = 65;

// Insert types
var INSERT = 1;
var UPDATE = 2;
var REMOVE = 3;

// Get write concern
var writeConcern = function(target, col, options) {
  if(options.w != null || options.j != null || options.fsync != null) {
    target.writeConcern = options;
  } else if(col.writeConcern.w != null || col.writeConcern.j != null || col.writeConcern.fsync != null) {
    target.writeConcern = col.writeConcern;
  }

  return target
}

/**
 * Helper function to define properties
 * @ignore
 */
var defineReadOnlyProperty = function(self, name, value) {
  Object.defineProperty(self, name, {
    enumerable: true
    , get: function() {
      return value;
    }
  });
}

/**
 * Keeps the state of an unordered batch so we can rewrite the results
 * correctly after command execution
 * @ignore
 */
var Batch = function(batchType, originalZeroIndex) {
  this.originalZeroIndex = originalZeroIndex;
  this.currentIndex = 0;
  this.originalIndexes = [];
  this.batchType = batchType;
  this.operations = [];
  this.size = 0;
  this.sizeBytes = 0;
}

/**
 * Wraps a legacy operation so we can correctly rewrite its error
 * @ignore
 */
var LegacyOp = function(batchType, operation, index) {
  this.batchType = batchType;
  this.index = index;
  this.operation = operation;
}

/**
 * Create a new BulkWriteResult instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {boolean} ok Did the bulk operation execute correctly
 * @property {number} nInserted Number of inserted documents
 * @property {number} nMatched Number of documents matched by update operations
 * @property {number} nUpserted Number of upserted documents
 * @property {number} nModified Number of documents updated physically on disk
 * @property {number} nRemoved Number of removed documents
 * @return {BulkWriteResult} a BulkWriteResult instance
 */
var BulkWriteResult = function(bulkResult) {
  defineReadOnlyProperty(this, "ok", bulkResult.ok);
  defineReadOnlyProperty(this, "nInserted", bulkResult.nInserted);
  defineReadOnlyProperty(this, "nUpserted", bulkResult.nUpserted);
  defineReadOnlyProperty(this, "nMatched", bulkResult.nMatched);
  defineReadOnlyProperty(this, "nModified", bulkResult.nModified);
  defineReadOnlyProperty(this, "nRemoved", bulkResult.nRemoved);

  /**
   * Return an array of inserted ids
   *
   * @return {object[]}
   */
  this.getInsertedIds = function() {
    return bulkResult.insertedIds;
  }

  /**
   * Return an array of upserted ids
   *
   * @return {object[]}
   */
  this.getUpsertedIds = function() {
    return bulkResult.upserted;
  }

  /**
   * Return the upserted id at position x
   *
   * @param {number} index the number of the upserted id to return; returns undefined if there is no result for the passed-in index
   * @return {object}
   */
  this.getUpsertedIdAt = function(index) {
    return bulkResult.upserted[index];
  }

  /**
   * Return the raw internal result
   *
   * @return {object}
   */
  this.getRawResponse = function() {
    return bulkResult;
  }

  /**
   * Returns true if the bulk operation contains a write error
   *
   * @return {boolean}
   */
  this.hasWriteErrors = function() {
    return bulkResult.writeErrors.length > 0;
  }

  /**
   * Returns the number of write errors of the bulk operation
   *
   * @return {number}
   */
  this.getWriteErrorCount = function() {
    return bulkResult.writeErrors.length;
  }

  /**
   * Returns a specific write error object
   *
   * @return {WriteError}
   */
  this.getWriteErrorAt = function(index) {
    if(index < bulkResult.writeErrors.length) {
      return bulkResult.writeErrors[index];
    }
    return null;
  }

  /**
   * Retrieve all write errors
   *
   * @return {object[]}
   */
  this.getWriteErrors = function() {
    return bulkResult.writeErrors;
  }

  /**
   * Retrieve lastOp if available
   *
   * @return {object}
   */
  this.getLastOp = function() {
    return bulkResult.lastOp;
  }

  /**
   * Retrieve the write concern error if any
   *
   * @return {WriteConcernError}
   */
  this.getWriteConcernError = function() {
    if(bulkResult.writeConcernErrors.length == 0) {
      return null;
    } else if(bulkResult.writeConcernErrors.length == 1) {
      // Return the error
      return bulkResult.writeConcernErrors[0];
    } else {

      // Combine the errors
      var errmsg = "";
      for(var i = 0; i < bulkResult.writeConcernErrors.length; i++) {
        var err = bulkResult.writeConcernErrors[i];
        errmsg = errmsg + err.errmsg;

        // TODO: Something better
        if(i == 0) errmsg = errmsg + " and ";
      }

      return new WriteConcernError({ errmsg : errmsg, code : WRITE_CONCERN_ERROR });
    }
  }

  this.toJSON = function() {
    return bulkResult;
  }

  this.toString = function() {
    return "BulkWriteResult(" + JSON.stringify(this.toJSON()) + ")";
  }

  this.isOk = function() {
    return bulkResult.ok == 1;
  }
}
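
// Usage sketch (not part of the vendored file): a BulkWriteResult is what a
// bulk operation's execute() hands back on success, e.g.
//
//   bulk.execute(function(err, result) {
//     console.log(result.nInserted, result.nModified);
//     if(result.hasWriteErrors()) {
//       console.log(result.getWriteErrorAt(0).errmsg);
//     }
//   });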

/**
 * Create a new WriteConcernError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write concern error code.
 * @property {string} errmsg Write concern error message.
 * @return {WriteConcernError} a WriteConcernError instance
 */
var WriteConcernError = function(err) {
  if(!(this instanceof WriteConcernError)) return new WriteConcernError(err);

  // Define properties
  defineReadOnlyProperty(this, "code", err.code);
  defineReadOnlyProperty(this, "errmsg", err.errmsg);

  this.toJSON = function() {
    return {code: err.code, errmsg: err.errmsg};
  }

  this.toString = function() {
    return "WriteConcernError(" + err.errmsg + ")";
  }
}

/**
 * Create a new WriteError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write error code.
 * @property {number} index Index of the erroring operation in the original bulk operation.
 * @property {string} errmsg Write error message.
 * @return {WriteError} a WriteError instance
 */
var WriteError = function(err) {
  if(!(this instanceof WriteError)) return new WriteError(err);

  // Define properties
  defineReadOnlyProperty(this, "code", err.code);
  defineReadOnlyProperty(this, "index", err.index);
  defineReadOnlyProperty(this, "errmsg", err.errmsg);

  //
  // Define access methods
  this.getOperation = function() {
    return err.op;
  }

  this.toJSON = function() {
    return {code: err.code, index: err.index, errmsg: err.errmsg, op: err.op};
  }

  this.toString = function() {
    return "WriteError(" + JSON.stringify(this.toJSON()) + ")";
  }
}

/**
 * Merges results into a shared data structure
 * @ignore
 */
var mergeBatchResults = function(ordered, batch, bulkResult, err, result) {
  // If we have an error, set the result to be the err object
  if(err) {
    result = err;
  } else if(result && result.result) {
    result = result.result;
  } else if(result == null) {
    return;
  }

  // If we have a top level error, stop processing and return
  if(result.ok == 0 && bulkResult.ok == 1) {
    bulkResult.ok = 0;
    // bulkResult.error = utils.toError(result);
    var writeError = {
        index: 0
      , code: result.code || 0
      , errmsg: result.message
      , op: batch.operations[0]
    };

    bulkResult.writeErrors.push(new WriteError(writeError));
    return;
  } else if(result.ok == 0 && bulkResult.ok == 0) {
    return;
  }

  // Add lastOp if available
  if(result.lastOp) {
    bulkResult.lastOp = result.lastOp;
  }

  // If we have an insert Batch type
  if(batch.batchType == INSERT && result.n) {
    bulkResult.nInserted = bulkResult.nInserted + result.n;
  }

  // If we have a remove Batch type
  if(batch.batchType == REMOVE && result.n) {
    bulkResult.nRemoved = bulkResult.nRemoved + result.n;
  }

  var nUpserted = 0;

  // We have an array of upserted values, we need to rewrite the indexes
  if(Array.isArray(result.upserted)) {
    nUpserted = result.upserted.length;

    for(var i = 0; i < result.upserted.length; i++) {
      bulkResult.upserted.push({
          index: result.upserted[i].index + batch.originalZeroIndex
        , _id: result.upserted[i]._id
      });
    }
  } else if(result.upserted) {

    nUpserted = 1;

    bulkResult.upserted.push({
        index: batch.originalZeroIndex
      , _id: result.upserted
    });
  }

  // If we have an update Batch type
  if(batch.batchType == UPDATE && result.n) {
    var nModified = result.nModified;
    bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
    bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);

    if(typeof nModified == 'number') {
      bulkResult.nModified = bulkResult.nModified + nModified;
    } else {
      bulkResult.nModified = null;
    }
  }

  if(Array.isArray(result.writeErrors)) {
    for(var i = 0; i < result.writeErrors.length; i++) {

      var writeError = {
          index: batch.originalZeroIndex + result.writeErrors[i].index
        , code: result.writeErrors[i].code
        , errmsg: result.writeErrors[i].errmsg
        , op: batch.operations[result.writeErrors[i].index]
      };

      bulkResult.writeErrors.push(new WriteError(writeError));
    }
  }

  if(result.writeConcernError) {
    bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
  }
}
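
// Worked example of the index rewriting above: if a batch started at
// originalZeroIndex = 5 and the server reports a write error at index 2
// within that batch, the merged result records it at index 5 + 2 = 7,
// the position of the offending operation in the caller's original bulk.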

//
// Clone the options
var cloneOptions = function(options) {
  var clone = {};
  var keys = Object.keys(options);
  for(var i = 0; i < keys.length; i++) {
    clone[keys[i]] = options[keys[i]];
  }

  return clone;
}

// Exports symbols
exports.BulkWriteResult = BulkWriteResult;
exports.WriteError = WriteError;
exports.Batch = Batch;
exports.LegacyOp = LegacyOp;
exports.mergeBatchResults = mergeBatchResults;
exports.cloneOptions = cloneOptions;
exports.writeConcern = writeConcern;
exports.INVALID_BSON_ERROR = INVALID_BSON_ERROR;
exports.WRITE_CONCERN_ERROR = WRITE_CONCERN_ERROR;
exports.MULTIPLE_ERROR = MULTIPLE_ERROR;
exports.UNKNOWN_ERROR = UNKNOWN_ERROR;
exports.INSERT = INSERT;
exports.UPDATE = UPDATE;
exports.REMOVE = REMOVE;
470 server/node_modules/mongodb/lib/bulk/ordered.js (generated, vendored, executable file)
@@ -0,0 +1,470 @@
"use strict";

var common = require('./common')
  , utils = require('../utils')
  , toError = require('../utils').toError
  , f = require('util').format
  , shallowClone = utils.shallowClone
  , WriteError = common.WriteError
  , BulkWriteResult = common.BulkWriteResult
  , LegacyOp = common.LegacyOp
  , ObjectID = require('mongodb-core').BSON.ObjectID
  , Batch = common.Batch
  , mergeBatchResults = common.mergeBatchResults;

/**
 * Create a FindOperatorsOrdered instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @return {FindOperatorsOrdered} a FindOperatorsOrdered instance.
 */
var FindOperatorsOrdered = function(self) {
  this.s = self.s;
}

/**
 * Add a single update document to the bulk operation
 *
 * @method
 * @param {object} doc update operations
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.update = function(updateDocument) {
  // Perform upsert
  var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;

  // Establish the update command
  var document = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: true
    , upsert: upsert
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the update document to the list
  return addToOperationsList(this, common.UPDATE, document);
}

/**
 * Add a single update-one document to the bulk operation
 *
 * @method
 * @param {object} doc update operations
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.updateOne = function(updateDocument) {
  // Perform upsert
  var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;

  // Establish the update command
  var document = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: false
    , upsert: upsert
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the update document to the list
  return addToOperationsList(this, common.UPDATE, document);
}

/**
 * Add a replace-one operation to the bulk operation
 *
 * @method
 * @param {object} doc the new document to replace the existing one with
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.replaceOne = function(updateDocument) {
  this.updateOne(updateDocument);
}

/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
FindOperatorsOrdered.prototype.upsert = function() {
  this.s.currentOp.upsert = true;
  return this;
}

/**
 * Add a remove-one operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.deleteOne = function() {
  // Establish the delete command
  var document = {
      q: this.s.currentOp.selector
    , limit: 1
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the remove document to the list
  return addToOperationsList(this, common.REMOVE, document);
}

// Backward compatibility
FindOperatorsOrdered.prototype.removeOne = FindOperatorsOrdered.prototype.deleteOne;

/**
 * Add a remove operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.delete = function() {
  // Establish the delete command
  var document = {
      q: this.s.currentOp.selector
    , limit: 0
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the remove document to the list
  return addToOperationsList(this, common.REMOVE, document);
}

// Backward compatibility
FindOperatorsOrdered.prototype.remove = FindOperatorsOrdered.prototype.delete;

// Add to internal list of documents
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);

  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

  // Check if we need to create a new batch: too many operations, the byte
  // size would overflow, or the operation type changed
  if(((_self.s.currentBatchSize + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatchSizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);

    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

    // Reset the current size trackers
    _self.s.currentBatchSize = 0;
    _self.s.currentBatchSizeBytes = 0;
  } else {
    // Update current batch size
    _self.s.currentBatchSize = _self.s.currentBatchSize + 1;
    _self.s.currentBatchSizeBytes = _self.s.currentBatchSizeBytes + bsonSize;
  }

  if(docType == common.INSERT) {
    _self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
  }

  // We have an array of documents
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  } else {
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentBatch.operations.push(document)
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }

  // Return self
  return _self;
}

/**
 * Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @property {number} length Get the number of operations in the bulk.
 * @return {OrderedBulkOperation} an OrderedBulkOperation instance.
 */
function OrderedBulkOperation(topology, collection, options) {
  options = options == null ? {} : options;
  // TODO Bring from driver information in isMaster
  var self = this;
  var executed = false;

  // Current item
  var currentOp = null;

  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;

  // Namespace for the operation
  var namespace = collection.collectionName;

  // Set max byte size
  var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
  var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;

  // Get the capabilities
  var capabilities = topology.capabilities();

  // Get the write concern
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);

  // Current batch
  var currentBatch = null;
  var currentIndex = 0;
  var currentBatchSize = 0;
  var currentBatchSizeBytes = 0;
  var batches = [];

  // Final results
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };

  // Internal state
  this.s = {
    // Final result
      bulkResult: bulkResult
    // Current batch state
    , currentBatch: null
    , currentIndex: 0
    , currentBatchSize: 0
    , currentBatchSizeBytes: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Capabilities
    , capabilities: capabilities
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: namespace
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation
    , currentOp: currentOp
    // Executed
    , executed: executed
    // Collection
    , collection: collection
  }
}

OrderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];

  // Update operations
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    if(op[key].upsert) operation.upsert = true;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Remove operations
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || (op.deleteMany && op.deleteMany.q)) {
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations
  if(op.insertOne && op.insertOne.document == null) {
    if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if(op.insertMany) {
    for(var i = 0; i < op.insertMany.length; i++) {
      if(op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    return;
  }

  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}

/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} doc the document to insert
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
OrderedBulkOperation.prototype.insert = function(document) {
  if(document._id == null) document._id = new ObjectID();
  return addToOperationsList(this, common.INSERT, document);
}

/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
OrderedBulkOperation.prototype.find = function(selector) {
  if (!selector) {
    throw toError("Bulk find operation must specify a selector");
  }

  // Save a current selector
  this.s.currentOp = {
    selector: selector
  }

  return new FindOperatorsOrdered(this);
}

Object.defineProperty(OrderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});

//
// Execute next write command in a chain
var executeCommands = function(self, callback) {
  if(self.s.batches.length == 0) {
    return callback(null, new BulkWriteResult(self.s.bulkResult));
  }

  // Ordered execution of the command
  var batch = self.s.batches.shift();

  var resultHandler = function(err, result) {
    // If we have an error
    if(err) err.ok = 0;
    // Merge the results together
    var mergeResult = mergeBatchResults(true, batch, self.s.bulkResult, err, result);
    if(mergeResult != null) {
      return callback(null, new BulkWriteResult(self.s.bulkResult));
    }

    // If we are ordered and have errors and they are
    // not all replication errors, terminate the operation
    if(self.s.bulkResult.writeErrors.length > 0) {
      return callback(self.s.bulkResult.writeErrors[0], new BulkWriteResult(self.s.bulkResult));
    }

    // Execute the next command in line
    executeCommands(self, callback);
  }

  var finalOptions = {ordered: true}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  try {
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}

/**
 * The callback format for results
 * @callback OrderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the ordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {OrderedBulkOperation~resultCallback} callback The result callback
 * @throws {MongoError}
 * @return {null}
 */
OrderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  if(this.s.executed) throw toError("batch cannot be re-executed");
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }

  // If we have a current batch
  if(this.s.currentBatch) this.s.batches.push(this.s.currentBatch);

  // If we have no operations in the bulk, raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }

  // Execute the commands
  return executeCommands(this, callback);
}

/**
 * Returns an ordered batch object
 * @ignore
 */
var initializeOrderedBulkOp = function(topology, collection, options) {
  return new OrderedBulkOperation(topology, collection, options);
}

module.exports = initializeOrderedBulkOp;
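
A sketch of the ordered bulk API defined above, assuming `col` is an existing Collection (collections expose this factory as initializeOrderedBulkOp):

var bulk = col.initializeOrderedBulkOp();

bulk.insert({a: 1});
bulk.find({a: 1}).upsert().update({$set: {b: 2}});
bulk.find({b: 2}).removeOne();

// Batches execute in order; the first write error aborts the remaining batches
bulk.execute(function(err, result) {
  if(err) throw err;
  console.log(result.nInserted, result.nUpserted, result.nRemoved);
});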
482 server/node_modules/mongodb/lib/bulk/unordered.js (generated, vendored, executable file)
@@ -0,0 +1,482 @@
|
||||
"use strict";
|
||||
|
||||
var common = require('./common')
|
||||
, utils = require('../utils')
|
||||
, toError = require('../utils').toError
|
||||
, f = require('util').format
|
||||
, shallowClone = utils.shallowClone
|
||||
, WriteError = common.WriteError
|
||||
, BulkWriteResult = common.BulkWriteResult
|
||||
, LegacyOp = common.LegacyOp
|
||||
, ObjectID = require('mongodb-core').BSON.ObjectID
|
||||
, Batch = common.Batch
|
||||
, mergeBatchResults = common.mergeBatchResults;
|
||||
|
||||
/**
|
||||
* Create a FindOperatorsUnordered instance (INTERNAL TYPE, do not instantiate directly)
|
||||
* @class
|
||||
* @property {number} length Get the number of operations in the bulk.
|
||||
* @return {FindOperatorsUnordered} a FindOperatorsUnordered instance.
|
||||
*/
|
||||
var FindOperatorsUnordered = function(self) {
|
||||
this.s = self.s;
|
||||
}
|
||||
|
||||
/**
 * Add a single update document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.update = function(updateDocument) {
  // Perform upsert
  var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;

  // Establish the update command
  var document = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: true
    , upsert: upsert
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the update document to the list
  return addToOperationsList(this, common.UPDATE, document);
}

/**
 * Add a single update one document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.updateOne = function(updateDocument) {
  // Perform upsert
  var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;

  // Establish the update command
  var document = {
      q: this.s.currentOp.selector
    , u: updateDocument
    , multi: false
    , upsert: upsert
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the update document to the list
  return addToOperationsList(this, common.UPDATE, document);
}

/**
 * Add a replace one operation to the bulk operation
 *
 * @method
 * @param {object} updateDocument the new document to replace the existing one with
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.replaceOne = function(updateDocument) {
  // Return the result so the call can be chained, matching the documented return type
  return this.updateOne(updateDocument);
}
/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.upsert = function() {
  this.s.currentOp.upsert = true;
  return this;
}

/**
 * Add a remove one operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.removeOne = function() {
  // Establish the remove command
  var document = {
      q: this.s.currentOp.selector
    , limit: 1
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the remove document to the list
  return addToOperationsList(this, common.REMOVE, document);
}

/**
 * Add a remove operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
FindOperatorsUnordered.prototype.remove = function() {
  // Establish the remove command
  var document = {
      q: this.s.currentOp.selector
    , limit: 0
  }

  // Clear out current Op
  this.s.currentOp = null;
  // Add the remove document to the list
  return addToOperationsList(this, common.REMOVE, document);
}
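// Illustrative sketch (editorial addition, hedged): the find-operator methods
// above are reached through UnorderedBulkOperation.find(), which stores the
// selector in s.currentOp before one of update/updateOne/replaceOne/upsert/
// remove/removeOne consumes it, e.g.
//
//   bulk.find({state: 'stale'}).upsert().updateOne({$set: {state: 'fresh'}});
//   bulk.find({state: 'dead'}).remove();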
//
// Add to the operations list
//
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = _self.s.bson.calculateObjectSize(document, false);
  // Throw error if the doc is bigger than the max BSON size
  if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
  // Holds the current batch
  _self.s.currentBatch = null;
  // Get the right type of batch
  if(docType == common.INSERT) {
    _self.s.currentBatch = _self.s.currentInsertBatch;
  } else if(docType == common.UPDATE) {
    _self.s.currentBatch = _self.s.currentUpdateBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentBatch = _self.s.currentRemoveBatch;
  }

  // Create a new batch object if we don't have a current one
  if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

  // Check if we need to create a new batch
  if(((_self.s.currentBatch.size + 1) >= _self.s.maxWriteBatchSize)
    || ((_self.s.currentBatch.sizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
    || (_self.s.currentBatch.batchType != docType)) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);

    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  }

  // We have an array of documents
  if(Array.isArray(document)) {
    throw toError("operation passed in cannot be an Array");
  } else {
    _self.s.currentBatch.operations.push(document);
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }

  // Save back the current Batch to the right type
  if(docType == common.INSERT) {
    _self.s.currentInsertBatch = _self.s.currentBatch;
    _self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
  } else if(docType == common.UPDATE) {
    _self.s.currentUpdateBatch = _self.s.currentBatch;
  } else if(docType == common.REMOVE) {
    _self.s.currentRemoveBatch = _self.s.currentBatch;
  }

  // Update current batch size
  _self.s.currentBatch.size = _self.s.currentBatch.size + 1;
  _self.s.currentBatch.sizeBytes = _self.s.currentBatch.sizeBytes + bsonSize;

  // Return self
  return _self;
}
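// Editorial note (hedged): the unordered list keeps one open batch per
// operation type (insert/update/remove), so interleaving types does not split
// batches; a new batch is only started when the current one would exceed
// maxWriteBatchSize operations or maxBatchSizeBytes serialized bytes. The
// sequence insert, update, insert therefore yields two batches, not three.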
/**
 * Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @return {UnorderedBulkOperation} a UnorderedBulkOperation instance.
 */
var UnorderedBulkOperation = function(topology, collection, options) {
  options = options == null ? {} : options;

  // Contains reference to self
  var self = this;
  // Get the namespace for the write operations
  var namespace = collection.collectionName;
  // Used to mark operation as executed
  var executed = false;

  // Current item
  var currentOp = null;
  var currentIndex = 0;
  var batches = [];

  // The current Batches for the different operations
  var currentInsertBatch = null;
  var currentUpdateBatch = null;
  var currentRemoveBatch = null;

  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;

  // Get the capabilities
  var capabilities = topology.capabilities();

  // Set max byte size
  var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
  var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;

  // Get the write concern
  var writeConcern = common.writeConcern(shallowClone(options), collection, options);

  // Final results
  var bulkResult = {
      ok: 1
    , writeErrors: []
    , writeConcernErrors: []
    , insertedIds: []
    , nInserted: 0
    , nUpserted: 0
    , nMatched: 0
    , nModified: 0
    , nRemoved: 0
    , upserted: []
  };

  // Internal state
  this.s = {
    // Final result
      bulkResult: bulkResult
    // Current batch state
    , currentInsertBatch: null
    , currentUpdateBatch: null
    , currentRemoveBatch: null
    , currentBatch: null
    , currentIndex: 0
    , batches: []
    // Write concern
    , writeConcern: writeConcern
    // Capabilities
    , capabilities: capabilities
    // Max batch size options
    , maxBatchSizeBytes: maxBatchSizeBytes
    , maxWriteBatchSize: maxWriteBatchSize
    // Namespace
    , namespace: namespace
    // BSON
    , bson: bson
    // Topology
    , topology: topology
    // Options
    , options: options
    // Current operation
    , currentOp: currentOp
    // Executed
    , executed: executed
    // Collection
    , collection: collection
  }
}
/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} document the document to insert
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
UnorderedBulkOperation.prototype.insert = function(document) {
  if(document._id == null) document._id = new ObjectID();
  return addToOperationsList(this, common.INSERT, document);
}

/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
UnorderedBulkOperation.prototype.find = function(selector) {
  if (!selector) {
    throw toError("Bulk find operation must specify a selector");
  }

  // Save a current selector
  this.s.currentOp = {
    selector: selector
  }

  return new FindOperatorsUnordered(this);
}

Object.defineProperty(UnorderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});
UnorderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];

  // Update operations
  if((op.updateOne && op.updateOne.q)
    || (op.updateMany && op.updateMany.q)
    || (op.replaceOne && op.replaceOne.q)) {
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format
  if(op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
    if(op[key].upsert) operation.upsert = true;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Remove operations
  if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || (op.deleteMany && op.deleteMany.q)) {
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if(op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    var operation = {q: op[key].filter, limit: limit}
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations
  if(op.insertOne && op.insertOne.document == null) {
    if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if(op.insertOne && op.insertOne.document) {
    if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if(op.insertMany) {
    for(var i = 0; i < op.insertMany.length; i++) {
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    return;
  }

  // No valid type of operation
  throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
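// Illustrative op shapes accepted by raw() (editorial sketch, hedged):
//
//   bulk.raw({insertOne: {document: {a: 1}}});
//   bulk.raw({updateOne: {filter: {a: 1}, update: {$set: {b: 2}}, upsert: true}});
//   bulk.raw({deleteMany: {filter: {b: 2}}});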
//
// Execute the command
var executeBatch = function(self, batch, callback) {
  var finalOptions = {ordered: false}
  if(self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  var resultHandler = function(err, result) {
    // If we have an error
    if(err) err.ok = 0;
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, result));
  }

  try {
    if(batch.batchType == common.INSERT) {
      self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.UPDATE) {
      self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    } else if(batch.batchType == common.REMOVE) {
      self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
    }
  } catch(err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
}

//
// Execute all the commands
var executeBatches = function(self, callback) {
  var numberOfCommandsToExecute = self.s.batches.length;
  // Execute over all the batches
  for(var i = 0; i < self.s.batches.length; i++) {
    executeBatch(self, self.s.batches[i], function(err, result) {
      numberOfCommandsToExecute = numberOfCommandsToExecute - 1;

      // All batches are done; surface the first write error, if any
      if(numberOfCommandsToExecute == 0) {
        var error = self.s.bulkResult.writeErrors.length > 0 ? self.s.bulkResult.writeErrors[0] : null;
        callback(error, new BulkWriteResult(self.s.bulkResult));
      }
    });
  }
}
/**
 * The callback format for results
 * @callback UnorderedBulkOperation~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {BulkWriteResult} result The bulk write result.
 */

/**
 * Execute the unordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {UnorderedBulkOperation~resultCallback} callback The result callback
 * @throws {MongoError}
 * @return {null}
 */
UnorderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
  if(this.s.executed) throw toError("batch cannot be re-executed");
  if(typeof _writeConcern == 'function') {
    callback = _writeConcern;
  } else {
    this.s.writeConcern = _writeConcern;
  }

  // Queue up any batches that are still being filled
  if(this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
  if(this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
  if(this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);

  // If we have no operations in the bulk raise an error
  if(this.s.batches.length == 0) {
    throw toError("Invalid Operation, No operations in bulk");
  }

  // Execute batches
  return executeBatches(this, function(err, result) {
    callback(err, result);
  });
}
/**
 * Returns an unordered batch object
 * @ignore
 */
var initializeUnorderedBulkOp = function(topology, collection, options) {
  return new UnorderedBulkOperation(topology, collection, options);
}

module.exports = initializeUnorderedBulkOp;
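// Illustrative usage sketch (editorial addition, not part of the driver source):
// assuming a connected `db` handle, an unordered bulk batches by operation type
// and fires the batches concurrently, so no execution order is guaranteed.
//
//   var bulk = db.collection('documents').initializeUnorderedBulkOp();
//   bulk.insert({a: 1});
//   bulk.insert({a: 2});
//   bulk.find({a: 1}).update({$set: {seen: true}});
//   bulk.execute(function(err, result) {
//     // err is the first write error, if any; result is a BulkWriteResult
//   });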
2079
server/node_modules/mongodb/lib/collection.js
generated
vendored
Executable file
File diff suppressed because it is too large

279
server/node_modules/mongodb/lib/command_cursor.js
generated
vendored
Executable file
@@ -0,0 +1,279 @@
"use strict";
|
||||
|
||||
var inherits = require('util').inherits
|
||||
, f = require('util').format
|
||||
, toError = require('./utils').toError
|
||||
, getSingleProperty = require('./utils').getSingleProperty
|
||||
, formattedOrderClause = require('./utils').formattedOrderClause
|
||||
, handleCallback = require('./utils').handleCallback
|
||||
, Logger = require('mongodb-core').Logger
|
||||
, EventEmitter = require('events').EventEmitter
|
||||
, ReadPreference = require('./read_preference')
|
||||
, MongoError = require('mongodb-core').MongoError
|
||||
, Readable = require('stream').Readable || require('readable-stream').Readable
|
||||
// , CoreCursor = require('mongodb-core').Cursor
|
||||
, CoreCursor = require('./cursor')
|
||||
, Query = require('mongodb-core').Query
|
||||
, CoreReadPreference = require('mongodb-core').ReadPreference;
|
||||
|
||||
/**
|
||||
* @fileOverview The **CommandCursor** class is an internal class that embodies a
|
||||
* generalized cursor based on a MongoDB command allowing for iteration over the
|
||||
* results returned. It supports one by one document iteration, conversion to an
|
||||
* array or can be iterated as a Node 0.10.X or higher stream
|
||||
*
|
||||
* **CommandCursor Cannot directly be instantiated**
|
||||
* @example
|
||||
* var MongoClient = require('mongodb').MongoClient,
|
||||
* test = require('assert');
|
||||
* // Connection url
|
||||
* var url = 'mongodb://localhost:27017/test';
|
||||
* // Connect using MongoClient
|
||||
* MongoClient.connect(url, function(err, db) {
|
||||
* // Create a collection we want to drop later
|
||||
* var col = db.collection('listCollectionsExample1');
|
||||
* // Insert a bunch of documents
|
||||
* col.insert([{a:1, b:1}
|
||||
* , {a:2, b:2}, {a:3, b:3}
|
||||
* , {a:4, b:4}], {w:1}, function(err, result) {
|
||||
* test.equal(null, err);
|
||||
*
|
||||
* // List the database collections available
|
||||
* db.listCollections().toArray(function(err, items) {
|
||||
* test.equal(null, err);
|
||||
* db.close();
|
||||
* });
|
||||
* });
|
||||
* });
|
||||
*/
|
||||
|
||||
/**
 * Namespace provided by node.js
 * @external Readable
 */

/**
 * Creates a new Command Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @extends external:Readable
 * @fires CommandCursor#data
 * @fires CommandCursor#end
 * @fires CommandCursor#close
 * @fires CommandCursor#readable
 * @return {CommandCursor} a CommandCursor instance.
 */
var CommandCursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var self = this;
  var state = CommandCursor.INIT;
  var streamOptions = {};

  // MaxTimeMS
  var maxTimeMS = null;

  // Set up
  Readable.call(this, {objectMode: true});

  // Internal state
  this.s = {
    // MaxTimeMS
      maxTimeMS: maxTimeMS
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology Options
    , topologyOptions: topologyOptions
  }
}
/**
 * CommandCursor stream data event, fired for each document in the cursor.
 *
 * @event CommandCursor#data
 * @type {object}
 */

/**
 * CommandCursor stream end event
 *
 * @event CommandCursor#end
 * @type {null}
 */

/**
 * CommandCursor stream close event
 *
 * @event CommandCursor#close
 * @type {null}
 */

/**
 * CommandCursor stream readable event
 *
 * @event CommandCursor#readable
 * @type {null}
 */

// Inherit from Readable
inherits(CommandCursor, Readable);

// Set the methods to inherit from prototype
var methodsToInherit = ['next', 'each', 'forEach', 'toArray'
  , 'rewind', 'bufferedCount', 'readBufferedDocuments', 'close', 'isClosed'];

// Only inherit the types we need
for(var i = 0; i < methodsToInherit.length; i++) {
  CommandCursor.prototype[methodsToInherit[i]] = CoreCursor.prototype[methodsToInherit[i]];
}
/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {CommandCursor}
 */
CommandCursor.prototype.batchSize = function(value) {
  if(this.s.state == CommandCursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("batchSize requires an integer");
  if(this.s.cmd.cursor) this.s.cmd.cursor.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}

/**
 * Add a maxTimeMS value to the command.
 * @method
 * @param {number} value Number of milliseconds to wait before aborting the command.
 * @return {CommandCursor}
 */
CommandCursor.prototype.maxTimeMS = function(value) {
  // Only servers with wire version > 2 understand maxTimeMS
  if(this.s.topology.lastIsMaster().minWireVersion > 2) {
    this.s.cmd.maxTimeMS = value;
  }
  return this;
}

// Legacy alias
CommandCursor.prototype.get = CommandCursor.prototype.toArray;
/**
 * Get the next available document from the cursor, returns null if no more documents are available.
 * @function CommandCursor.prototype.next
 * @param {CommandCursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * Set the new batchSize of the cursor
 * @function CommandCursor.prototype.setBatchSize
 * @param {number} value The new batchSize for the cursor
 * @return {null}
 */

/**
 * Get the batchSize of the cursor
 * @function CommandCursor.prototype.batchSize
 * @param {number} value The current batchSize for the cursor
 * @return {null}
 */

/**
 * The callback format for results
 * @callback CommandCursor~toArrayResultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {object[]} documents All the documents that satisfy the cursor.
 */

/**
 * Returns an array of documents. The caller is responsible for making sure that there
 * is enough memory to store the results. Note that the array only contains partial
 * results when this cursor had been previously accessed.
 * @method CommandCursor.prototype.toArray
 * @param {CommandCursor~toArrayResultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * The callback format for results
 * @callback CommandCursor~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {(object|null)} result The result object if the command was executed successfully.
 */

/**
 * Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
 * not all of the elements will be iterated if this cursor had been previously accessed.
 * In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
 * **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
 * at any given time if batch size is specified. Otherwise, the caller is responsible
 * for making sure that the entire result can fit in memory.
 * @method CommandCursor.prototype.each
 * @param {CommandCursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * Close the cursor, sending a KillCursor command and emitting close.
 * @method CommandCursor.prototype.close
 * @param {CommandCursor~resultCallback} [callback] The result callback.
 * @return {null}
 */

/**
 * Is the cursor closed
 * @method CommandCursor.prototype.isClosed
 * @return {boolean}
 */

/**
 * Clone the cursor
 * @function CommandCursor.prototype.clone
 * @return {CommandCursor}
 */

/**
 * Resets the cursor
 * @function CommandCursor.prototype.rewind
 * @return {CommandCursor}
 */

/**
 * The callback format for the forEach iterator method
 * @callback CommandCursor~iteratorCallback
 * @param {Object} doc An emitted document for the iterator
 */

/**
 * The callback error format for the forEach iterator method
 * @callback CommandCursor~endCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 */

/*
 * Iterates over all the documents for this cursor using the iterator, callback pattern.
 * @method CommandCursor.prototype.forEach
 * @param {CommandCursor~iteratorCallback} iterator The iteration callback.
 * @param {CommandCursor~endCallback} callback The end callback.
 * @throws {MongoError}
 * @return {null}
 */

CommandCursor.INIT = 0;
CommandCursor.OPEN = 1;
CommandCursor.CLOSED = 2;

module.exports = CommandCursor;
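// Illustrative consumption sketch (editorial addition, hedged): since
// CommandCursor inherits from Readable in object mode, results can be
// streamed as well as collected.
//
//   db.listCollections()
//     .on('data', function(doc) { console.log(doc.name); })
//     .on('end', function() { db.close(); });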
836
server/node_modules/mongodb/lib/cursor.js
generated
vendored
Executable file
@@ -0,0 +1,836 @@
"use strict";
|
||||
|
||||
var inherits = require('util').inherits
|
||||
, f = require('util').format
|
||||
, toError = require('./utils').toError
|
||||
, getSingleProperty = require('./utils').getSingleProperty
|
||||
, formattedOrderClause = require('./utils').formattedOrderClause
|
||||
, handleCallback = require('./utils').handleCallback
|
||||
, Logger = require('mongodb-core').Logger
|
||||
, EventEmitter = require('events').EventEmitter
|
||||
, ReadPreference = require('./read_preference')
|
||||
, MongoError = require('mongodb-core').MongoError
|
||||
, Readable = require('stream').Readable || require('readable-stream').Readable
|
||||
, CoreCursor = require('mongodb-core').Cursor
|
||||
, Query = require('mongodb-core').Query
|
||||
, CoreReadPreference = require('mongodb-core').ReadPreference;
|
||||
|
||||
/**
|
||||
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
|
||||
* allowing for iteration over the results returned from the underlying query. It supports
|
||||
* one by one document iteration, conversion to an array or can be iterated as a Node 0.10.X
|
||||
* or higher stream
|
||||
*
|
||||
* **CURSORS Cannot directly be instantiated**
|
||||
* @example
|
||||
* var MongoClient = require('mongodb').MongoClient,
|
||||
* test = require('assert');
|
||||
* // Connection url
|
||||
* var url = 'mongodb://localhost:27017/test';
|
||||
* // Connect using MongoClient
|
||||
* MongoClient.connect(url, function(err, db) {
|
||||
* // Create a collection we want to drop later
|
||||
* var col = db.collection('createIndexExample1');
|
||||
* // Insert a bunch of documents
|
||||
* col.insert([{a:1, b:1}
|
||||
* , {a:2, b:2}, {a:3, b:3}
|
||||
* , {a:4, b:4}], {w:1}, function(err, result) {
|
||||
* test.equal(null, err);
|
||||
*
|
||||
* // Show that duplicate records got dropped
|
||||
* col.find({}).toArray(function(err, items) {
|
||||
* test.equal(null, err);
|
||||
* test.equal(4, items.length);
|
||||
* db.close();
|
||||
* });
|
||||
* });
|
||||
* });
|
||||
*/
|
||||
|
||||
/**
 * Namespace provided by the mongodb-core and node.js
 * @external CoreCursor
 * @external Readable
 */

// Flags allowed for cursor
var flags = ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'];

/**
 * Creates a new Cursor instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @extends external:CoreCursor
 * @extends external:Readable
 * @property {string} sortValue Cursor query sort setting.
 * @property {boolean} timeout Is Cursor able to time out.
 * @property {ReadPreference} readPreference Get cursor ReadPreference.
 * @fires Cursor#data
 * @fires Cursor#end
 * @fires Cursor#close
 * @fires Cursor#readable
 * @return {Cursor} a Cursor instance.
 */
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
  var self = this;
  var state = Cursor.INIT;
  var streamOptions = {};

  // Tailable cursor options
  var numberOfRetries = options.numberOfRetries || 5;
  var tailableRetryInterval = options.tailableRetryInterval || 500;
  var currentNumberOfRetries = numberOfRetries;
  // MaxTimeMS
  var maxTimeMS = null;

  // Set up
  Readable.call(this, {objectMode: true});

  // Internal cursor state
  this.s = {
    // MaxTimeMS
      maxTimeMS: null
    // Tailable cursor options
    , numberOfRetries: numberOfRetries
    , tailableRetryInterval: tailableRetryInterval
    , currentNumberOfRetries: currentNumberOfRetries
    // State
    , state: state
    // Stream options
    , streamOptions: streamOptions
    // BSON
    , bson: bson
    // Namespace
    , ns: ns
    // Command
    , cmd: cmd
    // Options
    , options: options
    // Topology
    , topology: topology
    // Topology options
    , topologyOptions: topologyOptions
  }

  // Legacy fields
  this.timeout = self.s.options.noCursorTimeout == true;
  this.sortValue = self.s.cmd.sort;
  this.readPreference = self.s.options.readPreference;
}
/**
 * Cursor stream data event, fired for each document in the cursor.
 *
 * @event Cursor#data
 * @type {object}
 */

/**
 * Cursor stream end event
 *
 * @event Cursor#end
 * @type {null}
 */

/**
 * Cursor stream close event
 *
 * @event Cursor#close
 * @type {null}
 */

/**
 * Cursor stream readable event
 *
 * @event Cursor#readable
 * @type {null}
 */

// Inherit from Readable
inherits(Cursor, Readable);

// Copy over all the core cursor methods
for(var name in CoreCursor.prototype) {
  Cursor.prototype[name] = CoreCursor.prototype[name];
}
/**
 * Set the cursor query
 * @method
 * @param {object} filter The filter object used for the cursor.
 * @return {Cursor}
 */
Cursor.prototype.filter = function(filter) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  this.s.cmd.query = filter;
  return this;
}

/**
 * Add a cursor flag to the cursor
 * @method
 * @param {string} flag The flag to set, must be one of following ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'].
 * @param {boolean} value The flag boolean value.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.addCursorFlag = function(flag, value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  if(flags.indexOf(flag) == -1) throw new MongoError(f("flag %s is not one of the supported flags %s", flag, flags));
  if(typeof value != 'boolean') throw new MongoError(f("flag %s must be a boolean value", flag));
  this.s.cmd[flag] = value;
  return this;
}
/**
 * Add a query modifier to the cursor query
 * @method
 * @param {string} name The query modifier (must start with $, such as $orderby etc)
 * @param {boolean} value The modifier value.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.addQueryModifier = function(name, value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  if(name[0] != '$') throw new MongoError(f("%s is not a valid query modifier", name));
  // Strip off the $
  var field = name.substr(1);
  // Set on the command
  this.s.cmd[field] = value;
  // Deal with the special case for sort
  if(field == 'orderby') this.s.cmd.sort = this.s.cmd[field];
  return this;
}

/**
 * Add a comment to the cursor query allowing for tracking the comment in the log.
 * @method
 * @param {string} value The comment attached to this query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.comment = function(value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  this.s.cmd.comment = value;
  return this;
}
/**
 * Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
 * @method
 * @param {number} value Number of milliseconds to wait before aborting the query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.maxTimeMS = function(value) {
  if(typeof value != 'number') throw new MongoError("maxTimeMS must be a number");
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  this.s.maxTimeMS = value;
  this.s.cmd.maxTimeMS = value;
  return this;
}

// Legacy-cased alias
Cursor.prototype.maxTimeMs = Cursor.prototype.maxTimeMS;

/**
 * Sets a field projection for the query.
 * @method
 * @param {object} value The field projection object.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.project = function(value) {
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  this.s.cmd.fields = value;
  return this;
}

/**
 * Sets the sort order of the cursor query.
 * @method
 * @param {(string|array|object)} keyOrList The key or keys set for the sort.
 * @param {number} [direction] The direction of the sorting (1 or -1).
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.sort = function(keyOrList, direction) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support sorting");
  if(this.s.state == Cursor.CLOSED || this.s.state == Cursor.OPEN || this.isDead()) throw new MongoError("Cursor is closed");
  var order = keyOrList;

  // If a direction was supplied, normalize to the [[key, direction]] form
  if(direction != null) {
    order = [[keyOrList, direction]];
  }

  this.s.cmd.sort = order;
  this.sortValue = order;
  return this;
}
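// Illustrative builder chain (editorial sketch, hedged): the modifiers above
// return the cursor, so they compose freely before iteration begins.
//
//   col.find({a: {$gt: 0}})
//      .project({a: 1})
//      .sort([['a', -1]])
//      .maxTimeMS(500)
//      .toArray(function(err, docs) { /* ... */ });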
/**
 * Set the batch size for the cursor.
 * @method
 * @param {number} value The batchSize for the cursor.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.batchSize = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support batchSize");
  if(this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("batchSize requires an integer");
  this.s.cmd.batchSize = value;
  this.setCursorBatchSize(value);
  return this;
}

/**
 * Set the limit for the cursor.
 * @method
 * @param {number} value The limit for the cursor query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.limit = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support limit");
  if(this.s.state == Cursor.OPEN || this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("limit requires an integer");
  this.s.cmd.limit = value;
  this.setCursorLimit(value);
  return this;
}

/**
 * Set the skip for the cursor.
 * @method
 * @param {number} value The skip for the cursor query.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.skip = function(value) {
  if(this.s.options.tailable) throw new MongoError("Tailable cursor doesn't support skip");
  if(this.s.state == Cursor.OPEN || this.s.state == Cursor.CLOSED || this.isDead()) throw new MongoError("Cursor is closed");
  if(typeof value != 'number') throw new MongoError("skip requires an integer");
  this.s.cmd.skip = value;
  this.setCursorSkip(value);
  return this;
}
/**
 * The callback format for results
 * @callback Cursor~resultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {(object|null)} result The result object if the command was executed successfully.
 */

/**
 * Get the next available document from the cursor, returns null if no more documents are available.
 * @function external:CoreCursor#next
 * @param {Cursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */

/**
 * Set the new batchSize of the cursor
 * @function Cursor.prototype.setBatchSize
 * @param {number} value The new batchSize for the cursor
 * @return {null}
 */

/**
 * Get the batchSize of the cursor
 * @function Cursor.prototype.batchSize
 * @param {number} value The current batchSize for the cursor
 * @return {null}
 */

/**
 * Set the new skip value of the cursor
 * @function Cursor.prototype.setCursorSkip
 * @param {number} value The new skip for the cursor
 * @return {null}
 */

/**
 * Get the skip value of the cursor
 * @function Cursor.prototype.cursorSkip
 * @param {number} value The current skip value for the cursor
 * @return {null}
 */

/**
 * Set the new limit value of the cursor
 * @function Cursor.prototype.setCursorLimit
 * @param {number} value The new limit for the cursor
 * @return {null}
 */

/**
 * Get the limit value of the cursor
 * @function Cursor.prototype.cursorLimit
 * @param {number} value The current limit value for the cursor
 * @return {null}
 */

/**
 * Clone the cursor
 * @function external:CoreCursor#clone
 * @return {Cursor}
 */

/**
 * Resets the cursor
 * @function external:CoreCursor#rewind
 * @return {null}
 */
/**
 * Get the next available document from the cursor, returns null if no more documents are available.
 * @method
 * @param {Cursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @deprecated
 * @return {null}
 */
Cursor.prototype.nextObject = function(callback) {
  var self = this;
  if(this.s.state == Cursor.CLOSED || self.isDead()) return handleCallback(callback, new MongoError("Cursor is closed"));
  if(this.s.state == Cursor.INIT && this.s.cmd.sort) {
    try {
      this.s.cmd.sort = formattedOrderClause(this.s.cmd.sort);
    } catch(err) {
      return handleCallback(callback, err);
    }
  }

  // Get the next object
  self.next(function(err, doc) {
    // For tailable cursors, retry on a tailable error until retries are exhausted
    if(err && err.tailable && self.s.currentNumberOfRetries == 0) return callback(err);
    if(err && err.tailable && self.s.currentNumberOfRetries > 0) {
      self.s.currentNumberOfRetries = self.s.currentNumberOfRetries - 1;
      return setTimeout(function() {
        self.nextObject(callback);
      }, self.s.tailableRetryInterval);
    }

    self.s.state = Cursor.OPEN;
    if(err) return handleCallback(callback, err);
    handleCallback(callback, null, doc);
  });
}
// Trampoline emptying the number of retrieved items
// without incurring a nextTick operation
var loop = function(self, callback) {
  // No more items, we are done
  if(self.bufferedCount() == 0) return;
  // Get the next document
  self.next(callback);
  // Loop
  return loop;
}

/**
 * Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
 * not all of the elements will be iterated if this cursor had been previously accessed.
 * In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
 * **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
 * at any given time if batch size is specified. Otherwise, the caller is responsible
 * for making sure that the entire result can fit in memory.
 * @method
 * @deprecated
 * @param {Cursor~resultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */
Cursor.prototype.each = function(callback) {
  // Rewind cursor state
  this.rewind();
  // Set current cursor to INIT
  this.s.state = Cursor.INIT;
  // Run the query
  _each(this, callback);
};

// Run the each loop
var _each = function(self, callback) {
  if(!callback) throw new MongoError('callback is mandatory');
  if(self.isNotified()) return;
  if(self.s.state == Cursor.CLOSED || self.isDead()) {
    return handleCallback(callback, new MongoError("Cursor is closed"), null);
  }

  if(self.s.state == Cursor.INIT) self.s.state = Cursor.OPEN;

  // Define function to avoid global scope escape
  var fn = null;
  // Trampoline all the entries
  if(self.bufferedCount() > 0) {
    while(fn = loop(self, callback)) fn(self, callback);
    _each(self, callback);
  } else {
    self.next(function(err, item) {
      if(err) return handleCallback(callback, err);
      if(item == null) {
        self.s.state = Cursor.CLOSED;
        return handleCallback(callback, null, null);
      }

      if(handleCallback(callback, null, item) == false) return;
      _each(self, callback);
    })
  }
}
/**
 * The callback format for the forEach iterator method
 * @callback Cursor~iteratorCallback
 * @param {Object} doc An emitted document for the iterator
 */

/**
 * The callback error format for the forEach iterator method
 * @callback Cursor~endCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 */

/**
 * Iterates over all the documents for this cursor using the iterator, callback pattern.
 * @method
 * @param {Cursor~iteratorCallback} iterator The iteration callback.
 * @param {Cursor~endCallback} callback The end callback.
 * @throws {MongoError}
 * @return {null}
 */
Cursor.prototype.forEach = function(iterator, callback) {
  this.each(function(err, doc){
    if(err) { callback(err); return false; }
    if(doc != null) { iterator(doc); return true; }
    // A null doc signals the end of iteration; fire the end callback once
    if(doc == null && callback) {
      var internalCallback = callback;
      callback = null;
      internalCallback(null);
      return false;
    }
  });
}
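// Illustrative iteration sketch (editorial addition, hedged):
//
//   col.find({}).forEach(function(doc) {
//     // invoked once per document
//   }, function(err) {
//     // invoked once at the end, or with the error that stopped iteration
//   });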
/**
 * Set the ReadPreference for the cursor.
 * @method
 * @param {(string|ReadPreference)} readPreference The new read preference for the cursor.
 * @throws {MongoError}
 * @return {Cursor}
 */
Cursor.prototype.setReadPreference = function(r) {
  if(this.s.state != Cursor.INIT) throw new MongoError('cannot change cursor readPreference after cursor has been accessed');
  if(r instanceof ReadPreference) {
    this.s.options.readPreference = new CoreReadPreference(r.mode, r.tags);
  } else {
    this.s.options.readPreference = new CoreReadPreference(r);
  }

  return this;
}
/**
 * The callback format for results
 * @callback Cursor~toArrayResultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {object[]} documents All the documents that satisfy the cursor.
 */

/**
 * Returns an array of documents. The caller is responsible for making sure that there
 * is enough memory to store the results. Note that the array only contains partial
 * results when this cursor had been previously accessed. In that case,
 * cursor.rewind() can be used to reset the cursor.
 * @method
 * @param {Cursor~toArrayResultCallback} callback The result callback.
 * @throws {MongoError}
 * @return {null}
 */
Cursor.prototype.toArray = function(callback) {
  var self = this;
  if(!callback) throw new MongoError('callback is mandatory');
  if(self.s.options.tailable) return handleCallback(callback, new MongoError("Tailable cursor cannot be converted to array"), null);
  var items = [];

  // Reset cursor
  this.rewind();
  self.s.state = Cursor.INIT;

  // Fetch all the documents
  var fetchDocs = function() {
    self.next(function(err, doc) {
      if(err) return handleCallback(callback, err);
      if(doc == null) {
        self.s.state = Cursor.CLOSED;
        return handleCallback(callback, null, items);
      }

      // Add doc to items
      items.push(doc);
      // Drain any buffered documents in one go
      if(self.bufferedCount() > 0) {
        var a = self.readBufferedDocuments(self.bufferedCount());
        items = items.concat(a);
      }

      // Attempt a fetch
      fetchDocs();
    })
  }

  fetchDocs();
}
/**
 * The callback format for results
 * @callback Cursor~countResultCallback
 * @param {MongoError} error An error instance representing the error during the execution.
 * @param {number} count The count of documents.
 */

/**
 * Get the count of documents for this cursor
 * @method
 * @param {boolean} applySkipLimit Should the count command apply limit and skip settings on the cursor or in the passed in options.
 * @param {object} [options=null] Optional settings.
 * @param {number} [options.skip=null] The number of documents to skip.
 * @param {number} [options.limit=null] The maximum amounts to count before aborting.
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
 * @param {string} [options.hint=null] An index name hint for the query.
 * @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {Cursor~countResultCallback} callback The result callback.
 * @return {null}
 */
Cursor.prototype.count = function(applySkipLimit, opts, callback) {
  var self = this;
  if(typeof opts == 'function') callback = opts, opts = {};
  opts = opts || {};
  if(self.s.cmd.query == null) return callback(new MongoError("count can only be used with find command"));
  if(typeof applySkipLimit == 'function') {
    callback = applySkipLimit;
    applySkipLimit = true;
  }

  if(applySkipLimit) {
    if(typeof this.cursorSkip() == 'number') opts.skip = this.cursorSkip();
    if(typeof this.cursorLimit() == 'number') opts.limit = this.cursorLimit();
  }

  // Command
  var delimiter = self.s.ns.indexOf('.');

  var command = {
    'count': self.s.ns.substr(delimiter+1), 'query': self.s.cmd.query
  }

  // If maxTimeMS is set on the cursor, pass it on to the count command
  if(typeof self.s.maxTimeMS == 'number') {
    command.maxTimeMS = self.s.maxTimeMS;
  }

  // Get a server
  var server = self.s.topology.getServer(opts);
  // Get a connection
  var connection = self.s.topology.getConnection(opts);
  // Get the callbacks
  var callbacks = server.getCallbacks();

  // Merge in any options
  if(opts.skip) command.skip = opts.skip;
  if(opts.limit) command.limit = opts.limit;
  if(self.s.options.hint) command.hint = self.s.options.hint;

  // Build Query object
  var query = new Query(self.s.bson, f("%s.$cmd", self.s.ns.substr(0, delimiter)), command, {
      numberToSkip: 0, numberToReturn: -1
    , checkKeys: false
  });

  // Set up callback
  callbacks.register(query.requestId, function(err, result) {
    if(err) return handleCallback(callback, err);
    if(result.documents.length == 1
      && (result.documents[0].errmsg
      || result.documents[0].err
      || result.documents[0]['$err'])) return callback(MongoError.create(result.documents[0]));
    handleCallback(callback, null, result.documents[0].n);
  });

  // Write the initial command out
  connection.write(query.toBin());
};
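// Illustrative count usage (editorial sketch, hedged): with applySkipLimit set,
// the cursor's own skip/limit are folded into the count command.
//
//   col.find({a: {$gt: 0}}).skip(5).limit(10).count(true, function(err, n) {
//     // n counts at most 10 documents after skipping 5
//   });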
/**
 * Close the cursor, sending a KillCursor command and emitting close.
 * @method
 * @param {Cursor~resultCallback} [callback] The result callback.
 * @return {null}
 */
Cursor.prototype.close = function(callback) {
  this.s.state = Cursor.CLOSED;
  // Kill the cursor
  this.kill();
  // Emit the close event for the cursor
  this.emit('close');
  // Callback if provided
  if(callback) return handleCallback(callback, null, this);
}

/**
 * Is the cursor closed
 * @method
 * @return {boolean}
 */
Cursor.prototype.isClosed = function() {
  return this.isDead();
}

Cursor.prototype.destroy = function(err) {
  this.pause();
  this.close();
  if(err) this.emit('error', err);
}
/**
 * Return a modified Readable stream including a possible transform method.
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {function} [options.transform=null] A transformation method applied to each document emitted by the stream.
 * @return {Cursor}
 */
Cursor.prototype.stream = function(options) {
  this.s.streamOptions = options || {};
  return this;
}

/**
 * Execute the explain for the cursor
 * @method
 * @param {Cursor~resultCallback} [callback] The result callback.
 * @return {null}
 */
Cursor.prototype.explain = function(callback) {
  this.s.cmd.explain = true;
  this.next(callback);
}
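// Illustrative transform usage (editorial sketch, hedged): stream() merely
// records the options; _read below applies the transform to each document.
//
//   col.find({}).stream({transform: function(doc) { return JSON.stringify(doc) + '\n'; }})
//      .pipe(process.stdout);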
Cursor.prototype._read = function(n) {
  var self = this;
  if(self.s.state == Cursor.CLOSED || self.isDead()) {
    return self.push(null);
  }

  // Get the next item
  self.nextObject(function(err, result) {
    if(err) {
      if(!self.isDead()) self.close();
      if(self.listeners('error') && self.listeners('error').length > 0) {
        self.emit('error', err);
      }

      // Emit end event
      return self.emit('end');
    }

    // If we provided a transformation method
    if(typeof self.s.streamOptions.transform == 'function' && result != null) {
      return self.push(self.s.streamOptions.transform(result));
    }

    // Return the result
    self.push(result);
  });
}
Object.defineProperty(Cursor.prototype, 'namespace', {
  enumerable: true,
  get: function() {
    if (!this || !this.s) {
      return null;
    }

    // TODO: refactor this logic into core
    var ns = this.s.ns || '';
    var firstDot = ns.indexOf('.');
    if (firstDot < 0) {
      return {
        database: this.s.ns,
        collection: ''
      };
    }
    return {
      database: ns.substr(0, firstDot),
      collection: ns.substr(firstDot + 1)
    };
  }
});
/**
 * The read() method pulls some data out of the internal buffer and returns it. If there is no data available, then it will return null.
 * @function external:Readable#read
 * @param {number} size Optional argument to specify how much data to read.
 * @return {(String | Buffer | null)}
 */

/**
 * Call this function to cause the stream to return strings of the specified encoding instead of Buffer objects.
 * @function external:Readable#setEncoding
 * @param {string} encoding The encoding to use.
 * @return {null}
 */

/**
 * This method will cause the readable stream to resume emitting data events.
 * @function external:Readable#resume
 * @return {null}
 */

/**
 * This method will cause a stream in flowing-mode to stop emitting data events. Any data that becomes available will remain in the internal buffer.
 * @function external:Readable#pause
 * @return {null}
 */

/**
 * This method pulls all the data out of a readable stream, and writes it to the supplied destination, automatically managing the flow so that the destination is not overwhelmed by a fast readable stream.
 * @function external:Readable#pipe
 * @param {Writable} destination The destination for writing data
 * @param {object} [options] Pipe options
 * @return {null}
 */

/**
 * This method will remove the hooks set up for a previous pipe() call.
 * @function external:Readable#unpipe
 * @param {Writable} [destination] The destination for writing data
 * @return {null}
 */

/**
 * This is useful in certain cases where a stream is being consumed by a parser, which needs to "un-consume" some data that it has optimistically pulled out of the source, so that the stream can be passed on to some other party.
 * @function external:Readable#unshift
 * @param {(Buffer|string)} chunk Chunk of data to unshift onto the read queue.
 * @return {null}
 */

/**
 * Versions of Node prior to v0.10 had streams that did not implement the entire Streams API as it is today. (See "Compatibility" below for more information.)
 * @function external:Readable#wrap
 * @param {Stream} stream An "old style" readable stream.
 * @return {null}
 */

Cursor.INIT = 0;
Cursor.OPEN = 1;
Cursor.CLOSED = 2;
Cursor.GET_MORE = 3;

module.exports = Cursor;
1352
server/node_modules/mongodb/lib/db.js
generated
vendored
Executable file
File diff suppressed because it is too large

236
server/node_modules/mongodb/lib/gridfs/chunk.js
generated
vendored
Executable file
@@ -0,0 +1,236 @@
"use strict";
|
||||
|
||||
var Binary = require('mongodb-core').BSON.Binary,
|
||||
ObjectID = require('mongodb-core').BSON.ObjectID;
|
||||
|
||||
/**
|
||||
* Class for representing a single chunk in GridFS.
|
||||
*
|
||||
* @class
|
||||
*
|
||||
* @param file {GridStore} The {@link GridStore} object holding this chunk.
|
||||
* @param mongoObject {object} The mongo object representation of this chunk.
|
||||
*
|
||||
* @throws Error when the type of data field for {@link mongoObject} is not
|
||||
* supported. Currently supported types for data field are instances of
|
||||
* {@link String}, {@link Array}, {@link Binary} and {@link Binary}
|
||||
* from the bson module
|
||||
*
|
||||
* @see Chunk#buildMongoObject
|
||||
*/
|
||||
var Chunk = function(file, mongoObject, writeConcern) {
|
||||
if(!(this instanceof Chunk)) return new Chunk(file, mongoObject);
|
||||
|
||||
this.file = file;
|
||||
var self = this;
|
||||
var mongoObjectFinal = mongoObject == null ? {} : mongoObject;
|
||||
this.writeConcern = writeConcern || {w:1};
|
||||
this.objectId = mongoObjectFinal._id == null ? new ObjectID() : mongoObjectFinal._id;
|
||||
this.chunkNumber = mongoObjectFinal.n == null ? 0 : mongoObjectFinal.n;
|
||||
this.data = new Binary();
|
||||
|
||||
if(mongoObjectFinal.data == null) {
|
||||
} else if(typeof mongoObjectFinal.data == "string") {
|
||||
var buffer = new Buffer(mongoObjectFinal.data.length);
|
||||
buffer.write(mongoObjectFinal.data, 'binary', 0);
|
||||
this.data = new Binary(buffer);
|
||||
} else if(Array.isArray(mongoObjectFinal.data)) {
|
||||
var buffer = new Buffer(mongoObjectFinal.data.length);
|
||||
buffer.write(mongoObjectFinal.data.join(''), 'binary', 0);
|
||||
this.data = new Binary(buffer);
|
||||
} else if(mongoObjectFinal.data._bsontype === 'Binary') {
|
||||
this.data = mongoObjectFinal.data;
|
||||
} else if(Buffer.isBuffer(mongoObjectFinal.data)) {
|
||||
} else {
|
||||
throw Error("Illegal chunk format");
|
||||
}
|
||||
|
||||
// Update position
|
||||
this.internalPosition = 0;
|
||||
};
|
||||
|
||||
/**
 * Writes data to this object and advances the read/write head.
 *
 * @param data {string} the data to write
 * @param callback {function(*, GridStore)} This will be called after executing
 *     this method. The first parameter will contain null and the second one
 *     will contain a reference to this object.
 */
Chunk.prototype.write = function(data, callback) {
  this.data.write(data, this.internalPosition);
  this.internalPosition = this.data.length();
  if(callback != null) return callback(null, this);
  return this;
};

/**
 * Reads data and advances the read/write head.
 *
 * @param length {number} The length of data to read.
 *
 * @return {string} The data read if the given length will not exceed the end of
 *     the chunk. Returns an empty String otherwise.
 */
Chunk.prototype.read = function(length) {
  // Default to a full read if no length is given
  length = length == null || length == 0 ? this.length() : length;

  if(this.length() - this.internalPosition + 1 >= length) {
    var data = this.data.read(this.internalPosition, length);
    this.internalPosition = this.internalPosition + length;
    return data;
  } else {
    return '';
  }
};

Chunk.prototype.readSlice = function(length) {
  if ((this.length() - this.internalPosition) >= length) {
    var data = null;
    if (this.data.buffer != null) { // Pure BSON (the JS parser exposes the backing buffer)
      data = this.data.buffer.slice(this.internalPosition, this.internalPosition + length);
    } else { // Native BSON
      data = new Buffer(length);
      length = this.data.readInto(data, this.internalPosition);
    }
    this.internalPosition = this.internalPosition + length;
    return data;
  } else {
    return null;
  }
};

/**
 * Checks if the read/write head is at the end.
 *
 * @return {boolean} Whether the read/write head has reached the end of this
 *     chunk.
 */
Chunk.prototype.eof = function() {
  return this.internalPosition == this.length();
};

/**
 * Reads one character from the data of this chunk and advances the read/write
 * head.
 *
 * @return {string} a single character of data read if the read/write head is
 *     not at the end of the chunk. Returns an empty String otherwise.
 */
Chunk.prototype.getc = function() {
  return this.read(1);
};

/**
 * Clears the contents of the data in this chunk and resets the read/write head
 * to the initial position.
 */
Chunk.prototype.rewind = function() {
  this.internalPosition = 0;
  this.data = new Binary();
};

/**
 * Saves this chunk to the database. Also overwrites existing entries having the
 * same id as this chunk.
 *
 * @param callback {function(*, GridStore)} This will be called after executing
 *     this method. The first parameter will contain null and the second one
 *     will contain a reference to this object.
 */
Chunk.prototype.save = function(options, callback) {
  var self = this;
  if(typeof options == 'function') {
    callback = options;
    options = {};
  }

  self.file.chunkCollection(function(err, collection) {
    if(err) return callback(err);

    // Merge the passed-in options with the chunk's write concern
    var writeOptions = {};
    for(var name in options) writeOptions[name] = options[name];
    for(var name in self.writeConcern) writeOptions[name] = self.writeConcern[name];

    // Remove any existing entry with the same id before re-inserting
    collection.remove({'_id':self.objectId}, writeOptions, function(err, result) {
      if(err) return callback(err);

      if(self.data.length() > 0) {
        self.buildMongoObject(function(mongoObject) {
          // writeOptions already carries the merged write concern
          collection.insert(mongoObject, writeOptions, function(err) {
            callback(err, self);
          });
        });
      } else {
        callback(null, self);
      }
    });
  });
};

/**
 * Creates a MongoDB object representation of this chunk.
 *
 * @param callback {function(Object)} This will be called after executing this
 *     method. The object will be passed to the first parameter and will have
 *     the structure:
 *
 * <pre><code>
 * {
 *   '_id' : ,      // {ObjectID} id for this chunk
 *   'files_id' : , // {ObjectID} foreign key to the file collection
 *   'n' : ,        // {number} chunk number
 *   'data' : ,     // {bson#Binary} the chunk data itself
 * }
 * </code></pre>
 *
 * @see <a href="http://www.mongodb.org/display/DOCS/GridFS+Specification#GridFSSpecification-{{chunks}}">MongoDB GridFS Chunk Object Structure</a>
 */
Chunk.prototype.buildMongoObject = function(callback) {
  var mongoObject = {
    'files_id': this.file.fileId,
    'n': this.chunkNumber,
    'data': this.data};
  // If we are saving using a specific ObjectId
  if(this.objectId != null) mongoObject._id = this.objectId;

  callback(mongoObject);
};

/**
 * @return {number} the length of the data
 */
Chunk.prototype.length = function() {
  return this.data.length();
};

/**
 * The position of the read/write head
 * @name position
 * @lends Chunk#
 * @field
 */
Object.defineProperty(Chunk.prototype, "position", { enumerable: true
  , get: function () {
    return this.internalPosition;
  }
  , set: function(value) {
    this.internalPosition = value;
  }
});

/**
 * The default chunk size
 * @constant
 */
Chunk.DEFAULT_CHUNK_SIZE = 1024 * 255;

module.exports = Chunk;
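
/**
 * A minimal sketch of the Chunk read/write head, assuming this module is
 * required from the driver's lib/gridfs directory; the stub GridStore object
 * ({fileId: null}) is illustrative only, since real Chunks are normally
 * created by GridStore itself.
 * @example
 * var Chunk = require('./chunk');
 * var chunk = new Chunk({fileId: null}, {n: 0});
 * chunk.write('abc');        // head advances to position 3
 * chunk.position = 0;        // move the read/write head back to the start
 * chunk.getc();              // => first byte of the chunk
 * chunk.eof();               // => false, two bytes remain
 */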
1582
server/node_modules/mongodb/lib/gridfs/grid_store.js
generated
vendored
Executable file
File diff suppressed because it is too large
413
server/node_modules/mongodb/lib/mongo_client.js
generated
vendored
Executable file
@@ -0,0 +1,413 @@
"use strict";

var parse = require('./url_parser')
  , Server = require('./server')
  , Mongos = require('./mongos')
  , ReplSet = require('./replset')
  , ReadPreference = require('./read_preference')
  , Db = require('./db');

/**
 * @fileOverview The **MongoClient** class allows for making connections to MongoDB.
 *
 * @example
 * var MongoClient = require('mongodb').MongoClient,
 *   test = require('assert');
 * // Connection url
 * var url = 'mongodb://localhost:27017/test';
 * // Connect using MongoClient
 * MongoClient.connect(url, function(err, db) {
 *   // Get an additional db
 *   db.close();
 * });
 */

/**
 * Creates a new MongoClient instance
 * @class
 * @return {MongoClient} a MongoClient instance.
 */
function MongoClient() {
  /**
   * The callback format for results
   * @callback MongoClient~connectCallback
   * @param {MongoError} error An error instance representing the error during the execution.
   * @param {Db} db The connected database.
   */

  /**
   * Connect to MongoDB using a url as documented at
   *
   *  docs.mongodb.org/manual/reference/connection-string/
   *
   * Note that for replica sets the replicaSet query parameter is required in the 2.0 driver
   *
   * @method
   * @param {string} url The connection URI string
   * @param {object} [options=null] Optional settings.
   * @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
   * @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
   * @param {object} [options.server=null] A hash of options to set on the server objects, see **Server constructor**
   * @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet constructor**
   * @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos constructor**
   * @param {MongoClient~connectCallback} callback The command result callback
   * @return {null}
   */
  this.connect = MongoClient.connect;
}

/**
 * Connect to MongoDB using a url as documented at
 *
 *  docs.mongodb.org/manual/reference/connection-string/
 *
 * Note that for replica sets the replicaSet query parameter is required in the 2.0 driver
 *
 * @method
 * @static
 * @param {string} url The connection URI string
 * @param {object} [options=null] Optional settings.
 * @param {boolean} [options.uri_decode_auth=false] Uri decode the user name and password for authentication
 * @param {object} [options.db=null] A hash of options to set on the db object, see **Db constructor**
 * @param {object} [options.server=null] A hash of options to set on the server objects, see **Server constructor**
 * @param {object} [options.replSet=null] A hash of options to set on the replSet object, see **ReplSet constructor**
 * @param {object} [options.mongos=null] A hash of options to set on the mongos object, see **Mongos constructor**
 * @param {MongoClient~connectCallback} callback The command result callback
 * @return {null}
 */
MongoClient.connect = function(url, options, callback) {
  var args = Array.prototype.slice.call(arguments, 1);
  callback = typeof args[args.length - 1] == 'function' ? args.pop() : null;
  options = args.length ? args.shift() : null;
  options = options || {};

  // Set default empty server options
  var serverOptions = options.server || {};
  var mongosOptions = options.mongos || {};
  var replSetServersOptions = options.replSet || options.replSetServers || {};
  var dbOptions = options.db || {};

  // If callback is null throw an exception
  if(callback == null)
    throw new Error("no callback function provided");

  // Parse the string
  var object = parse(url, options);

  // Merge in any options for db in options object
  if(dbOptions) {
    for(var name in dbOptions) object.db_options[name] = dbOptions[name];
  }

  // Add the url to the options
  object.db_options.url = url;

  // Merge in any options for server in options object
  if(serverOptions) {
    for(var name in serverOptions) object.server_options[name] = serverOptions[name];
  }

  // Merge in any replicaset server options
  if(replSetServersOptions) {
    for(var name in replSetServersOptions) object.rs_options[name] = replSetServersOptions[name];
  }

  if(replSetServersOptions.ssl
    || replSetServersOptions.sslValidate
    || replSetServersOptions.sslCA
    || replSetServersOptions.sslCert
    || replSetServersOptions.sslKey
    || replSetServersOptions.sslPass) {
    object.server_options.ssl = replSetServersOptions.ssl;
    object.server_options.sslValidate = replSetServersOptions.sslValidate;
    object.server_options.sslCA = replSetServersOptions.sslCA;
    object.server_options.sslCert = replSetServersOptions.sslCert;
    object.server_options.sslKey = replSetServersOptions.sslKey;
    object.server_options.sslPass = replSetServersOptions.sslPass;
  }

  // Merge in any mongos options
  if(mongosOptions) {
    for(var name in mongosOptions) object.mongos_options[name] = mongosOptions[name];
  }

  if(typeof object.server_options.poolSize == 'number') {
    if(!object.mongos_options.poolSize) object.mongos_options.poolSize = object.server_options.poolSize;
    if(!object.rs_options.poolSize) object.rs_options.poolSize = object.server_options.poolSize;
  }

  if(mongosOptions.ssl
    || mongosOptions.sslValidate
    || mongosOptions.sslCA
    || mongosOptions.sslCert
    || mongosOptions.sslKey
    || mongosOptions.sslPass) {
    object.server_options.ssl = mongosOptions.ssl;
    object.server_options.sslValidate = mongosOptions.sslValidate;
    object.server_options.sslCA = mongosOptions.sslCA;
    object.server_options.sslCert = mongosOptions.sslCert;
    object.server_options.sslKey = mongosOptions.sslKey;
    object.server_options.sslPass = mongosOptions.sslPass;
  }

  // We need to ensure that the list of servers contains only direct members
  // or mongos proxies; it cannot be a mix of mongos and mongod instances
  var totalNumberOfServers = object.servers.length;
  var totalNumberOfMongosServers = 0;
  var totalNumberOfMongodServers = 0;
  var serverConfig = null;
  var errorServers = {};

  // Failure modes
  if(object.servers.length == 0) throw new Error("connection string must contain at least one seed host");

  // If we have no db setting for the native parser try to set the c++ one first
  object.db_options.native_parser = _setNativeParser(object.db_options);
  // If no auto_reconnect is set, set it to true as default for single servers
  if(typeof object.server_options.auto_reconnect != 'boolean') {
    object.server_options.auto_reconnect = true;
  }

  // If we have more than one server, it could be a replicaset or a mongos list;
  // we need to verify that it's one or the other and fail if it's a mix.
  // Connect to all servers and run ismaster
  for(var i = 0; i < object.servers.length; i++) {
    // Set up socket options
    var providedSocketOptions = object.server_options.socketOptions || {};

    var _server_options = {
      poolSize:1
      , socketOptions: {
        connectTimeoutMS: providedSocketOptions.connectTimeoutMS || 30000
        , socketTimeoutMS: providedSocketOptions.socketTimeoutMS || 30000
      }
      , auto_reconnect:false};

    // Ensure we have ssl setup for the servers
    if(object.server_options.ssl) {
      _server_options.ssl = object.server_options.ssl;
      _server_options.sslValidate = object.server_options.sslValidate;
      _server_options.sslCA = object.server_options.sslCA;
      _server_options.sslCert = object.server_options.sslCert;
      _server_options.sslKey = object.server_options.sslKey;
      _server_options.sslPass = object.server_options.sslPass;
    } else if(object.rs_options.ssl) {
      _server_options.ssl = object.rs_options.ssl;
      _server_options.sslValidate = object.rs_options.sslValidate;
      _server_options.sslCA = object.rs_options.sslCA;
      _server_options.sslCert = object.rs_options.sslCert;
      _server_options.sslKey = object.rs_options.sslKey;
      _server_options.sslPass = object.rs_options.sslPass;
    }

    // Error
    var error = null;
    // Set up the Server object
    var _server = object.servers[i].domain_socket
      ? new Server(object.servers[i].domain_socket, _server_options)
      : new Server(object.servers[i].host, object.servers[i].port, _server_options);

    var setName;

    var connectFunction = function(__server) {
      // Attempt connect
      new Db(object.dbName, __server, {w:1, native_parser:false}).open(function(err, db) {
        // Update number of servers
        totalNumberOfServers = totalNumberOfServers - 1;
        // If no error do the correct checks
        if(!err) {
          // Close the connection
          db.close();
          var isMasterDoc = db.serverConfig.isMasterDoc;
          // Check what type of server we have
          if(isMasterDoc.setName) {
            totalNumberOfMongodServers++;
            setName = isMasterDoc.setName;
          }
          if(isMasterDoc.msg && isMasterDoc.msg == "isdbgrid") totalNumberOfMongosServers++;
        } else {
          error = err;
          errorServers[__server.host + ":" + __server.port] = __server;
        }

        if(totalNumberOfServers == 0) {
          // Error out
          if(totalNumberOfMongodServers == 0 && totalNumberOfMongosServers == 0 && error) {
            return callback(error, null);
          }

          // If we have a mix of mongod and mongos, throw an error
          if(totalNumberOfMongosServers > 0 && totalNumberOfMongodServers > 0) {
            if(db) db.close();
            return process.nextTick(function() {
              try {
                callback(new Error("cannot combine a list of replicaset seeds and mongos seeds"));
              } catch (err) {
                throw err;
              }
            });
          }

          if(totalNumberOfMongodServers == 0
            && totalNumberOfMongosServers == 0
            && object.servers.length == 1) {
            var obj = object.servers[0];
            serverConfig = obj.domain_socket ?
              new Server(obj.domain_socket, object.server_options)
              : new Server(obj.host, obj.port, object.server_options);
          } else if(totalNumberOfMongodServers > 0 || totalNumberOfMongosServers > 0) {
            var finalServers = object.servers
              .filter(function(serverObj) {
                return errorServers[serverObj.host + ":" + serverObj.port] == null;
              })
              .map(function(serverObj) {
                return new Server(serverObj.host, serverObj.port, object.server_options);
              });
            // Clean out any error servers
            errorServers = {};
            // Set up the final configuration
            if(totalNumberOfMongodServers > 0) {
              try {
                if (totalNumberOfMongodServers == 1) {
                  object.rs_options.replicaSet = object.rs_options.replicaSet || setName;
                }
                serverConfig = new ReplSet(finalServers, object.rs_options);
              } catch(err) {
                return callback(err, null);
              }
            } else {
              serverConfig = new Mongos(finalServers, object.mongos_options);
            }
          }

          if(serverConfig == null) {
            return process.nextTick(function() {
              try {
                callback(new Error("Could not locate any valid servers in initial seed list"));
              } catch (err) {
                if(db) db.close();
                throw err;
              }
            });
          }

          // Ensure no firing of open event before we are ready
          serverConfig.emitOpen = false;
          // Set up all options etc and connect to the database
          _finishConnecting(serverConfig, object, options, callback);
        }
      });
    }

    // Wrap the context of the call
    connectFunction(_server);
  }
}
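
/**
 * A minimal connection sketch for the seed-list logic above, assuming a
 * three-member replica set named 'rs0' reachable on localhost (hosts and set
 * name are illustrative). As the docs above note, the 2.0 driver requires the
 * replicaSet query parameter for replica set connections.
 * @example
 * var MongoClient = require('mongodb').MongoClient;
 * var url = 'mongodb://localhost:31000,localhost:31001,localhost:31002/test'
 *   + '?replicaSet=rs0&readPreference=secondaryPreferred';
 * MongoClient.connect(url, function(err, db) {
 *   if(err) throw err;
 *   db.close();
 * });
 */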

var _setNativeParser = function(db_options) {
  if(typeof db_options.native_parser == 'boolean') return db_options.native_parser;

  // Probe for the optional C++ BSON parser; fall back to the JS parser
  try {
    require('mongodb-core').BSON.BSONNative.BSON;
    return true;
  } catch(err) {
    return false;
  }
}

var _finishConnecting = function(serverConfig, object, options, callback) {
  // If we have a readPreference passed in by the db options
  if(typeof object.db_options.readPreference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.readPreference);
  } else if(typeof object.db_options.read_preference == 'string') {
    object.db_options.readPreference = new ReadPreference(object.db_options.read_preference);
  }

  // Do we have readPreference tags
  if(object.db_options.readPreference && object.db_options.readPreferenceTags) {
    object.db_options.readPreference.tags = object.db_options.readPreferenceTags;
  } else if(object.db_options.readPreference && object.db_options.read_preference_tags) {
    object.db_options.readPreference.tags = object.db_options.read_preference_tags;
  }

  // Get the socketTimeoutMS
  var socketTimeoutMS = object.server_options.socketOptions.socketTimeoutMS || 0;

  // If we have a replset, override with the replicaset socket timeout option if available
  if(serverConfig instanceof ReplSet) {
    socketTimeoutMS = object.rs_options.socketOptions.socketTimeoutMS || socketTimeoutMS;
  }

  // While connecting, set socketTimeout to the same as the connectTimeoutMS or 30 sec
  serverConfig.connectTimeoutMS = serverConfig.connectTimeoutMS || 30000;
  serverConfig.socketTimeoutMS = serverConfig.connectTimeoutMS;

  // Set up the db options
  var db = new Db(object.dbName, serverConfig, object.db_options);
  // Open the db
  db.open(function(err, db){

    if(err) {
      return process.nextTick(function() {
        try {
          callback(err, null);
        } catch (err) {
          if(db) db.close();
          throw err;
        }
      });
    }

    // Reset the socket timeout
    serverConfig.socketTimeoutMS = socketTimeoutMS || 0;

    // Return object
    if(err == null && object.auth){
      // What db to authenticate against
      var authentication_db = db;
      if(object.db_options && object.db_options.authSource) {
        authentication_db = db.db(object.db_options.authSource);
      }

      // Build options object
      var options = {};
      if(object.db_options.authMechanism) options.authMechanism = object.db_options.authMechanism;
      if(object.db_options.gssapiServiceName) options.gssapiServiceName = object.db_options.gssapiServiceName;

      // Authenticate
      authentication_db.authenticate(object.auth.user, object.auth.password, options, function(err, success){
        if(success){
          process.nextTick(function() {
            try {
              callback(null, db);
            } catch (err) {
              if(db) db.close();
              throw err;
            }
          });
        } else {
          if(db) db.close();
          process.nextTick(function() {
            try {
              callback(err ? err : new Error('Could not authenticate user ' + object.auth.user), null);
            } catch (err) {
              if(db) db.close();
              throw err;
            }
          });
        }
      });
    } else {
      process.nextTick(function() {
        try {
          callback(err, db);
        } catch (err) {
          if(db) db.close();
          throw err;
        }
      });
    }
  });
}

module.exports = MongoClient;
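
/**
 * A minimal sketch of the authentication path in _finishConnecting above:
 * credentials and authSource come straight from the connection string (the
 * user, password, and host below are illustrative).
 * @example
 * var MongoClient = require('mongodb').MongoClient;
 * var url = 'mongodb://user:pass@localhost:27017/test?authSource=admin';
 * MongoClient.connect(url, function(err, db) {
 *   if(err) throw err;   // fails if authentication was unsuccessful
 *   db.close();
 * });
 */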
454
server/node_modules/mongodb/lib/mongos.js
generated
vendored
Executable file
@@ -0,0 +1,454 @@
"use strict";

var EventEmitter = require('events').EventEmitter
  , inherits = require('util').inherits
  , f = require('util').format
  , ServerCapabilities = require('./topology_base').ServerCapabilities
  , MongoCR = require('mongodb-core').MongoCR
  , MongoError = require('mongodb-core').MongoError
  , CMongos = require('mongodb-core').Mongos
  , Cursor = require('./cursor')
  , Server = require('./server')
  , Store = require('./topology_base').Store
  , shallowClone = require('./utils').shallowClone;

/**
 * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is
 * used to construct connections.
 *
 * **Mongos should not be used directly; use MongoClient.connect**
 * @example
 * var Db = require('mongodb').Db,
 *   Mongos = require('mongodb').Mongos,
 *   Server = require('mongodb').Server,
 *   test = require('assert');
 * // Connect using Mongos
 * var server = new Server('localhost', 27017);
 * var db = new Db('test', new Mongos([server]));
 * db.open(function(err, db) {
 *   // Get an additional db
 *   db.close();
 * });
 */

/**
 * Creates a new Mongos instance
 * @class
 * @deprecated
 * @param {Server[]} servers A seedlist of mongos proxy servers to connect to.
 * @param {object} [options=null] Optional settings.
 * @param {boolean} [options.ha=true] Turn on high availability monitoring.
 * @param {number} [options.haInterval=5000] Time between each proxy status check.
 * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
 * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
 * @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {object} [options.socketOptions=null] Socket options
 * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
 * @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with an X ms delay before start.
 * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
 * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
 * @fires Mongos#connect
 * @fires Mongos#ha
 * @fires Mongos#joined
 * @fires Mongos#left
 * @fires Mongos#fullsetup
 * @fires Mongos#open
 * @fires Mongos#close
 * @fires Mongos#error
 * @fires Mongos#timeout
 * @fires Mongos#parseError
 * @return {Mongos} a Mongos instance.
 */
var Mongos = function(servers, options) {
  if(!(this instanceof Mongos)) return new Mongos(servers, options);
  options = options || {};
  var self = this;

  // Ensure all the instances are Server
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw new MongoError("all seed list instances must be of the Server type");
    }
  }

  // Store option defaults
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store
  var store = options.store || new Store(self, storeOptions);

  // Set up event emitter
  EventEmitter.call(this);

  // Debug tag
  var tag = options.tag;

  // Build seed list
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });

  // Final options
  var finalOptions = shallowClone(options);

  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;

  // Add the store
  finalOptions.disconnectHandler = store;

  // Socket options passed down
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }
    if(options.socketOptions.socketTimeoutMS)
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
  }

  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }

  // Map keep alive setting
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }

  // Connection timeout
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }

  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }

  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }

  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }

  // Add the non connection store
  finalOptions.disconnectHandler = store;

  // Create the Mongos
  var mongos = new CMongos(seedlist, finalOptions);
  // Server capabilities
  var sCapabilities = null;
  // Add auth providers
  mongos.addAuthProvider('mongocr', new MongoCR());

  // Internal state
  this.s = {
    // Create the Mongos
    mongos: mongos
    // Server capabilities
    , sCapabilities: sCapabilities
    // Debug turned on
    , debug: debug
    // Store option defaults
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Actual store of callbacks
    , store: store
    // Options
    , options: options
  }

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return self.s.mongos.lastIsMaster(); }
  });

  // Number of connected servers
  Object.defineProperty(this, 'numberOfConnectedServers', {
    enumerable:true, get: function() { return self.s.mongos.connectedServers().length; }
  });

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.mongos.bson;
    }
  });

  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return self.s.mongos.haInterval; }
  });
}

/**
 * @ignore
 */
inherits(Mongos, EventEmitter);

// Connect
Mongos.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Error handler used while establishing the initial connection
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove all event handlers
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        self.removeListener(e, connectErrorHandler);
      });

      self.s.mongos.removeListener('connect', connectErrorHandler);

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        process.nextTick(function() { throw err; })
      }
    }
  }

  // Actual handler
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // Reconnect handler
  var reconnectHandler = function(err) {
    self.emit('reconnect');
    self.s.store.execute();
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.mongos.removeAllListeners(e);
    });

    // Set up listeners
    self.s.mongos.once('timeout', errorHandler('timeout'));
    self.s.mongos.once('error', errorHandler('error'));
    self.s.mongos.once('close', errorHandler('close'));

    // Relay the event
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }

    // Set up serverConfig listeners
    self.s.mongos.on('joined', relay('joined'));
    self.s.mongos.on('left', relay('left'));
    self.s.mongos.on('fullsetup', relay('fullsetup'));

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }

  // Set up listeners
  self.s.mongos.once('timeout', connectErrorHandler('timeout'));
  self.s.mongos.once('error', connectErrorHandler('error'));
  self.s.mongos.once('close', connectErrorHandler('close'));
  self.s.mongos.once('connect', connectHandler);
  // Reconnect server
  self.s.mongos.on('reconnect', reconnectHandler);

  // Start connection
  self.s.mongos.connect(_options);
}

Mongos.prototype.parserType = function() {
  return this.s.mongos.parserType();
}

// Server capabilities
Mongos.prototype.capabilities = function() {
  if(this.s.sCapabilities) return this.s.sCapabilities;
  this.s.sCapabilities = new ServerCapabilities(this.s.mongos.lastIsMaster());
  return this.s.sCapabilities;
}

// Command
Mongos.prototype.command = function(ns, cmd, options, callback) {
  this.s.mongos.command(ns, cmd, options, callback);
}

// Insert
Mongos.prototype.insert = function(ns, ops, options, callback) {
  this.s.mongos.insert(ns, ops, options, function(e, m) {
    callback(e, m);
  });
}

// Update
Mongos.prototype.update = function(ns, ops, options, callback) {
  this.s.mongos.update(ns, ops, options, callback);
}

// Remove
Mongos.prototype.remove = function(ns, ops, options, callback) {
  this.s.mongos.remove(ns, ops, options, callback);
}

// IsConnected
Mongos.prototype.isConnected = function() {
  return this.s.mongos.isConnected();
}

// Cursor
Mongos.prototype.cursor = function(ns, cmd, options) {
  options.disconnectHandler = this.s.store;
  return this.s.mongos.cursor(ns, cmd, options);
}

Mongos.prototype.setBSONParserType = function(type) {
  return this.s.mongos.setBSONParserType(type);
}

Mongos.prototype.lastIsMaster = function() {
  return this.s.mongos.lastIsMaster();
}

Mongos.prototype.close = function(forceClosed) {
  this.s.mongos.destroy();
  // We need to flush out all stored operations
  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }
}

Mongos.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.mongos.auth.apply(this.s.mongos, args);
}

/**
 * All raw connections
 * @method
 * @return {array}
 */
Mongos.prototype.connections = function() {
  return this.s.mongos.connections();
}

/**
 * A mongos connect event, used to verify that the connection is up and running
 *
 * @event Mongos#connect
 * @type {Mongos}
 */

/**
 * The mongos high availability event
 *
 * @event Mongos#ha
 * @type {function}
 * @param {string} type The stage in the high availability event (start|end)
 * @param {boolean} data.norepeat Whether this is a repeating high availability process or a single execution only
 * @param {number} data.id The id for this high availability request
 * @param {object} data.state An object containing the information about the current replicaset
 */

/**
 * A server member left the mongos set
 *
 * @event Mongos#left
 * @type {function}
 * @param {string} type The type of member that left (primary|secondary|arbiter)
 * @param {Server} server The server object that left
 */

/**
 * A server member joined the mongos set
 *
 * @event Mongos#joined
 * @type {function}
 * @param {string} type The type of member that joined (primary|secondary|arbiter)
 * @param {Server} server The server object that joined
 */

/**
 * Mongos fullsetup event, emitted when all proxies in the topology have been connected to.
 *
 * @event Mongos#fullsetup
 * @type {Mongos}
 */

/**
 * Mongos open event, emitted when mongos can start processing commands.
 *
 * @event Mongos#open
 * @type {Mongos}
 */

/**
 * Mongos close event
 *
 * @event Mongos#close
 * @type {object}
 */

/**
 * Mongos error event, emitted if there is an error listener.
 *
 * @event Mongos#error
 * @type {MongoError}
 */

/**
 * Mongos timeout event
 *
 * @event Mongos#timeout
 * @type {object}
 */

/**
 * Mongos parseError event
 *
 * @event Mongos#parseError
 * @type {object}
 */

module.exports = Mongos;
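
/**
 * A minimal sketch of connecting through mongos proxies with MongoClient
 * (preferred over instantiating Mongos directly, per the note above); the two
 * proxy addresses are illustrative.
 * @example
 * var MongoClient = require('mongodb').MongoClient;
 * var url = 'mongodb://localhost:50000,localhost:50001/test';
 * MongoClient.connect(url, function(err, db) {
 *   if(err) throw err;   // all seeds must be mongos, not mongod
 *   db.close();
 * });
 */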
104
server/node_modules/mongodb/lib/read_preference.js
generated
vendored
Executable file
@@ -0,0 +1,104 @@
"use strict";

/**
 * @fileOverview The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
 * used to construct connections.
 *
 * @example
 * var Db = require('mongodb').Db,
 *   ReplSet = require('mongodb').ReplSet,
 *   Server = require('mongodb').Server,
 *   ReadPreference = require('mongodb').ReadPreference,
 *   test = require('assert');
 * // Connect using ReplSet
 * var server = new Server('localhost', 27017);
 * var db = new Db('test', new ReplSet([server]));
 * db.open(function(err, db) {
 *   test.equal(null, err);
 *   // Perform a read
 *   var cursor = db.collection('t').find({});
 *   cursor.setReadPreference(ReadPreference.PRIMARY);
 *   cursor.toArray(function(err, docs) {
 *     test.equal(null, err);
 *     db.close();
 *   });
 * });
 */

/**
 * Creates a new ReadPreference instance
 *
 * Read Preferences
 * - **ReadPreference.PRIMARY**, Read from primary only. All operations produce an error (throw an exception where applicable) if primary is unavailable. Cannot be combined with tags. (This is the default.)
 * - **ReadPreference.PRIMARY_PREFERRED**, Read from primary if available, otherwise a secondary.
 * - **ReadPreference.SECONDARY**, Read from secondary if available, otherwise error.
 * - **ReadPreference.SECONDARY_PREFERRED**, Read from a secondary if available, otherwise read from the primary.
 * - **ReadPreference.NEAREST**, All modes read from among the nearest candidates, but unlike other modes, NEAREST will include both the primary and all secondaries in the random selection.
 *
 * @class
 * @param {string} mode The ReadPreference mode as listed above.
 * @param {object} tags An object representing read preference tags.
 * @property {string} mode The ReadPreference mode.
 * @property {object} tags The ReadPreference tags.
 * @return {ReadPreference} a ReadPreference instance.
 */
var ReadPreference = function(mode, tags) {
  if(!(this instanceof ReadPreference))
    return new ReadPreference(mode, tags);
  this._type = 'ReadPreference';
  this.mode = mode;
  this.tags = tags;
}

/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} mode The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.isValid = function(_mode) {
  return (_mode == ReadPreference.PRIMARY || _mode == ReadPreference.PRIMARY_PREFERRED
    || _mode == ReadPreference.SECONDARY || _mode == ReadPreference.SECONDARY_PREFERRED
    || _mode == ReadPreference.NEAREST
    || _mode == true || _mode == false || _mode == null);
}

/**
 * Validate if this instance's mode (or the given mode) is legal
 *
 * @method
 * @param {string} [mode] The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.prototype.isValid = function(mode) {
  var _mode = typeof mode == 'string' ? mode : this.mode;
  return ReadPreference.isValid(_mode);
}

/**
 * @ignore
 */
ReadPreference.prototype.toObject = function() {
  var object = {mode:this.mode};

  if(this.tags != null) {
    object['tags'] = this.tags;
  }

  return object;
}

/**
 * @ignore
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';

/**
 * @ignore
 */
module.exports = ReadPreference;
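
/**
 * A minimal sketch of constructing a ReadPreference with tags; the tag set
 * {dc: 'ny'} is illustrative.
 * @example
 * var ReadPreference = require('./read_preference');
 * var pref = new ReadPreference(ReadPreference.SECONDARY_PREFERRED, {dc: 'ny'});
 * pref.isValid();   // => true
 * pref.toObject();  // => { mode: 'secondaryPreferred', tags: { dc: 'ny' } }
 */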
527
server/node_modules/mongodb/lib/replset.js
generated
vendored
Executable file
@@ -0,0 +1,527 @@
"use strict";

var EventEmitter = require('events').EventEmitter
  , inherits = require('util').inherits
  , f = require('util').format
  , Server = require('./server')
  , Mongos = require('./mongos')
  , Cursor = require('./cursor')
  , ReadPreference = require('./read_preference')
  , MongoCR = require('mongodb-core').MongoCR
  , MongoError = require('mongodb-core').MongoError
  , ServerCapabilities = require('./topology_base').ServerCapabilities
  , Store = require('./topology_base').Store
  , CServer = require('mongodb-core').Server
  , CReplSet = require('mongodb-core').ReplSet
  , CoreReadPreference = require('mongodb-core').ReadPreference
  , shallowClone = require('./utils').shallowClone;

/**
 * @fileOverview The **ReplSet** class is a class that represents a Replicaset topology and is
 * used to construct connections.
 *
 * **ReplSet should not be used directly; use MongoClient.connect**
 * @example
 * var Db = require('mongodb').Db,
 *   ReplSet = require('mongodb').ReplSet,
 *   Server = require('mongodb').Server,
 *   test = require('assert');
 * // Connect using ReplSet
 * var server = new Server('localhost', 27017);
 * var db = new Db('test', new ReplSet([server]));
 * db.open(function(err, db) {
 *   // Get an additional db
 *   db.close();
 * });
 */

/**
 * Creates a new ReplSet instance
 * @class
 * @deprecated
 * @param {Server[]} servers A seedlist of servers participating in the replicaset.
 * @param {object} [options=null] Optional settings.
 * @param {boolean} [options.ha=true] Turn on high availability monitoring.
 * @param {number} [options.haInterval=5000] Time between each replicaset status check.
 * @param {string} options.replicaSet The name of the replicaset to connect to.
 * @param {number} [options.secondaryAcceptableLatencyMS=15] Sets the range of servers to pick when using NEAREST (lowest ping ms + the latency fence, ex: range of 1 to (1 + 15) ms)
 * @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
 * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
 * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
 * @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {object} [options.socketOptions=null] Socket options
 * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
 * @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with an X ms delay before start.
 * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
 * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
 * @fires ReplSet#connect
 * @fires ReplSet#ha
 * @fires ReplSet#joined
 * @fires ReplSet#left
 * @fires ReplSet#fullsetup
 * @fires ReplSet#open
 * @fires ReplSet#close
 * @fires ReplSet#error
 * @fires ReplSet#timeout
 * @fires ReplSet#parseError
 * @return {ReplSet} a ReplSet instance.
 */
var ReplSet = function(servers, options) {
  if(!(this instanceof ReplSet)) return new ReplSet(servers, options);
  options = options || {};
  var self = this;

  // Ensure all the instances are Server
  for(var i = 0; i < servers.length; i++) {
    if(!(servers[i] instanceof Server)) {
      throw new MongoError("all seed list instances must be of the Server type");
    }
  }

  // Store option defaults
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store
  var store = options.store || new Store(self, storeOptions);

  // Set up event emitter
  EventEmitter.call(this);

  // Debug tag
  var tag = options.tag;

  // Build seed list
  var seedlist = servers.map(function(x) {
    return {host: x.host, port: x.port}
  });

  // Final options
  var finalOptions = shallowClone(options);

  // Default values
  finalOptions.size = typeof options.poolSize == 'number' ? options.poolSize : 5;
  finalOptions.reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  finalOptions.emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  finalOptions.cursorFactory = Cursor;

  // Add the store
  finalOptions.disconnectHandler = store;

  // Socket options passed down
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      finalOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }

    if(options.socketOptions.socketTimeoutMS) {
      finalOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }
  }

  // Get the name
  var replicaSet = options.replicaSet || options.rs_name;

  // Set up options
  finalOptions.setName = replicaSet;

  // Are we running in debug mode
  var debug = typeof options.debug == 'boolean' ? options.debug : false;
  if(debug) {
    finalOptions.debug = debug;
  }

  // Map keep alive setting
  if(options.socketOptions && typeof options.socketOptions.keepAlive == 'number') {
    finalOptions.keepAlive = true;
    if(typeof options.socketOptions.keepAlive == 'number') {
      finalOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
    }
  }

  // Connection timeout
  if(options.socketOptions && typeof options.socketOptions.connectionTimeout == 'number') {
    finalOptions.connectionTimeout = options.socketOptions.connectionTimeout;
  }

  // Socket timeout
  if(options.socketOptions && typeof options.socketOptions.socketTimeout == 'number') {
    finalOptions.socketTimeout = options.socketOptions.socketTimeout;
  }

  // noDelay
  if(options.socketOptions && typeof options.socketOptions.noDelay == 'boolean') {
    finalOptions.noDelay = options.socketOptions.noDelay;
  }

  if(typeof options.secondaryAcceptableLatencyMS == 'number') {
    finalOptions.acceptableLatency = options.secondaryAcceptableLatencyMS;
  }

  if(options.connectWithNoPrimary == true) {
    finalOptions.secondaryOnlyConnectionAllowed = true;
  }

  // Add the non connection store
  finalOptions.disconnectHandler = store;

  // Translate the ssl options into the tls names used by core
  if(options.sslCA) finalOptions.ca = options.sslCA;
  if(typeof options.sslValidate == 'boolean') finalOptions.rejectUnauthorized = options.sslValidate;
  if(options.sslKey) finalOptions.key = options.sslKey;
  if(options.sslCert) finalOptions.cert = options.sslCert;
  if(options.sslPass) finalOptions.passphrase = options.sslPass;

  // Create the ReplSet
  var replset = new CReplSet(seedlist, finalOptions);
  // Server capabilities
  var sCapabilities = null;
  // Add auth providers
  replset.addAuthProvider('mongocr', new MongoCR());

  // Listen to reconnect event
  replset.on('reconnect', function() {
    self.emit('reconnect');
    store.execute();
  });

  // Internal state
  this.s = {
    // Replicaset
    replset: replset
    // Server capabilities
    , sCapabilities: null
    // Debug tag
    , tag: options.tag
    // Store options
    , storeOptions: storeOptions
    // Cloned options
    , clonedOptions: finalOptions
    // Store
    , store: store
    // Options
    , options: options
  }

  // Debug
  if(debug) {
    // Expose the underlying core replset (debug only)
    Object.defineProperty(this, 'replset', {
      enumerable:true, get: function() { return replset; }
    });
  }

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() { return replset.lastIsMaster(); }
  });

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return replset.bson;
    }
  });

  Object.defineProperty(this, 'haInterval', {
    enumerable:true, get: function() { return replset.haInterval; }
  });
}

/**
 * @ignore
 */
inherits(ReplSet, EventEmitter);

// Ensure the right read preference object is passed down to core
var translateReadPreference = function(options) {
  if(typeof options.readPreference == 'string') {
    options.readPreference = new CoreReadPreference(options.readPreference);
  } else if(options.readPreference instanceof ReadPreference) {
    options.readPreference = new CoreReadPreference(options.readPreference.mode
      , options.readPreference.tags);
  }

  return options;
}
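
/**
 * A minimal sketch of what translateReadPreference above does: both a bare
 * mode string and a driver-level ReadPreference are normalized into the
 * mongodb-core representation before the command is dispatched; the tag set
 * {dc: 'ny'} is illustrative.
 * @example
 * var options = translateReadPreference({readPreference: 'nearest'});
 * // options.readPreference is now a mongodb-core ReadPreference
 * options = translateReadPreference({
 *   readPreference: new ReadPreference('nearest', {dc: 'ny'})
 * });
 * // mode and tags are carried over to the core object
 */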

ReplSet.prototype.parserType = function() {
  return this.s.replset.parserType();
}

// Connect method
ReplSet.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Actual handler
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.replset.removeAllListeners(e);
    });

    // Set up listeners
    self.s.replset.once('timeout', errorHandler('timeout'));
    self.s.replset.once('error', errorHandler('error'));
    self.s.replset.once('close', errorHandler('close'));

    // Relay the event
    var relay = function(event) {
      return function(t, server) {
        self.emit(event, t, server);
      }
    }

    // Replset events relay
    var replsetRelay = function(event) {
      return function(t, server) {
        self.emit(event, t, server.lastIsMaster(), server);
      }
    }

    // Relay ha
    var relayHa = function(t, state) {
      self.emit('ha', t, state);

      if(t == 'start') {
        self.emit('ha_connect', t, state);
      } else if(t == 'end') {
        self.emit('ha_ismaster', t, state);
      }
    }

    // Set up serverConfig listeners
    self.s.replset.on('joined', replsetRelay('joined'));
    self.s.replset.on('left', relay('left'));
    self.s.replset.on('ping', relay('ping'));
    self.s.replset.on('ha', relayHa);

    self.s.replset.on('fullsetup', function(topology) {
      self.emit('fullsetup', null, self);
    });

    self.s.replset.on('all', function(topology) {
      self.emit('all', null, self);
    });

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }

  // Error handler used while establishing the initial connection
  var connectErrorHandler = function(event) {
    return function(err) {
      ['timeout', 'error', 'close'].forEach(function(e) {
        self.s.replset.removeListener(e, connectErrorHandler);
      });

      self.s.replset.removeListener('connect', connectErrorHandler);
      // Destroy the replset
      self.s.replset.destroy();

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        if(!self.s.replset.isConnected())
          process.nextTick(function() { throw err; })
      }
    }
  }

  // Set up listeners
  self.s.replset.once('timeout', connectErrorHandler('timeout'));
  self.s.replset.once('error', connectErrorHandler('error'));
  self.s.replset.once('close', connectErrorHandler('close'));
  self.s.replset.once('connect', connectHandler);

  // Start connection
  self.s.replset.connect(_options);
}
|
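
/**
 * Usage sketch (illustrative only, not part of the original file; the exact
 * constructor arguments are assumed). connect() expects a db-like object
 * carrying bufferMaxEntries, an options object, and a callback.
 * @example
 * // topology is an already constructed ReplSet instance
 * topology.connect({bufferMaxEntries: -1}, {}, function(err, t) {
 *   if(err) return console.log('connect failed', err);
 *   // 'open' has been emitted; t is the connected topology
 * });
 */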

// Server capabilities
ReplSet.prototype.capabilities = function() {
  if(this.s.sCapabilities) return this.s.sCapabilities;
  this.s.sCapabilities = new ServerCapabilities(this.s.replset.lastIsMaster());
  return this.s.sCapabilities;
}
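
/**
 * Usage sketch (illustrative only): capabilities are derived lazily from the
 * last ismaster result and cached on the topology.
 * @example
 * var caps = topology.capabilities();
 * if(caps.hasWriteCommands) {
 *   // server speaks wire protocol >= 2, write commands are available
 * }
 */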

// Command
ReplSet.prototype.command = function(ns, cmd, options, callback) {
  options = translateReadPreference(options);
  this.s.replset.command(ns, cmd, options, callback);
}

// Insert
ReplSet.prototype.insert = function(ns, ops, options, callback) {
  this.s.replset.insert(ns, ops, options, callback);
}

// Update
ReplSet.prototype.update = function(ns, ops, options, callback) {
  this.s.replset.update(ns, ops, options, callback);
}

// Remove
ReplSet.prototype.remove = function(ns, ops, options, callback) {
  this.s.replset.remove(ns, ops, options, callback);
}

// IsConnected
ReplSet.prototype.isConnected = function() {
  return this.s.replset.isConnected();
}

ReplSet.prototype.setBSONParserType = function(type) {
  return this.s.replset.setBSONParserType(type);
}

// Cursor
ReplSet.prototype.cursor = function(ns, cmd, options) {
  options = translateReadPreference(options);
  options.disconnectHandler = this.s.store;
  return this.s.replset.cursor(ns, cmd, options);
}

ReplSet.prototype.lastIsMaster = function() {
  return this.s.replset.lastIsMaster();
}

ReplSet.prototype.close = function(forceClosed) {
  var self = this;
  this.s.replset.destroy();
  // We need to wash out all stored processes
  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }

  var events = ['timeout', 'error', 'close', 'joined', 'left'];
  events.forEach(function(e) {
    self.removeAllListeners(e);
  });
}

ReplSet.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.replset.auth.apply(this.s.replset, args);
}

/**
 * All raw connections
 * @method
 * @return {array}
 */
ReplSet.prototype.connections = function() {
  return this.s.replset.connections();
}

/**
 * A replset connect event, used to verify that the connection is up and running
 *
 * @event ReplSet#connect
 * @type {ReplSet}
 */

/**
 * The replset high availability event
 *
 * @event ReplSet#ha
 * @type {function}
 * @param {string} type The stage in the high availability event (start|end)
 * @param {boolean} data.norepeat This is a repeating high availability process or a single execution only
 * @param {number} data.id The id for this high availability request
 * @param {object} data.state An object containing the information about the current replicaset
 */

/**
 * A server member left the replicaset
 *
 * @event ReplSet#left
 * @type {function}
 * @param {string} type The type of member that left (primary|secondary|arbiter)
 * @param {Server} server The server object that left
 */

/**
 * A server member joined the replicaset
 *
 * @event ReplSet#joined
 * @type {function}
 * @param {string} type The type of member that joined (primary|secondary|arbiter)
 * @param {Server} server The server object that joined
 */

/**
 * ReplSet open event, emitted when replicaset can start processing commands.
 *
 * @event ReplSet#open
 * @type {ReplSet}
 */

/**
 * ReplSet fullsetup event, emitted when all servers in the topology have been connected to.
 *
 * @event ReplSet#fullsetup
 * @type {ReplSet}
 */

/**
 * ReplSet close event
 *
 * @event ReplSet#close
 * @type {object}
 */

/**
 * ReplSet error event, emitted if there is an error listener.
 *
 * @event ReplSet#error
 * @type {MongoError}
 */

/**
 * ReplSet timeout event
 *
 * @event ReplSet#timeout
 * @type {object}
 */

/**
 * ReplSet parseError event
 *
 * @event ReplSet#parseError
 * @type {object}
 */

module.exports = ReplSet;
408
server/node_modules/mongodb/lib/server.js
generated
vendored
Executable file
@@ -0,0 +1,408 @@
"use strict";

var EventEmitter = require('events').EventEmitter
  , inherits = require('util').inherits
  , CServer = require('mongodb-core').Server
  , Cursor = require('./cursor')
  , f = require('util').format
  , ServerCapabilities = require('./topology_base').ServerCapabilities
  , Store = require('./topology_base').Store
  , MongoError = require('mongodb-core').MongoError
  , shallowClone = require('./utils').shallowClone;

/**
 * @fileOverview The **Server** class is a class that represents a single server topology and is
 * used to construct connections.
 *
 * **Server Should not be used, use MongoClient.connect**
 * @example
 * var Db = require('mongodb').Db,
 *   Server = require('mongodb').Server,
 *   test = require('assert');
 * // Connect using single Server
 * var db = new Db('test', new Server('localhost', 27017));
 * db.open(function(err, db) {
 *   // Get an additional db
 *   db.close();
 * });
 */

/**
 * Creates a new Server instance
 * @class
 * @deprecated
 * @param {string} host The host for the server, can be either an IP4, IP6 or domain socket style host.
 * @param {number} [port] The server port if IP4.
 * @param {object} [options=null] Optional settings.
 * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons.
 * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support)
 * @param {object} [options.sslValidate=true] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {array} [options.sslCA=null] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslCert=null] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslKey=null] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {(Buffer|string)} [options.sslPass=null] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher)
 * @param {object} [options.socketOptions=null] Socket options
 * @param {boolean} [options.socketOptions.autoReconnect=false] Reconnect on error.
 * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option.
 * @param {number} [options.socketOptions.keepAlive=0] TCP KeepAlive on the socket with a X ms delay before start.
 * @param {number} [options.socketOptions.connectTimeoutMS=0] TCP Connection timeout setting
 * @param {number} [options.socketOptions.socketTimeoutMS=0] TCP Socket timeout setting
 * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
 * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
 * @fires Server#connect
 * @fires Server#close
 * @fires Server#error
 * @fires Server#timeout
 * @fires Server#parseError
 * @fires Server#reconnect
 * @return {Server} a Server instance.
 */
var Server = function(host, port, options) {
  options = options || {};
  if(!(this instanceof Server)) return new Server(host, port, options);
  EventEmitter.call(this);
  var self = this;

  // Store option defaults
  var storeOptions = {
    force: false
    , bufferMaxEntries: -1
  }

  // Shared global store
  var store = options.store || new Store(self, storeOptions);

  // Detect if we have a socket connection
  if(host.indexOf('\/') != -1) {
    if(port != null && typeof port == 'object') {
      options = port;
      port = null;
    }
  } else if(port == null) {
    throw new MongoError('port must be specified');
  }

  // Clone options
  var clonedOptions = shallowClone(options);
  clonedOptions.host = host;
  clonedOptions.port = port;

  // Reconnect
  var reconnect = typeof options.auto_reconnect == 'boolean' ? options.auto_reconnect : true;
  reconnect = typeof options.autoReconnect == 'boolean' ? options.autoReconnect : reconnect;
  var emitError = typeof options.emitError == 'boolean' ? options.emitError : true;
  var poolSize = typeof options.poolSize == 'number' ? options.poolSize : 5;

  // Socket options passed down
  if(options.socketOptions) {
    if(options.socketOptions.connectTimeoutMS) {
      this.connectTimeoutMS = options.socketOptions.connectTimeoutMS;
      clonedOptions.connectionTimeout = options.socketOptions.connectTimeoutMS;
    }

    if(options.socketOptions.socketTimeoutMS) {
      clonedOptions.socketTimeout = options.socketOptions.socketTimeoutMS;
    }

    if(typeof options.socketOptions.keepAlive == 'number') {
      clonedOptions.keepAliveInitialDelay = options.socketOptions.keepAlive;
      clonedOptions.keepAlive = true;
    }

    if(typeof options.socketOptions.noDelay == 'boolean') {
      clonedOptions.noDelay = options.socketOptions.noDelay;
    }
  }

  // Add the cursor factory function
  clonedOptions.cursorFactory = Cursor;
  clonedOptions.reconnect = reconnect;
  clonedOptions.emitError = emitError;
  clonedOptions.size = poolSize;

  // Translate the options
  if(clonedOptions.sslCA) clonedOptions.ca = clonedOptions.sslCA;
  if(typeof clonedOptions.sslValidate == 'boolean') clonedOptions.rejectUnauthorized = clonedOptions.sslValidate;
  if(clonedOptions.sslKey) clonedOptions.key = clonedOptions.sslKey;
  if(clonedOptions.sslCert) clonedOptions.cert = clonedOptions.sslCert;
  if(clonedOptions.sslPass) clonedOptions.passphrase = clonedOptions.sslPass;

  // Add the non connection store
  clonedOptions.disconnectHandler = store;

  // Create an instance of a server instance from mongodb-core
  var server = new CServer(clonedOptions);
  // Server capabilities
  var sCapabilities = null;

  // Define the internal properties
  this.s = {
    // Create an instance of a server instance from mongodb-core
    server: server
    // Server capabilities
    , sCapabilities: null
    // Cloned options
    , clonedOptions: clonedOptions
    // Reconnect
    , reconnect: reconnect
    // Emit error
    , emitError: emitError
    // Pool size
    , poolSize: poolSize
    // Store Options
    , storeOptions: storeOptions
    // Store
    , store: store
    // Host
    , host: host
    // Port
    , port: port
    // Options
    , options: options
  }

  // BSON property
  Object.defineProperty(this, 'bson', {
    enumerable: true, get: function() {
      return self.s.server.bson;
    }
  });

  // Last ismaster
  Object.defineProperty(this, 'isMasterDoc', {
    enumerable:true, get: function() {
      return self.s.server.lastIsMaster();
    }
  });

  // Current connection pool size
  Object.defineProperty(this, 'poolSize', {
    enumerable:true, get: function() { return self.s.server.connections().length; }
  });

  Object.defineProperty(this, 'autoReconnect', {
    enumerable:true, get: function() { return self.s.reconnect; }
  });

  Object.defineProperty(this, 'host', {
    enumerable:true, get: function() { return self.s.host; }
  });

  Object.defineProperty(this, 'port', {
    enumerable:true, get: function() { return self.s.port; }
  });
}
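
/**
 * Option translation sketch (illustrative only): socketOptions supplied by the
 * caller are renamed above to the fields mongodb-core expects before CServer
 * is constructed.
 * @example
 * var server = new Server('localhost', 27017, {
 *   socketOptions: {connectTimeoutMS: 1000, keepAlive: 300}
 * });
 * // internally becomes {connectionTimeout: 1000, keepAliveInitialDelay: 300, keepAlive: true}
 */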

inherits(Server, EventEmitter);

Server.prototype.parserType = function() {
  return this.s.server.parserType();
}

// Connect
Server.prototype.connect = function(db, _options, callback) {
  var self = this;
  if('function' === typeof _options) callback = _options, _options = {};
  if(_options == null) _options = {};
  if(!('function' === typeof callback)) callback = null;
  self.s.options = _options;

  // Update bufferMaxEntries
  self.s.storeOptions.bufferMaxEntries = db.bufferMaxEntries;

  // Error handler
  var connectErrorHandler = function(event) {
    return function(err) {
      // Remove all event handlers
      var events = ['timeout', 'error', 'close'];
      events.forEach(function(e) {
        self.s.server.removeListener(e, connectHandlers[e]);
      });

      self.s.server.removeListener('connect', connectErrorHandler);

      // Try to callback
      try {
        callback(err);
      } catch(err) {
        process.nextTick(function() { throw err; })
      }
    }
  }

  // Actual handler
  var errorHandler = function(event) {
    return function(err) {
      if(event != 'error') {
        self.emit(event, err);
      }
    }
  }

  // Reconnect handler
  var reconnectHandler = function(err) {
    self.emit('reconnect', self);
    self.s.store.execute();
  }

  // Destroy called on topology, perform cleanup
  var destroyHandler = function() {
    self.s.store.flush();
  }

  // Connect handler
  var connectHandler = function() {
    // Clear out all the current handlers left over
    ["timeout", "error", "close"].forEach(function(e) {
      self.s.server.removeAllListeners(e);
    });

    // Set up listeners
    self.s.server.once('timeout', errorHandler('timeout'));
    self.s.server.once('error', errorHandler('error'));
    self.s.server.once('close', errorHandler('close'));
    // Only called on destroy
    self.s.server.once('destroy', destroyHandler);

    // Emit open event
    self.emit('open', null, self);

    // Return correctly
    try {
      callback(null, self);
    } catch(err) {
      process.nextTick(function() { throw err; })
    }
  }

  // Set up listeners
  var connectHandlers = {
    timeout: connectErrorHandler('timeout'),
    error: connectErrorHandler('error'),
    close: connectErrorHandler('close')
  };

  // Add the event handlers
  self.s.server.once('timeout', connectHandlers.timeout);
  self.s.server.once('error', connectHandlers.error);
  self.s.server.once('close', connectHandlers.close);
  self.s.server.once('connect', connectHandler);
  // Reconnect server
  self.s.server.on('reconnect', reconnectHandler);

  // Start connection
  self.s.server.connect(_options);
}

// Server capabilities
Server.prototype.capabilities = function() {
  if(this.s.sCapabilities) return this.s.sCapabilities;
  this.s.sCapabilities = new ServerCapabilities(this.s.server.lastIsMaster());
  return this.s.sCapabilities;
}

// Command
Server.prototype.command = function(ns, cmd, options, callback) {
  this.s.server.command(ns, cmd, options, callback);
}

// Insert
Server.prototype.insert = function(ns, ops, options, callback) {
  this.s.server.insert(ns, ops, options, callback);
}

// Update
Server.prototype.update = function(ns, ops, options, callback) {
  this.s.server.update(ns, ops, options, callback);
}

// Remove
Server.prototype.remove = function(ns, ops, options, callback) {
  this.s.server.remove(ns, ops, options, callback);
}

// IsConnected
Server.prototype.isConnected = function() {
  return this.s.server.isConnected();
}

// Cursor
Server.prototype.cursor = function(ns, cmd, options) {
  options.disconnectHandler = this.s.store;
  return this.s.server.cursor(ns, cmd, options);
}

Server.prototype.setBSONParserType = function(type) {
  return this.s.server.setBSONParserType(type);
}

Server.prototype.lastIsMaster = function() {
  return this.s.server.lastIsMaster();
}

Server.prototype.close = function(forceClosed) {
  this.s.server.destroy();
  // We need to wash out all stored processes
  if(forceClosed == true) {
    this.s.storeOptions.force = forceClosed;
    this.s.store.flush();
  }
}

Server.prototype.auth = function() {
  var args = Array.prototype.slice.call(arguments, 0);
  this.s.server.auth.apply(this.s.server, args);
}

/**
 * All raw connections
 * @method
 * @return {array}
 */
Server.prototype.connections = function() {
  return this.s.server.connections();
}

/**
 * Server connect event
 *
 * @event Server#connect
 * @type {object}
 */

/**
 * Server close event
 *
 * @event Server#close
 * @type {object}
 */

/**
 * Server reconnect event
 *
 * @event Server#reconnect
 * @type {object}
 */

/**
 * Server error event
 *
 * @event Server#error
 * @type {MongoError}
 */

/**
 * Server timeout event
 *
 * @event Server#timeout
 * @type {object}
 */

/**
 * Server parseError event
 *
 * @event Server#parseError
 * @type {object}
 */

module.exports = Server;
140
server/node_modules/mongodb/lib/topology_base.js
generated
vendored
Executable file
@@ -0,0 +1,140 @@
"use strict";

var MongoError = require('mongodb-core').MongoError
  , f = require('util').format;

// The store of ops
var Store = function(topology, storeOptions) {
  var self = this;
  var storedOps = [];
  storeOptions = storeOptions || {force:false, bufferMaxEntries: -1}

  // Internal state
  this.s = {
    storedOps: storedOps
    , storeOptions: storeOptions
    , topology: topology
  }

  Object.defineProperty(this, 'length', {
    enumerable:true, get: function() { return self.s.storedOps.length; }
  });
}

Store.prototype.add = function(opType, ns, ops, options, callback) {
  if(this.s.storeOptions.force) return callback(new MongoError("db closed by application"));
  if(this.s.storeOptions.bufferMaxEntries == 0) return callback(new MongoError(f("no connection available for operation and number of stored operation > %s", this.s.storeOptions.bufferMaxEntries)));
  if(this.s.storeOptions.bufferMaxEntries > 0 && this.s.storedOps.length > this.s.storeOptions.bufferMaxEntries) {
    while(this.s.storedOps.length > 0) {
      var op = this.s.storedOps.shift();
      op.c(new MongoError(f("no connection available for operation and number of stored operation > %s", this.s.storeOptions.bufferMaxEntries)));
    }

    return;
  }

  this.s.storedOps.push({t: opType, n: ns, o: ops, op: options, c: callback})
}

Store.prototype.addObjectAndMethod = function(opType, object, method, params, callback) {
  if(this.s.storeOptions.force) return callback(new MongoError("db closed by application"));
  if(this.s.storeOptions.bufferMaxEntries == 0) return callback(new MongoError(f("no connection available for operation and number of stored operation > %s", this.s.storeOptions.bufferMaxEntries)));
  if(this.s.storeOptions.bufferMaxEntries > 0 && this.s.storedOps.length > this.s.storeOptions.bufferMaxEntries) {
    while(this.s.storedOps.length > 0) {
      var op = this.s.storedOps.shift();
      op.c(new MongoError(f("no connection available for operation and number of stored operation > %s", this.s.storeOptions.bufferMaxEntries)));
    }

    return;
  }

  this.s.storedOps.push({t: opType, m: method, o: object, p: params, c: callback})
}

Store.prototype.flush = function() {
  while(this.s.storedOps.length > 0) {
    this.s.storedOps.shift().c(new MongoError(f("no connection available for operation")));
  }
}

Store.prototype.execute = function() {
  // Get current ops
  var ops = this.s.storedOps;
  // Reset the ops
  this.s.storedOps = [];

  // Execute all the stored ops
  while(ops.length > 0) {
    var op = ops.shift();

    if(op.t == 'cursor') {
      op.o[op.m].apply(op.o, op.p);
    } else {
      this.s.topology[op.t](op.n, op.o, op.op, op.c);
    }
  }
}

Store.prototype.all = function() {
  return this.s.storedOps;
}
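
/**
 * Buffering sketch (illustrative only): while a topology is disconnected,
 * operations are parked in the store and replayed by execute() on reconnect.
 * @example
 * var store = new Store(topology, {force: false, bufferMaxEntries: 10});
 * store.add('insert', 'test.users', [{a: 1}], {}, function(err, r) {
 *   // invoked once execute() replays the op against topology.insert(...)
 * });
 * // later, when the topology emits 'reconnect':
 * store.execute();
 */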

// Server capabilities
var ServerCapabilities = function(ismaster) {
  var setup_get_property = function(object, name, value) {
    Object.defineProperty(object, name, {
      enumerable: true
      , get: function () { return value; }
    });
  }

  // Capabilities
  var aggregationCursor = false;
  var writeCommands = false;
  var textSearch = false;
  var authCommands = false;
  var listCollections = false;
  var listIndexes = false;
  var maxNumberOfDocsInBatch = ismaster.maxWriteBatchSize || 1000;

  if(ismaster.minWireVersion >= 0) {
    textSearch = true;
  }

  if(ismaster.maxWireVersion >= 1) {
    aggregationCursor = true;
    authCommands = true;
  }

  if(ismaster.maxWireVersion >= 2) {
    writeCommands = true;
  }

  if(ismaster.maxWireVersion >= 3) {
    listCollections = true;
    listIndexes = true;
  }

  // If no min or max wire version set to 0
  if(ismaster.minWireVersion == null) {
    ismaster.minWireVersion = 0;
  }

  if(ismaster.maxWireVersion == null) {
    ismaster.maxWireVersion = 0;
  }

  // Map up read only parameters
  setup_get_property(this, "hasAggregationCursor", aggregationCursor);
  setup_get_property(this, "hasWriteCommands", writeCommands);
  setup_get_property(this, "hasTextSearch", textSearch);
  setup_get_property(this, "hasAuthCommands", authCommands);
  setup_get_property(this, "hasListCollectionsCommand", listCollections);
  setup_get_property(this, "hasListIndexesCommand", listIndexes);
  setup_get_property(this, "minWireVersion", ismaster.minWireVersion);
  setup_get_property(this, "maxWireVersion", ismaster.maxWireVersion);
  setup_get_property(this, "maxNumberOfDocsInBatch", maxNumberOfDocsInBatch);
}

exports.Store = Store;
exports.ServerCapabilities = ServerCapabilities;
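
/**
 * Wire-version mapping sketch (illustrative only): feature flags are derived
 * from the ismaster document's min/maxWireVersion.
 * @example
 * var caps = new ServerCapabilities({minWireVersion: 0, maxWireVersion: 3});
 * caps.hasWriteCommands;          // true  (maxWireVersion >= 2)
 * caps.hasListCollectionsCommand; // true  (maxWireVersion >= 3)
 * caps.maxNumberOfDocsInBatch;    // 1000  (default when maxWriteBatchSize is absent)
 */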
281
server/node_modules/mongodb/lib/url_parser.js
generated
vendored
Executable file
@@ -0,0 +1,281 @@
"use strict";

var ReadPreference = require('./read_preference');

module.exports = function(url, options) {
  // Ensure we have a default options object if none set
  options = options || {};
  // Variables
  var connection_part = '';
  var auth_part = '';
  var query_string_part = '';
  var dbName = 'admin';

  // Must start with mongodb
  if(url.indexOf("mongodb://") != 0)
    throw Error("URL must be in the format mongodb://user:pass@host:port/dbname");
  // If we have a ? mark cut the query elements off
  if(url.indexOf("?") != -1) {
    query_string_part = url.substr(url.indexOf("?") + 1);
    connection_part = url.substring("mongodb://".length, url.indexOf("?"))
  } else {
    connection_part = url.substring("mongodb://".length);
  }

  // Check if we have auth params
  if(connection_part.indexOf("@") != -1) {
    auth_part = connection_part.split("@")[0];
    connection_part = connection_part.split("@")[1];
  }

  // Check if the connection string has a db
  if(connection_part.indexOf(".sock") != -1) {
    if(connection_part.indexOf(".sock/") != -1) {
      dbName = connection_part.split(".sock/")[1];
      connection_part = connection_part.split("/", connection_part.indexOf(".sock") + ".sock".length);
    }
  } else if(connection_part.indexOf("/") != -1) {
    dbName = connection_part.split("/")[1];
    connection_part = connection_part.split("/")[0];
  }

  // Result object
  var object = {};

  // Pick apart the authentication part of the string
  var authPart = auth_part || '';
  var auth = authPart.split(':', 2);

  // Decode the URI components
  auth[0] = decodeURIComponent(auth[0]);
  if(auth[1]){
    auth[1] = decodeURIComponent(auth[1]);
  }

  // Add auth to final object if we have 2 elements
  if(auth.length == 2) object.auth = {user: auth[0], password: auth[1]};

  // Variables used for temporary storage
  var hostPart;
  var urlOptions;
  var servers;
  var serverOptions = {socketOptions: {}};
  var dbOptions = {read_preference_tags: []};
  var replSetServersOptions = {socketOptions: {}};
  // Add server options to final object
  object.server_options = serverOptions;
  object.db_options = dbOptions;
  object.rs_options = replSetServersOptions;
  object.mongos_options = {};

  // Let's check if we are using a domain socket
  if(url.match(/\.sock/)) {
    // Split out the socket part
    var domainSocket = url.substring(
      url.indexOf("mongodb://") + "mongodb://".length
      , url.lastIndexOf(".sock") + ".sock".length);
    // Clean out any auth stuff if any
    if(domainSocket.indexOf("@") != -1) domainSocket = domainSocket.split("@")[1];
    servers = [{domain_socket: domainSocket}];
  } else {
    // Split up the db
    hostPart = connection_part;
    // Parse all server results
    servers = hostPart.split(',').map(function(h) {
      var _host, _port, ipv6match;
      // Check if it matches [IPv6]:port, where the port number is optional
      if ((ipv6match = /\[([^\]]+)\](?:\:(.+))?/.exec(h))) {
        _host = ipv6match[1];
        _port = parseInt(ipv6match[2], 10) || 27017;
      } else {
        // Otherwise assume it's IPv4, or plain hostname
        var hostPort = h.split(':', 2);
        _host = hostPort[0] || 'localhost';
        _port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
        // Check for localhost?safe=true style case
        if(_host.indexOf("?") != -1) _host = _host.split(/\?/)[0];
      }
      // Return the mapped object
      return {host: _host, port: _port};
    });
  }
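
/**
 * Host parsing sketch (illustrative only): comma separated hosts become
 * {host, port} pairs; bracketed IPv6 literals and missing ports are handled.
 * @example
 * // 'h1.example.com:27017,[::1]:27018,h3' parses to
 * // [{host: 'h1.example.com', port: 27017},
 * //  {host: '::1', port: 27018},
 * //  {host: 'h3', port: 27017}]
 */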

  // Get the db name
  object.dbName = dbName || 'admin';
  // Split up all the options
  urlOptions = (query_string_part || '').split(/[&;]/);
  // Ugh, we have to figure out which options go to which constructor manually.
  urlOptions.forEach(function(opt) {
    if(!opt) return;
    var splitOpt = opt.split('='), name = splitOpt[0], value = splitOpt[1];
    // Options implementations
    switch(name) {
      case 'slaveOk':
      case 'slave_ok':
        serverOptions.slave_ok = (value == 'true');
        dbOptions.slaveOk = (value == 'true');
        break;
      case 'maxPoolSize':
      case 'poolSize':
        serverOptions.poolSize = parseInt(value, 10);
        replSetServersOptions.poolSize = parseInt(value, 10);
        break;
      case 'autoReconnect':
      case 'auto_reconnect':
        serverOptions.auto_reconnect = (value == 'true');
        break;
      case 'minPoolSize':
        throw new Error("minPoolSize not supported");
      case 'maxIdleTimeMS':
        throw new Error("maxIdleTimeMS not supported");
      case 'waitQueueMultiple':
        throw new Error("waitQueueMultiple not supported");
      case 'waitQueueTimeoutMS':
        throw new Error("waitQueueTimeoutMS not supported");
      case 'uuidRepresentation':
        throw new Error("uuidRepresentation not supported");
      case 'ssl':
        if(value == 'prefer') {
          serverOptions.ssl = value;
          replSetServersOptions.ssl = value;
          break;
        }
        serverOptions.ssl = (value == 'true');
        replSetServersOptions.ssl = (value == 'true');
        break;
      case 'replicaSet':
      case 'rs_name':
        replSetServersOptions.rs_name = value;
        break;
      case 'reconnectWait':
        replSetServersOptions.reconnectWait = parseInt(value, 10);
        break;
      case 'retries':
        replSetServersOptions.retries = parseInt(value, 10);
        break;
      case 'readSecondary':
      case 'read_secondary':
        replSetServersOptions.read_secondary = (value == 'true');
        break;
      case 'fsync':
        dbOptions.fsync = (value == 'true');
        break;
      case 'journal':
        dbOptions.j = (value == 'true');
        break;
      case 'safe':
        dbOptions.safe = (value == 'true');
        break;
      case 'nativeParser':
      case 'native_parser':
        dbOptions.native_parser = (value == 'true');
        break;
      case 'connectTimeoutMS':
        serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
        replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
        break;
      case 'socketTimeoutMS':
        serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
        replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
        break;
      case 'w':
        dbOptions.w = parseInt(value, 10);
        if(isNaN(dbOptions.w)) dbOptions.w = value;
        break;
      case 'authSource':
        dbOptions.authSource = value;
        break;
      case 'gssapiServiceName':
        dbOptions.gssapiServiceName = value;
        break;
      case 'authMechanism':
        if(value == 'GSSAPI') {
          // If no password provided decode only the principal
          if(object.auth == null) {
            var urlDecodeAuthPart = decodeURIComponent(authPart);
            if(urlDecodeAuthPart.indexOf("@") == -1) throw new Error("GSSAPI requires a provided principal");
            object.auth = {user: urlDecodeAuthPart, password: null};
          } else {
            object.auth.user = decodeURIComponent(object.auth.user);
          }
        } else if(value == 'MONGODB-X509') {
          object.auth = {user: decodeURIComponent(authPart)};
        }

        // Only GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 and MONGODB-CR are supported
        if(value != 'GSSAPI'
          && value != 'MONGODB-X509'
          && value != 'MONGODB-CR'
          && value != 'SCRAM-SHA-1'
          && value != 'PLAIN')
          throw new Error("only GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR is supported by authMechanism");

        // Authentication mechanism
        dbOptions.authMechanism = value;
        break;
      case 'authMechanismProperties':
        // Split up into key, value pairs
        var values = value.split(',');
        var o = {};
        // For each value split into key, value
        values.forEach(function(x) {
          var v = x.split(':');
          o[v[0]] = v[1];
        });

        // Set all authMechanismProperties
        dbOptions.authMechanismProperties = o;
        // Set the service name value
        if(typeof o.SERVICE_NAME == 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
        break;
      case 'wtimeoutMS':
        dbOptions.wtimeout = parseInt(value, 10);
        break;
      case 'readPreference':
        if(!ReadPreference.isValid(value)) throw new Error("readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest");
        dbOptions.read_preference = value;
        break;
      case 'readPreferenceTags':
        // Decode the value
        value = decodeURIComponent(value);
        // Contains the tag object
        var tagObject = {};
        if(value == null || value == '') {
          dbOptions.read_preference_tags.push(tagObject);
          break;
        }

        // Split up the tags
        var tags = value.split(/\,/);
        for(var i = 0; i < tags.length; i++) {
          var parts = tags[i].trim().split(/\:/);
          tagObject[parts[0]] = parts[1];
        }

        // Set the preferences tags
        dbOptions.read_preference_tags.push(tagObject);
        break;
      default:
        break;
    }
  });

  // No tags: should be null (not [])
  if(dbOptions.read_preference_tags.length === 0) {
    dbOptions.read_preference_tags = null;
  }

  // Validate that there are no invalid write concern combinations
  if((dbOptions.w == -1 || dbOptions.w == 0) && (
      dbOptions.journal == true
      || dbOptions.fsync == true
      || dbOptions.safe == true)) throw new Error("w set to -1 or 0 cannot be combined with safe/w/journal/fsync")

  // If no read preference set it to primary
  if(!dbOptions.read_preference) dbOptions.read_preference = 'primary';

  // Add servers to result
  object.servers = servers;
  // Return the parsed object
  return object;
}
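
/**
 * Parser usage sketch (illustrative only): the module exports a single
 * function that splits a connection string into per-constructor option sets.
 * @example
 * var parse = require('./url_parser');
 * var result = parse('mongodb://user:pass@h1:27017,h2:27018/app?replicaSet=rs&w=1');
 * result.dbName;             // 'app'
 * result.auth;               // {user: 'user', password: 'pass'}
 * result.servers;            // [{host: 'h1', port: 27017}, {host: 'h2', port: 27018}]
 * result.rs_options.rs_name; // 'rs'
 * result.db_options.w;       // 1
 */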
233
server/node_modules/mongodb/lib/utils.js
generated
vendored
Executable file
@@ -0,0 +1,233 @@
"use strict";

var MongoError = require('mongodb-core').MongoError

var shallowClone = function(obj) {
  var copy = {};
  for(var name in obj) copy[name] = obj[name];
  return copy;
}

// Set simple property
var getSingleProperty = function(obj, name, value) {
  Object.defineProperty(obj, name, {
    enumerable:true,
    get: function() {
      return value
    }
  });
}

var formatSortValue = exports.formatSortValue = function(sortDirection) {
  var value = ("" + sortDirection).toLowerCase();

  switch (value) {
    case 'ascending':
    case 'asc':
    case '1':
      return 1;
    case 'descending':
    case 'desc':
    case '-1':
      return -1;
    default:
      throw new Error("Illegal sort clause, must be of the form "
        + "[['field1', '(ascending|descending)'], "
        + "['field2', '(ascending|descending)']]");
  }
};

var formattedOrderClause = exports.formattedOrderClause = function(sortValue) {
  var orderBy = {};
  if(sortValue == null) return null;
  if (Array.isArray(sortValue)) {
    if(sortValue.length === 0) {
      return null;
    }

    for(var i = 0; i < sortValue.length; i++) {
      if(sortValue[i].constructor == String) {
        orderBy[sortValue[i]] = 1;
      } else {
        orderBy[sortValue[i][0]] = formatSortValue(sortValue[i][1]);
      }
    }
  } else if(sortValue != null && typeof sortValue == 'object') {
    orderBy = sortValue;
  } else if (typeof sortValue == 'string') {
    orderBy[sortValue] = 1;
  } else {
    throw new Error("Illegal sort clause, must be of the form " +
      "[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]");
  }

  return orderBy;
};
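
/**
 * Sort normalization sketch (illustrative only): all accepted shapes collapse
 * to a {field: 1|-1} object.
 * @example
 * formattedOrderClause('age');                      // {age: 1}
 * formattedOrderClause(['age', ['name', 'desc']]);  // {age: 1, name: -1}
 * formattedOrderClause([['ts', -1]]);               // {ts: -1}
 */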

var checkCollectionName = function checkCollectionName (collectionName) {
  if('string' !== typeof collectionName) {
    throw Error("collection name must be a String");
  }

  if(!collectionName || collectionName.indexOf('..') != -1) {
    throw Error("collection names cannot be empty");
  }

  if(collectionName.indexOf('$') != -1 &&
      collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null) {
    throw Error("collection names must not contain '$'");
  }

  if(collectionName.match(/^\.|\.$/) != null) {
    throw Error("collection names must not start or end with '.'");
  }

  // Validate that we are not passing 0x00 in the collection name
  if(!!~collectionName.indexOf("\x00")) {
    throw new Error("collection names cannot contain a null character");
  }
};

var handleCallback = function(callback, err, value1, value2) {
  try {
    if(callback == null) return;
    if(value2) return callback(err, value1, value2);
    return callback(err, value1);
  } catch(err) {
    process.nextTick(function() { throw err; });
    return false;
  }

  return true;
}

/**
 * Wrap a Mongo error document in an Error instance
 * @ignore
 * @api private
 */
var toError = function(error) {
  if (error instanceof Error) return error;

  var msg = error.err || error.errmsg || error.errMessage || error;
  var e = new MongoError(msg);

  // Get all object keys
  var keys = typeof error == 'object'
    ? Object.keys(error)
    : [];

  for(var i = 0; i < keys.length; i++) {
    e[keys[i]] = error[keys[i]];
  }

  return e;
}
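
/**
 * Error wrapping sketch (illustrative only): raw server error documents keep
 * their fields while gaining a proper Error prototype.
 * @example
 * var e = toError({errmsg: 'duplicate key', code: 11000});
 * e instanceof Error; // true
 * e.message;          // 'duplicate key'
 * e.code;             // 11000
 */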

/**
 * @ignore
 */
var normalizeHintField = function normalizeHintField(hint) {
  var finalHint = null;

  if(typeof hint == 'string') {
    finalHint = hint;
  } else if(Array.isArray(hint)) {
    finalHint = {};

    hint.forEach(function(param) {
      finalHint[param] = 1;
    });
  } else if(hint != null && typeof hint == 'object') {
    finalHint = {};
    for (var name in hint) {
      finalHint[name] = hint[name];
    }
  }

  return finalHint;
};
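
/**
 * Hint normalization sketch (illustrative only):
 * @example
 * normalizeHintField('age_1');          // 'age_1'
 * normalizeHintField(['age', 'name']);  // {age: 1, name: 1}
 * normalizeHintField({age: 1});         // {age: 1}
 * normalizeHintField(null);             // null
 */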

/**
 * Create index name based on field spec
 *
 * @ignore
 * @api private
 */
var parseIndexOptions = function(fieldOrSpec) {
  var fieldHash = {};
  var indexes = [];
  var keys;

  // Get all the fields accordingly
  if('string' == typeof fieldOrSpec) {
    // 'type'
    indexes.push(fieldOrSpec + '_' + 1);
    fieldHash[fieldOrSpec] = 1;
  } else if(Array.isArray(fieldOrSpec)) {
    fieldOrSpec.forEach(function(f) {
      if('string' == typeof f) {
        // [{location:'2d'}, 'type']
        indexes.push(f + '_' + 1);
        fieldHash[f] = 1;
      } else if(Array.isArray(f)) {
        // [['location', '2d'],['type', 1]]
        indexes.push(f[0] + '_' + (f[1] || 1));
        fieldHash[f[0]] = f[1] || 1;
      } else if(isObject(f)) {
        // [{location:'2d'}, {type:1}]
        keys = Object.keys(f);
        keys.forEach(function(k) {
          indexes.push(k + '_' + f[k]);
          fieldHash[k] = f[k];
        });
      } else {
        // undefined (ignore)
      }
    });
  } else if(isObject(fieldOrSpec)) {
    // {location:'2d', type:1}
    keys = Object.keys(fieldOrSpec);
    keys.forEach(function(key) {
      indexes.push(key + '_' + fieldOrSpec[key]);
      fieldHash[key] = fieldOrSpec[key];
    });
  }

  return {
    name: indexes.join("_"), keys: keys, fieldHash: fieldHash
  }
}
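
/**
 * Index-name derivation sketch (illustrative only): each field/direction pair
 * contributes 'field_direction' to the generated name.
 * @example
 * parseIndexOptions('type').name;                    // 'type_1'
 * parseIndexOptions({location: '2d', type: 1}).name; // 'location_2d_type_1'
 * parseIndexOptions([['a', -1], 'b']).name;          // 'a_-1_b_1'
 */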

var isObject = exports.isObject = function (arg) {
  return '[object Object]' == toString.call(arg)
}

var debugOptions = function(debugFields, options) {
  var finaloptions = {};
  debugFields.forEach(function(n) {
    finaloptions[n] = options[n];
  });

  return finaloptions;
}

var decorateCommand = function(command, options, exclude) {
  for(var name in options) {
    if(exclude[name] == null) command[name] = options[name];
  }

  return command;
}

exports.shallowClone = shallowClone;
exports.getSingleProperty = getSingleProperty;
exports.checkCollectionName = checkCollectionName;
exports.toError = toError;
exports.formattedOrderClause = formattedOrderClause;
exports.parseIndexOptions = parseIndexOptions;
exports.normalizeHintField = normalizeHintField;
exports.handleCallback = handleCallback;
exports.decorateCommand = decorateCommand;
exports.isObject = isObject;
exports.debugOptions = debugOptions;