Started on node.js+socket.io+mongoDB on the backend for more responsiveness

KasperRT
2015-04-09 00:18:13 +02:00
parent 076f8e821f
commit a8a705bd77
1889 changed files with 322175 additions and 68 deletions

server/node_modules/mongodb/lib/bulk/common.js generated vendored Executable file

@@ -0,0 +1,393 @@
"use strict";
var utils = require('../utils');
// Error codes
var UNKNOWN_ERROR = 8;
var INVALID_BSON_ERROR = 22;
var WRITE_CONCERN_ERROR = 64;
var MULTIPLE_ERROR = 65;
// Insert types
var INSERT = 1;
var UPDATE = 2;
var REMOVE = 3;
// Get write concern
var writeConcern = function(target, col, options) {
if(options.w != null || options.j != null || options.fsync != null) {
target.writeConcern = options;
} else if(col.writeConcern.w != null || col.writeConcern.j != null || col.writeConcern.fsync != null) {
target.writeConcern = col.writeConcern;
}
return target
}
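/*
 * Illustrative sketch (not part of the upstream driver source): how the
 * writeConcern helper above resolves the concern for a command. Explicit
 * options win; otherwise the collection-level write concern, if any, is
 * copied onto the target. The collection stubs below are hypothetical.
 *
 *   writeConcern({}, { writeConcern: { w: 'majority', j: true } }, {});
 *   // -> { writeConcern: { w: 'majority', j: true } }  (falls back to the collection)
 *
 *   writeConcern({}, { writeConcern: {} }, { w: 1, wtimeout: 500 });
 *   // -> { writeConcern: { w: 1, wtimeout: 500 } }     (explicit options take precedence)
 */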
/**
* Helper function to define properties
* @ignore
*/
var defineReadOnlyProperty = function(self, name, value) {
Object.defineProperty(self, name, {
enumerable: true
, get: function() {
return value;
}
});
}
/**
* Keeps the state of an unordered batch so we can rewrite the results
* correctly after command execution
* @ignore
*/
var Batch = function(batchType, originalZeroIndex) {
this.originalZeroIndex = originalZeroIndex;
this.currentIndex = 0;
this.originalIndexes = [];
this.batchType = batchType;
this.operations = [];
this.size = 0;
this.sizeBytes = 0;
}
/**
* Wraps a legacy operation so we can correctly rewrite its error
* @ignore
*/
var LegacyOp = function(batchType, operation, index) {
this.batchType = batchType;
this.index = index;
this.operation = operation;
}
/**
* Create a new BulkWriteResult instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
* @property {boolean} ok Did bulk operation correctly execute
* @property {number} nInserted number of inserted documents
* @property {number} nMatched Number of documents matched by update operations
* @property {number} nUpserted Number of upserted documents
* @property {number} nModified Number of documents updated physically on disk
* @property {number} nRemoved Number of removed documents
* @return {BulkWriteResult} a BulkWriteResult instance
*/
var BulkWriteResult = function(bulkResult) {
defineReadOnlyProperty(this, "ok", bulkResult.ok);
defineReadOnlyProperty(this, "nInserted", bulkResult.nInserted);
defineReadOnlyProperty(this, "nUpserted", bulkResult.nUpserted);
defineReadOnlyProperty(this, "nMatched", bulkResult.nMatched);
defineReadOnlyProperty(this, "nModified", bulkResult.nModified);
defineReadOnlyProperty(this, "nRemoved", bulkResult.nRemoved);
/**
* Return an array of inserted ids
*
* @return {object[]}
*/
this.getInsertedIds = function() {
return bulkResult.insertedIds;
}
/**
* Return an array of upserted ids
*
* @return {object[]}
*/
this.getUpsertedIds = function() {
return bulkResult.upserted;
}
/**
* Return the upserted id at position x
*
* @param {number} index the index of the upserted id to return; returns undefined if there is no result for the given index
* @return {object}
*/
this.getUpsertedIdAt = function(index) {
return bulkResult.upserted[index];
}
/**
* Return raw internal result
*
* @return {object}
*/
this.getRawResponse = function() {
return bulkResult;
}
/**
* Returns true if the bulk operation contains a write error
*
* @return {boolean}
*/
this.hasWriteErrors = function() {
return bulkResult.writeErrors.length > 0;
}
/**
* Returns the number of write errors of the bulk operation
*
* @return {number}
*/
this.getWriteErrorCount = function() {
return bulkResult.writeErrors.length;
}
/**
* Returns a specific write error object
*
* @return {WriteError}
*/
this.getWriteErrorAt = function(index) {
if(index < bulkResult.writeErrors.length) {
return bulkResult.writeErrors[index];
}
return null;
}
/**
* Retrieve all write errors
*
* @return {object[]}
*/
this.getWriteErrors = function() {
return bulkResult.writeErrors;
}
/**
* Retrieve lastOp if available
*
* @return {object}
*/
this.getLastOp = function() {
return bulkResult.lastOp;
}
/**
* Retrieve the write concern error if any
*
* @return {WriteConcernError}
*/
this.getWriteConcernError = function() {
if(bulkResult.writeConcernErrors.length == 0) {
return null;
} else if(bulkResult.writeConcernErrors.length == 1) {
// Return the error
return bulkResult.writeConcernErrors[0];
} else {
// Combine the errors
var errmsg = "";
for(var i = 0; i < bulkResult.writeConcernErrors.length; i++) {
var err = bulkResult.writeConcernErrors[i];
errmsg = errmsg + err.errmsg;
// Only add the separator between error messages
if(i < bulkResult.writeConcernErrors.length - 1) errmsg = errmsg + " and ";
}
return new WriteConcernError({ errmsg : errmsg, code : WRITE_CONCERN_ERROR });
}
}
this.toJSON = function() {
return bulkResult;
}
this.toString = function() {
return "BulkWriteResult(" + this.toJSON(bulkResult) + ")";
}
this.isOk = function() {
return bulkResult.ok == 1;
}
}
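/*
 * Illustrative sketch (not part of the upstream driver source): consuming a
 * BulkWriteResult returned from an ordered or unordered bulk execute(). The
 * callback shape follows the execute() documentation further below; the
 * variable names are hypothetical.
 *
 *   bulk.execute(function(err, result) {
 *     if(err) return console.error(err);
 *     console.log(result.isOk(), result.nInserted, result.nUpserted, result.nModified);
 *     if(result.hasWriteErrors()) {
 *       result.getWriteErrors().forEach(function(writeError) {
 *         console.log(writeError.toString());
 *       });
 *     }
 *     var wcError = result.getWriteConcernError();
 *     if(wcError) console.log(wcError.errmsg);
 *   });
 */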
/**
* Create a new WriteConcernError instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
* @property {number} code Write concern error code.
* @property {string} errmsg Write concern error message.
* @return {WriteConcernError} a WriteConcernError instance
*/
var WriteConcernError = function(err) {
if(!(this instanceof WriteConcernError)) return new WriteConcernError(err);
// Define properties
defineReadOnlyProperty(this, "code", err.code);
defineReadOnlyProperty(this, "errmsg", err.errmsg);
this.toJSON = function() {
return {code: err.code, errmsg: err.errmsg};
}
this.toString = function() {
return "WriteConcernError(" + err.errmsg + ")";
}
}
/**
* Create a new WriteError instance (INTERNAL TYPE, do not instantiate directly)
*
* @class
* @property {number} code Write error code.
* @property {number} index Write error original bulk operation index.
* @property {string} errmsg Write error message.
* @return {WriteError} a WriteError instance
*/
var WriteError = function(err) {
if(!(this instanceof WriteError)) return new WriteError(err);
// Define properties
defineReadOnlyProperty(this, "code", err.code);
defineReadOnlyProperty(this, "index", err.index);
defineReadOnlyProperty(this, "errmsg", err.errmsg);
//
// Define access methods
this.getOperation = function() {
return err.op;
}
this.toJSON = function() {
return {code: err.code, index: err.index, errmsg: err.errmsg, op: err.op};
}
this.toString = function() {
return "WriteError(" + JSON.stringify(this.toJSON()) + ")";
}
}
/**
* Merges results into shared data structure
* @ignore
*/
var mergeBatchResults = function(ordered, batch, bulkResult, err, result) {
// If we have an error set the result to be the err object
if(err) {
result = err;
} else if(result && result.result) {
result = result.result;
} else if(result == null) {
return;
}
// Do we have a top level error stop processing and return
if(result.ok == 0 && bulkResult.ok == 1) {
bulkResult.ok = 0;
// bulkResult.error = utils.toError(result);
var writeError = {
index: 0
, code: result.code || 0
, errmsg: result.message
, op: batch.operations[0]
};
bulkResult.writeErrors.push(new WriteError(writeError));
return;
} else if(result.ok == 0 && bulkResult.ok == 0) {
return;
}
// Add lastop if available
if(result.lastOp) {
bulkResult.lastOp = result.lastOp;
}
// If we have an insert Batch type
if(batch.batchType == INSERT && result.n) {
bulkResult.nInserted = bulkResult.nInserted + result.n;
}
// If we have a remove Batch type
if(batch.batchType == REMOVE && result.n) {
bulkResult.nRemoved = bulkResult.nRemoved + result.n;
}
var nUpserted = 0;
// We have an array of upserted values, we need to rewrite the indexes
if(Array.isArray(result.upserted)) {
nUpserted = result.upserted.length;
for(var i = 0; i < result.upserted.length; i++) {
bulkResult.upserted.push({
index: result.upserted[i].index + batch.originalZeroIndex
, _id: result.upserted[i]._id
});
}
} else if(result.upserted) {
nUpserted = 1;
bulkResult.upserted.push({
index: batch.originalZeroIndex
, _id: result.upserted
});
}
// If we have an update Batch type
if(batch.batchType == UPDATE && result.n) {
var nModified = result.nModified;
bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);
if(typeof nModified == 'number') {
bulkResult.nModified = bulkResult.nModified + nModified;
} else {
bulkResult.nModified = null;
}
}
if(Array.isArray(result.writeErrors)) {
for(var i = 0; i < result.writeErrors.length; i++) {
var writeError = {
index: batch.originalZeroIndex + result.writeErrors[i].index
, code: result.writeErrors[i].code
, errmsg: result.writeErrors[i].errmsg
, op: batch.operations[result.writeErrors[i].index]
};
bulkResult.writeErrors.push(new WriteError(writeError));
}
}
if(result.writeConcernError) {
bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
}
}
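/*
 * Illustrative sketch (not part of the upstream driver source): how a single
 * server reply is folded into the shared bulkResult by mergeBatchResults
 * above. The batch contents and reply document are hypothetical.
 *
 *   var batch = new Batch(UPDATE, 0);
 *   batch.operations.push({ q: { a: 1 }, u: { $set: { b: 2 } }, multi: false, upsert: true });
 *   var bulkResult = { ok: 1, writeErrors: [], writeConcernErrors: [], insertedIds: [],
 *     nInserted: 0, nUpserted: 0, nMatched: 0, nModified: 0, nRemoved: 0, upserted: [] };
 *   mergeBatchResults(true, batch, bulkResult, null,
 *     { ok: 1, n: 1, nModified: 0, upserted: [{ index: 0, _id: 'someObjectId' }] });
 *   // -> bulkResult.nUpserted == 1, bulkResult.nMatched == 0, and the upserted
 *   //    index is rewritten relative to batch.originalZeroIndex
 */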
//
// Clone the options
var cloneOptions = function(options) {
var clone = {};
var keys = Object.keys(options);
for(var i = 0; i < keys.length; i++) {
clone[keys[i]] = options[keys[i]];
}
return clone;
}
// Exports symbols
exports.BulkWriteResult = BulkWriteResult;
exports.WriteError = WriteError;
exports.Batch = Batch;
exports.LegacyOp = LegacyOp;
exports.mergeBatchResults = mergeBatchResults;
exports.cloneOptions = cloneOptions;
exports.writeConcern = writeConcern;
exports.INVALID_BSON_ERROR = INVALID_BSON_ERROR;
exports.WRITE_CONCERN_ERROR = WRITE_CONCERN_ERROR;
exports.MULTIPLE_ERROR = MULTIPLE_ERROR;
exports.UNKNOWN_ERROR = UNKNOWN_ERROR;
exports.INSERT = INSERT;
exports.UPDATE = UPDATE;
exports.REMOVE = REMOVE;

server/node_modules/mongodb/lib/bulk/ordered.js generated vendored Executable file

@@ -0,0 +1,470 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
* Create a FindOperatorsOrdered instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @return {FindOperatorsOrdered} a FindOperatorsOrdered instance.
*/
var FindOperatorsOrdered = function(self) {
this.s = self.s;
}
/**
* Add a single update document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.update = function(updateDocument) {
// Perform upsert
var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;
// Establish the update command
var document = {
q: this.s.currentOp.selector
, u: updateDocument
, multi: true
, upsert: upsert
}
// Clear out current Op
this.s.currentOp = null;
// Add the update document to the list
return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a single updateOne operation to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.updateOne = function(updateDocument) {
// Perform upsert
var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;
// Establish the update command
var document = {
q: this.s.currentOp.selector
, u: updateDocument
, multi: false
, upsert: upsert
}
// Clear out current Op
this.s.currentOp = null;
// Add the update document to the list
return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a replace one operation to the bulk operation
*
* @method
* @param {object} doc the new document to replace the existing one with
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.replaceOne = function(updateDocument) {
return this.updateOne(updateDocument);
}
/**
* Upsert modifier for update bulk operation
*
* @method
* @throws {MongoError}
* @return {FindOperatorsOrdered}
*/
FindOperatorsOrdered.prototype.upsert = function() {
this.s.currentOp.upsert = true;
return this;
}
/**
* Add a remove one operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.deleteOne = function() {
// Establish the remove command
var document = {
q: this.s.currentOp.selector
, limit: 1
}
// Clear out current Op
this.s.currentOp = null;
// Add the remove document to the list
return addToOperationsList(this, common.REMOVE, document);
}
// Backward compatibility
FindOperatorsOrdered.prototype.removeOne = FindOperatorsOrdered.prototype.deleteOne;
/**
* Add a remove operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
FindOperatorsOrdered.prototype.delete = function() {
// Establish the remove command
var document = {
q: this.s.currentOp.selector
, limit: 0
}
// Clear out current Op
this.s.currentOp = null;
// Add the remove document to the list
return addToOperationsList(this, common.REMOVE, document);
}
// Backward compatibility
FindOperatorsOrdered.prototype.remove = FindOperatorsOrdered.prototype.delete;
// Add to internal list of documents
var addToOperationsList = function(_self, docType, document) {
// Get the bsonSize
var bsonSize = _self.s.bson.calculateObjectSize(document, false);
// Throw error if the doc is bigger than the max BSON size
if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
// Create a new batch object if we don't have a current one
if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
// Check if we need to create a new batch
if(((_self.s.currentBatchSize + 1) >= _self.s.maxWriteBatchSize)
|| ((_self.s.currentBatchSizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
|| (_self.s.currentBatch.batchType != docType)) {
// Save the batch to the execution stack
_self.s.batches.push(_self.s.currentBatch);
// Create a new batch
_self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
// Reset the current size trackers
_self.s.currentBatchSize = 0;
_self.s.currentBatchSizeBytes = 0;
} else {
// Update current batch size
_self.s.currentBatchSize = _self.s.currentBatchSize + 1;
_self.s.currentBatchSizeBytes = _self.s.currentBatchSizeBytes + bsonSize;
}
if(docType == common.INSERT) {
_self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex, _id: document._id});
}
// We have an array of documents
if(Array.isArray(document)) {
throw toError("operation passed in cannot be an Array");
} else {
_self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
_self.s.currentBatch.operations.push(document)
_self.s.currentIndex = _self.s.currentIndex + 1;
}
// Return self
return _self;
}
/**
* Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {number} length Get the number of operations in the bulk.
* @return {OrderedBulkOperation} an OrderedBulkOperation instance.
*/
function OrderedBulkOperation(topology, collection, options) {
options = options == null ? {} : options;
// TODO Bring from driver information in isMaster
var self = this;
var executed = false;
// Current item
var currentOp = null;
// Handle to the bson serializer, used to calculate running sizes
var bson = topology.bson;
// Namespace for the operation
var namespace = collection.collectionName;
// Set max byte size
var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;
// Get the capabilities
var capabilities = topology.capabilities();
// Get the write concern
var writeConcern = common.writeConcern(shallowClone(options), collection, options);
// Current batch
var currentBatch = null;
var currentIndex = 0;
var currentBatchSize = 0;
var currentBatchSizeBytes = 0;
var batches = [];
// Final results
var bulkResult = {
ok: 1
, writeErrors: []
, writeConcernErrors: []
, insertedIds: []
, nInserted: 0
, nUpserted: 0
, nMatched: 0
, nModified: 0
, nRemoved: 0
, upserted: []
};
// Internal state
this.s = {
// Final result
bulkResult: bulkResult
// Current batch state
, currentBatch: null
, currentIndex: 0
, currentBatchSize: 0
, currentBatchSizeBytes: 0
, batches: []
// Write concern
, writeConcern: writeConcern
// Capabilities
, capabilities: capabilities
// Max batch size options
, maxBatchSizeBytes: maxBatchSizeBytes
, maxWriteBatchSize: maxWriteBatchSize
// Namespace
, namespace: namespace
// BSON
, bson: bson
// Topology
, topology: topology
// Options
, options: options
// Current operation
, currentOp: currentOp
// Executed
, executed: executed
// Collection
, collection: collection
}
}
OrderedBulkOperation.prototype.raw = function(op) {
var key = Object.keys(op)[0];
// Update operations
if((op.updateOne && op.updateOne.q)
|| (op.updateMany && op.updateMany.q)
|| (op.replaceOne && op.replaceOne.q)) {
op[key].multi = op.updateOne || op.replaceOne ? false : true;
return addToOperationsList(this, common.UPDATE, op[key]);
}
// Crud spec update format
if(op.updateOne || op.updateMany || op.replaceOne) {
var multi = op.updateOne || op.replaceOne ? false : true;
var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
if(op[key].upsert) operation.upsert = true;
return addToOperationsList(this, common.UPDATE, operation);
}
// Remove operations
if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
op[key].limit = op.removeOne ? 1 : 0;
return addToOperationsList(this, common.REMOVE, op[key]);
}
// Crud spec delete operations, less efficient
if(op.deleteOne || op.deleteMany) {
var limit = op.deleteOne ? 1 : 0;
var operation = {q: op[key].filter, limit: limit}
return addToOperationsList(this, common.REMOVE, operation);
}
// Insert operations
if(op.insertOne && op.insertOne.document == null) {
if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
return addToOperationsList(this, common.INSERT, op.insertOne);
} else if(op.insertOne && op.insertOne.document) {
if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
return addToOperationsList(this, common.INSERT, op.insertOne.document);
}
if(op.insertMany) {
for(var i = 0; i < op.insertMany.length; i++) {
if(op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
addToOperationsList(this, common.INSERT, op.insertMany[i]);
}
return;
}
// No valid type of operation
throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
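/*
 * Illustrative sketch (not part of the upstream driver source): the operation
 * shapes accepted by raw() above, i.e. the CRUD-spec style documents that the
 * collection bulkWrite path feeds into this method. The field values below
 * are hypothetical.
 *
 *   bulk.raw({ insertOne: { document: { a: 1 } } });
 *   bulk.raw({ updateOne: { filter: { a: 1 }, update: { $set: { b: 2 } }, upsert: true } });
 *   bulk.raw({ updateMany: { filter: { a: 1 }, update: { $set: { b: 2 } } } });
 *   bulk.raw({ replaceOne: { filter: { a: 1 }, replacement: { a: 3 } } });
 *   bulk.raw({ deleteOne: { filter: { a: 1 } } });
 *   bulk.raw({ deleteMany: { filter: { a: 1 } } });
 *   bulk.raw({ insertMany: [{ a: 1 }, { a: 2 }] });
 */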
/**
* Add a single insert document to the bulk operation
*
* @param {object} doc the document to insert
* @throws {MongoError}
* @return {OrderedBulkOperation}
*/
OrderedBulkOperation.prototype.insert = function(document) {
if(document._id == null) document._id = new ObjectID();
return addToOperationsList(this, common.INSERT, document);
}
/**
* Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
*
* @method
* @param {object} selector The selector for the bulk operation.
* @throws {MongoError}
* @return {FindOperatorsOrdered}
*/
OrderedBulkOperation.prototype.find = function(selector) {
if (!selector) {
throw toError("Bulk find operation must specify a selector");
}
// Save a current selector
this.s.currentOp = {
selector: selector
}
return new FindOperatorsOrdered(this);
}
Object.defineProperty(OrderedBulkOperation.prototype, 'length', {
enumerable: true,
get: function() {
return this.s.currentIndex;
}
});
//
// Execute next write command in a chain
var executeCommands = function(self, callback) {
if(self.s.batches.length == 0) {
return callback(null, new BulkWriteResult(self.s.bulkResult));
}
// Ordered execution of the command
var batch = self.s.batches.shift();
var resultHandler = function(err, result) {
// If we have an error
if(err) err.ok = 0;
// Merge the results together
var mergeResult = mergeBatchResults(true, batch, self.s.bulkResult, err, result);
if(mergeResult != null) {
return callback(null, new BulkWriteResult(self.s.bulkResult));
}
// If we are ordered and have errors that are
// not all replication errors, terminate the operation
if(self.s.bulkResult.writeErrors.length > 0) {
return callback(self.s.bulkResult.writeErrors[0], new BulkWriteResult(self.s.bulkResult));
}
// Execute the next command in line
executeCommands(self, callback);
}
var finalOptions = {ordered: true}
if(self.s.writeConcern != null) {
finalOptions.writeConcern = self.s.writeConcern;
}
try {
if(batch.batchType == common.INSERT) {
self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
} else if(batch.batchType == common.UPDATE) {
self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
} else if(batch.batchType == common.REMOVE) {
self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
}
} catch(err) {
// Force top level error
err.ok = 0;
// Merge top level error and return
callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
}
}
/**
* The callback format for results
* @callback OrderedBulkOperation~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {BulkWriteResult} result The bulk write result.
*/
/**
* Execute the ordered bulk operation
*
* @method
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.fsync=false] Specify a file sync write concern.
* @param {OrderedBulkOperation~resultCallback} callback The result callback
* @throws {MongoError}
* @return {null}
*/
OrderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
if(this.s.executed) throw toError("batch cannot be re-executed");
if(typeof _writeConcern == 'function') {
callback = _writeConcern;
} else {
this.s.writeConcern = _writeConcern;
}
// If we have current batch
if(this.s.currentBatch) this.s.batches.push(this.s.currentBatch);
// If we have no operations in the bulk raise an error
if(this.s.batches.length == 0) {
throw toError("Invalid Operation, No operations in bulk");
}
// Execute the commands
return executeCommands(this, callback);
}
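/*
 * Illustrative sketch (not part of the upstream driver source): a typical
 * ordered bulk operation, built through Collection.initializeOrderedBulkOp()
 * and executed with the callback documented above. The connection URL,
 * collection name and documents are hypothetical.
 *
 *   var MongoClient = require('mongodb').MongoClient;
 *   MongoClient.connect('mongodb://localhost:27017/test', function(err, db) {
 *     if(err) throw err;
 *     var bulk = db.collection('items').initializeOrderedBulkOp();
 *     bulk.insert({ name: 'a', qty: 1 });
 *     bulk.find({ name: 'a' }).update({ $inc: { qty: 1 } });
 *     bulk.find({ name: 'b' }).upsert().updateOne({ $set: { qty: 5 } });
 *     bulk.find({ name: 'c' }).removeOne();
 *     bulk.execute({ w: 1 }, function(err, result) {
 *       console.log(result.nInserted, result.nUpserted, result.nMatched);
 *       db.close();
 *     });
 *   });
 */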
/**
* Returns an ordered batch object
* @ignore
*/
var initializeOrderedBulkOp = function(topology, collection, options) {
return new OrderedBulkOperation(topology, collection, options);
}
module.exports = initializeOrderedBulkOp;

server/node_modules/mongodb/lib/bulk/unordered.js generated vendored Executable file

@@ -0,0 +1,482 @@
"use strict";
var common = require('./common')
, utils = require('../utils')
, toError = require('../utils').toError
, f = require('util').format
, shallowClone = utils.shallowClone
, WriteError = common.WriteError
, BulkWriteResult = common.BulkWriteResult
, LegacyOp = common.LegacyOp
, ObjectID = require('mongodb-core').BSON.ObjectID
, Batch = common.Batch
, mergeBatchResults = common.mergeBatchResults;
/**
* Create a FindOperatorsUnordered instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {number} length Get the number of operations in the bulk.
* @return {FindOperatorsUnordered} a FindOperatorsUnordered instance.
*/
var FindOperatorsUnordered = function(self) {
this.s = self.s;
}
/**
* Add a single update document to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.update = function(updateDocument) {
// Perform upsert
var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;
// Establish the update command
var document = {
q: this.s.currentOp.selector
, u: updateDocument
, multi: true
, upsert: upsert
}
// Clear out current Op
this.s.currentOp = null;
// Add the update document to the list
return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a single updateOne operation to the bulk operation
*
* @method
* @param {object} doc update operations
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.updateOne = function(updateDocument) {
// Perform upsert
var upsert = typeof this.s.currentOp.upsert == 'boolean' ? this.s.currentOp.upsert : false;
// Establish the update command
var document = {
q: this.s.currentOp.selector
, u: updateDocument
, multi: false
, upsert: upsert
}
// Clear out current Op
this.s.currentOp = null;
// Add the update document to the list
return addToOperationsList(this, common.UPDATE, document);
}
/**
* Add a replace one operation to the bulk operation
*
* @method
* @param {object} doc the new document to replace the existing one with
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.replaceOne = function(updateDocument) {
return this.updateOne(updateDocument);
}
/**
* Upsert modifier for update bulk operation
*
* @method
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.upsert = function() {
this.s.currentOp.upsert = true;
return this;
}
/**
* Add a remove one operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.removeOne = function() {
// Establish the remove command
var document = {
q: this.s.currentOp.selector
, limit: 1
}
// Clear out current Op
this.s.currentOp = null;
// Add the remove document to the list
return addToOperationsList(this, common.REMOVE, document);
}
/**
* Add a remove operation to the bulk operation
*
* @method
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
FindOperatorsUnordered.prototype.remove = function() {
// Establish the remove command
var document = {
q: this.s.currentOp.selector
, limit: 0
}
// Clear out current Op
this.s.currentOp = null;
// Add the remove document to the list
return addToOperationsList(this, common.REMOVE, document);
}
//
// Add to the operations list
//
var addToOperationsList = function(_self, docType, document) {
// Get the bsonSize
var bsonSize = _self.s.bson.calculateObjectSize(document, false);
// Throw error if the doc is bigger than the max BSON size
if(bsonSize >= _self.s.maxBatchSizeBytes) throw toError("document is larger than the maximum size " + _self.s.maxBatchSizeBytes);
// Holds the current batch
_self.s.currentBatch = null;
// Get the right type of batch
if(docType == common.INSERT) {
_self.s.currentBatch = _self.s.currentInsertBatch;
} else if(docType == common.UPDATE) {
_self.s.currentBatch = _self.s.currentUpdateBatch;
} else if(docType == common.REMOVE) {
_self.s.currentBatch = _self.s.currentRemoveBatch;
}
// Create a new batch object if we don't have a current one
if(_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
// Check if we need to create a new batch
if(((_self.s.currentBatch.size + 1) >= _self.s.maxWriteBatchSize)
|| ((_self.s.currentBatch.sizeBytes + bsonSize) >= _self.s.maxBatchSizeBytes)
|| (_self.s.currentBatch.batchType != docType)) {
// Save the batch to the execution stack
_self.s.batches.push(_self.s.currentBatch);
// Create a new batch
_self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
}
// We have an array of documents
if(Array.isArray(document)) {
throw toError("operation passed in cannot be an Array");
} else {
_self.s.currentBatch.operations.push(document);
_self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
_self.s.currentIndex = _self.s.currentIndex + 1;
}
// Save back the current Batch to the right type
if(docType == common.INSERT) {
_self.s.currentInsertBatch = _self.s.currentBatch;
// currentIndex was already advanced above, so record the index of this document
_self.s.bulkResult.insertedIds.push({index: _self.s.currentIndex - 1, _id: document._id});
} else if(docType == common.UPDATE) {
_self.s.currentUpdateBatch = _self.s.currentBatch;
} else if(docType == common.REMOVE) {
_self.s.currentRemoveBatch = _self.s.currentBatch;
}
// Update current batch size
_self.s.currentBatch.size = _self.s.currentBatch.size + 1;
_self.s.currentBatch.sizeBytes = _self.s.currentBatch.sizeBytes + bsonSize;
// Return self
return _self;
}
/**
* Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @return {UnorderedBulkOperation} an UnorderedBulkOperation instance.
*/
var UnorderedBulkOperation = function(topology, collection, options) {
options = options == null ? {} : options;
// Contains reference to self
var self = this;
// Get the namespace for the write operations
var namespace = collection.collectionName;
// Used to mark operation as executed
var executed = false;
// Current item
// var currentBatch = null;
var currentOp = null;
var currentIndex = 0;
var batches = [];
// The current Batches for the different operations
var currentInsertBatch = null;
var currentUpdateBatch = null;
var currentRemoveBatch = null;
// Handle to the bson serializer, used to calculate running sizes
var bson = topology.bson;
// Get the capabilities
var capabilities = topology.capabilities();
// Set max byte size
var maxBatchSizeBytes = topology.isMasterDoc.maxBsonObjectSize;
var maxWriteBatchSize = topology.isMasterDoc.maxWriteBatchSize || 1000;
// Get the write concern
var writeConcern = common.writeConcern(shallowClone(options), collection, options);
// Final results
var bulkResult = {
ok: 1
, writeErrors: []
, writeConcernErrors: []
, insertedIds: []
, nInserted: 0
, nUpserted: 0
, nMatched: 0
, nModified: 0
, nRemoved: 0
, upserted: []
};
// Internal state
this.s = {
// Final result
bulkResult: bulkResult
// Current batch state
, currentInsertBatch: null
, currentUpdateBatch: null
, currentRemoveBatch: null
, currentBatch: null
, currentIndex: 0
, batches: []
// Write concern
, writeConcern: writeConcern
// Capabilities
, capabilities: capabilities
// Max batch size options
, maxBatchSizeBytes: maxBatchSizeBytes
, maxWriteBatchSize: maxWriteBatchSize
// Namespace
, namespace: namespace
// BSON
, bson: bson
// Topology
, topology: topology
// Options
, options: options
// Current operation
, currentOp: currentOp
// Executed
, executed: executed
// Collection
, collection: collection
}
}
/**
* Add a single insert document to the bulk operation
*
* @param {object} doc the document to insert
* @throws {MongoError}
* @return {UnorderedBulkOperation}
*/
UnorderedBulkOperation.prototype.insert = function(document) {
if(document._id == null) document._id = new ObjectID();
return addToOperationsList(this, common.INSERT, document);
}
/**
* Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
*
* @method
* @param {object} selector The selector for the bulk operation.
* @throws {MongoError}
* @return {FindOperatorsUnordered}
*/
UnorderedBulkOperation.prototype.find = function(selector) {
if (!selector) {
throw toError("Bulk find operation must specify a selector");
}
// Save a current selector
this.s.currentOp = {
selector: selector
}
return new FindOperatorsUnordered(this);
}
Object.defineProperty(UnorderedBulkOperation.prototype, 'length', {
enumerable: true,
get: function() {
return this.s.currentIndex;
}
});
UnorderedBulkOperation.prototype.raw = function(op) {
var key = Object.keys(op)[0];
// Update operations
if((op.updateOne && op.updateOne.q)
|| (op.updateMany && op.updateMany.q)
|| (op.replaceOne && op.replaceOne.q)) {
op[key].multi = op.updateOne || op.replaceOne ? false : true;
return addToOperationsList(this, common.UPDATE, op[key]);
}
// Crud spec update format
if(op.updateOne || op.updateMany || op.replaceOne) {
var multi = op.updateOne || op.replaceOne ? false : true;
var operation = {q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi}
if(op[key].upsert) operation.upsert = true;
return addToOperationsList(this, common.UPDATE, operation);
}
// Remove operations
if(op.removeOne || op.removeMany || (op.deleteOne && op.deleteOne.q) || op.deleteMany && op.deleteMany.q) {
op[key].limit = op.removeOne ? 1 : 0;
return addToOperationsList(this, common.REMOVE, op[key]);
}
// Crud spec delete operations, less efficient
if(op.deleteOne || op.deleteMany) {
var limit = op.deleteOne ? 1 : 0;
var operation = {q: op[key].filter, limit: limit}
return addToOperationsList(this, common.REMOVE, operation);
}
// Insert operations
if(op.insertOne && op.insertOne.document == null) {
if(op.insertOne._id == null) op.insertOne._id = new ObjectID();
return addToOperationsList(this, common.INSERT, op.insertOne);
} else if(op.insertOne && op.insertOne.document) {
if(op.insertOne.document._id == null) op.insertOne.document._id = new ObjectID();
return addToOperationsList(this, common.INSERT, op.insertOne.document);
}
if(op.insertMany) {
for(var i = 0; i < op.insertMany.length; i++) {
if(op.insertMany[i]._id == null) op.insertMany[i]._id = new ObjectID();
addToOperationsList(this, common.INSERT, op.insertMany[i]);
}
return;
}
// No valid type of operation
throw toError("bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany");
}
//
// Execute the command
var executeBatch = function(self, batch, callback) {
var finalOptions = {ordered: false}
if(self.s.writeConcern != null) {
finalOptions.writeConcern = self.s.writeConcern;
}
var resultHandler = function(err, result) {
// If we have an error
if(err) err.ok = 0;
callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, result));
}
try {
if(batch.batchType == common.INSERT) {
self.s.topology.insert(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
} else if(batch.batchType == common.UPDATE) {
self.s.topology.update(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
} else if(batch.batchType == common.REMOVE) {
self.s.topology.remove(self.s.collection.namespace, batch.operations, finalOptions, resultHandler);
}
} catch(err) {
// Force top level error
err.ok = 0;
// Merge top level error and return
callback(null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
}
}
//
// Execute all the commands
var executeBatches = function(self, callback) {
var numberOfCommandsToExecute = self.s.batches.length;
// Execute over all the batches
for(var i = 0; i < self.s.batches.length; i++) {
executeBatch(self, self.s.batches[i], function(err, result) {
numberOfCommandsToExecute = numberOfCommandsToExecute - 1;
// Execute
if(numberOfCommandsToExecute == 0) {
var error = self.s.bulkResult.writeErrors.length > 0 ? self.s.bulkResult.writeErrors[0] : null;
callback(error, new BulkWriteResult(self.s.bulkResult));
}
});
}
}
/**
* The callback format for results
* @callback UnorderedBulkOperation~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {BulkWriteResult} result The bulk write result.
*/
/**
* Execute the unordered bulk operation
*
* @method
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.fsync=false] Specify a file sync write concern.
* @param {UnorderedBulkOperation~resultCallback} callback The result callback
* @throws {MongoError}
* @return {null}
*/
UnorderedBulkOperation.prototype.execute = function(_writeConcern, callback) {
if(this.s.executed) throw toError("batch cannot be re-executed");
if(typeof _writeConcern == 'function') {
callback = _writeConcern;
} else {
this.s.writeConcern = _writeConcern;
}
// If we have current batch
if(this.s.currentInsertBatch) this.s.batches.push(this.s.currentInsertBatch);
if(this.s.currentUpdateBatch) this.s.batches.push(this.s.currentUpdateBatch);
if(this.s.currentRemoveBatch) this.s.batches.push(this.s.currentRemoveBatch);
// If we have no operations in the bulk raise an error
if(this.s.batches.length == 0) {
throw toError("Invalid Operation, No operations in bulk");
}
// Execute batches
return executeBatches(this, function(err, result) {
callback(err, result);
});
}
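/*
 * Illustrative sketch (not part of the upstream driver source): the unordered
 * counterpart, built through Collection.initializeUnorderedBulkOp(). Operations
 * of the same type are grouped into batches that may run in any order, so a
 * failing batch does not stop the remaining ones. Names below are hypothetical.
 *
 *   var bulk = db.collection('items').initializeUnorderedBulkOp();
 *   bulk.insert({ name: 'a' });
 *   bulk.find({ name: 'b' }).upsert().updateOne({ $set: { qty: 1 } });
 *   bulk.find({ name: 'c' }).remove();
 *   bulk.execute(function(err, result) {
 *     if(result.hasWriteErrors()) console.log(result.getWriteErrorCount(), 'write errors');
 *   });
 */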
/**
* Returns an unordered batch object
* @ignore
*/
var initializeUnorderedBulkOp = function(topology, collection, options) {
return new UnorderedBulkOperation(topology, collection, options);
}
module.exports = initializeUnorderedBulkOp;