mirror of https://github.com/JonasunderscoreJones/jonas_jones-api.git (synced 2025-10-24 01:29:19 +02:00)
some progress
This commit is contained in:
parent aea93a5527
commit e3c15bd288
1388 changed files with 306946 additions and 68323 deletions
node_modules/mongodb/lib/cmap/auth/auth_provider.js (generated, vendored, normal file; 36 lines added)
@@ -0,0 +1,36 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AuthProvider = exports.AuthContext = void 0;
const error_1 = require("../../error");
/** Context used during authentication */
class AuthContext {
    constructor(connection, credentials, options) {
        this.connection = connection;
        this.credentials = credentials;
        this.options = options;
    }
}
exports.AuthContext = AuthContext;
class AuthProvider {
    /**
     * Prepare the handshake document before the initial handshake.
     *
     * @param handshakeDoc - The document used for the initial handshake on a connection
     * @param authContext - Context for authentication flow
     */
    prepare(handshakeDoc, authContext, callback) {
        callback(undefined, handshakeDoc);
    }
    /**
     * Authenticate
     *
     * @param context - A shared context for authentication flow
     * @param callback - The callback to return the result from the authentication
     */
    auth(context, callback) {
        // TODO(NODE-3483): Replace this with MongoMethodOverrideError
        callback(new error_1.MongoRuntimeError('`auth` method must be overridden by subclass'));
    }
}
exports.AuthProvider = AuthProvider;
//# sourceMappingURL=auth_provider.js.map
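The file above is the extension point for every mechanism added in this commit: prepare may decorate the connection handshake (used for speculative authentication), while auth must be overridden or it fails with a MongoRuntimeError. A minimal sketch of a conforming subclass, assuming only the callback-style API shown above (the provider and its reply shape are hypothetical):

const { AuthProvider } = require('./auth_provider');

class NoopAuthProvider extends AuthProvider {
    // Override the required hook; report success without any server round trip.
    auth(authContext, callback) {
        if (!authContext.credentials) {
            // Same convention the bundled providers use for a missing context.
            return callback(new Error('AuthContext must provide credentials.'));
        }
        callback(undefined, { ok: 1 });
    }
}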
node_modules/mongodb/lib/cmap/auth/auth_provider.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"auth_provider.js","sourceRoot":"","sources":["../../../src/cmap/auth/auth_provider.ts"],"names":[],"mappings":";;;AACA,uCAAgD;AAQhD,yCAAyC;AACzC,MAAa,WAAW;IAatB,YACE,UAAsB,EACtB,WAAyC,EACzC,OAA2B;QAE3B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;QAC/B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAtBD,kCAsBC;AAED,MAAa,YAAY;IACvB;;;;;OAKG;IACH,OAAO,CACL,YAA+B,EAC/B,WAAwB,EACxB,QAAqC;QAErC,QAAQ,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;IACpC,CAAC;IAED;;;;;OAKG;IACH,IAAI,CAAC,OAAoB,EAAE,QAAkB;QAC3C,8DAA8D;QAC9D,QAAQ,CAAC,IAAI,yBAAiB,CAAC,8CAA8C,CAAC,CAAC,CAAC;IAClF,CAAC;CACF;AAzBD,oCAyBC"}
node_modules/mongodb/lib/cmap/auth/gssapi.js (generated, vendored, normal file; 188 lines added)
@@ -0,0 +1,188 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.resolveCname = exports.performGSSAPICanonicalizeHostName = exports.GSSAPI = exports.GSSAPICanonicalizationValue = void 0;
const dns = require("dns");
const deps_1 = require("../../deps");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
/** @public */
exports.GSSAPICanonicalizationValue = Object.freeze({
    on: true,
    off: false,
    none: 'none',
    forward: 'forward',
    forwardAndReverse: 'forwardAndReverse'
});
class GSSAPI extends auth_provider_1.AuthProvider {
    auth(authContext, callback) {
        const { connection, credentials } = authContext;
        if (credentials == null)
            return callback(new error_1.MongoMissingCredentialsError('Credentials required for GSSAPI authentication'));
        const { username } = credentials;
        function externalCommand(command, cb) {
            return connection.command((0, utils_1.ns)('$external.$cmd'), command, undefined, cb);
        }
        makeKerberosClient(authContext, (err, client) => {
            if (err)
                return callback(err);
            if (client == null)
                return callback(new error_1.MongoMissingDependencyError('GSSAPI client missing'));
            client.step('', (err, payload) => {
                if (err)
                    return callback(err);
                externalCommand(saslStart(payload), (err, result) => {
                    if (err)
                        return callback(err);
                    if (result == null)
                        return callback();
                    negotiate(client, 10, result.payload, (err, payload) => {
                        if (err)
                            return callback(err);
                        externalCommand(saslContinue(payload, result.conversationId), (err, result) => {
                            if (err)
                                return callback(err);
                            if (result == null)
                                return callback();
                            finalize(client, username, result.payload, (err, payload) => {
                                if (err)
                                    return callback(err);
                                externalCommand({
                                    saslContinue: 1,
                                    conversationId: result.conversationId,
                                    payload
                                }, (err, result) => {
                                    if (err)
                                        return callback(err);
                                    callback(undefined, result);
                                });
                            });
                        });
                    });
                });
            });
        });
    }
}
exports.GSSAPI = GSSAPI;
function makeKerberosClient(authContext, callback) {
    const { hostAddress } = authContext.options;
    const { credentials } = authContext;
    if (!hostAddress || typeof hostAddress.host !== 'string' || !credentials) {
        return callback(new error_1.MongoInvalidArgumentError('Connection must have host and port and credentials defined.'));
    }
    if ('kModuleError' in deps_1.Kerberos) {
        return callback(deps_1.Kerberos['kModuleError']);
    }
    const { initializeClient } = deps_1.Kerberos;
    const { username, password } = credentials;
    const mechanismProperties = credentials.mechanismProperties;
    const serviceName = mechanismProperties.SERVICE_NAME ?? 'mongodb';
    performGSSAPICanonicalizeHostName(hostAddress.host, mechanismProperties, (err, host) => {
        if (err)
            return callback(err);
        const initOptions = {};
        if (password != null) {
            Object.assign(initOptions, { user: username, password: password });
        }
        const spnHost = mechanismProperties.SERVICE_HOST ?? host;
        let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
        if ('SERVICE_REALM' in mechanismProperties) {
            spn = `${spn}@${mechanismProperties.SERVICE_REALM}`;
        }
        initializeClient(spn, initOptions, (err, client) => {
            // TODO(NODE-3483)
            if (err)
                return callback(new error_1.MongoRuntimeError(err));
            callback(undefined, client);
        });
    });
}
function saslStart(payload) {
    return {
        saslStart: 1,
        mechanism: 'GSSAPI',
        payload,
        autoAuthorize: 1
    };
}
function saslContinue(payload, conversationId) {
    return {
        saslContinue: 1,
        conversationId,
        payload
    };
}
function negotiate(client, retries, payload, callback) {
    client.step(payload, (err, response) => {
        // Retries exhausted, raise error
        if (err && retries === 0)
            return callback(err);
        // Adjust number of retries and call step again
        if (err)
            return negotiate(client, retries - 1, payload, callback);
        // Return the payload
        callback(undefined, response || '');
    });
}
function finalize(client, user, payload, callback) {
    // GSS Client Unwrap
    client.unwrap(payload, (err, response) => {
        if (err)
            return callback(err);
        // Wrap the response
        client.wrap(response || '', { user }, (err, wrapped) => {
            if (err)
                return callback(err);
            // Return the payload
            callback(undefined, wrapped);
        });
    });
}
function performGSSAPICanonicalizeHostName(host, mechanismProperties, callback) {
    const mode = mechanismProperties.CANONICALIZE_HOST_NAME;
    if (!mode || mode === exports.GSSAPICanonicalizationValue.none) {
        return callback(undefined, host);
    }
    // If forward and reverse or true
    if (mode === exports.GSSAPICanonicalizationValue.on ||
        mode === exports.GSSAPICanonicalizationValue.forwardAndReverse) {
        // Perform the lookup of the ip address.
        dns.lookup(host, (error, address) => {
            // No ip found, return the error.
            if (error)
                return callback(error);
            // Perform a reverse ptr lookup on the ip address.
            dns.resolvePtr(address, (err, results) => {
                // This can error as ptr records may not exist for all ips. In this case
                // fallback to a cname lookup as dns.lookup() does not return the
                // cname.
                if (err) {
                    return resolveCname(host, callback);
                }
                // If the ptr did not error but had no results, return the host.
                callback(undefined, results.length > 0 ? results[0] : host);
            });
        });
    }
    else {
        // The case for forward is just to resolve the cname as dns.lookup()
        // will not return it.
        resolveCname(host, callback);
    }
}
exports.performGSSAPICanonicalizeHostName = performGSSAPICanonicalizeHostName;
function resolveCname(host, callback) {
    // Attempt to resolve the host name
    dns.resolveCname(host, (err, r) => {
        if (err)
            return callback(undefined, host);
        // Get the first resolve host id
        if (r.length > 0) {
            return callback(undefined, r[0]);
        }
        callback(undefined, host);
    });
}
exports.resolveCname = resolveCname;
//# sourceMappingURL=gssapi.js.map
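One subtlety in makeKerberosClient above: the service principal name it hands to the Kerberos module uses a platform-dependent separator ('/' on win32, '@' elsewhere) plus an optional realm suffix. A standalone restatement of that rule, with made-up host and realm values for illustration:

function buildSpn(serviceName, spnHost, serviceRealm) {
    // Mirrors the rule in makeKerberosClient: '/' on win32, '@' elsewhere.
    let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
    if (serviceRealm != null) {
        spn = `${spn}@${serviceRealm}`;
    }
    return spn;
}

// buildSpn('mongodb', 'db1.example.com', 'EXAMPLE.COM')
//   -> 'mongodb@db1.example.com@EXAMPLE.COM' on non-Windows platforms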
node_modules/mongodb/lib/cmap/auth/gssapi.js.map (generated, vendored, normal file; 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/cmap/auth/mongo_credentials.js (generated, vendored, normal file; 145 lines added)
@@ -0,0 +1,145 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoCredentials = void 0;
const error_1 = require("../../error");
const gssapi_1 = require("./gssapi");
const providers_1 = require("./providers");
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst
function getDefaultAuthMechanism(hello) {
    if (hello) {
        // If hello contains saslSupportedMechs, use scram-sha-256
        // if it is available, else scram-sha-1
        if (Array.isArray(hello.saslSupportedMechs)) {
            return hello.saslSupportedMechs.includes(providers_1.AuthMechanism.MONGODB_SCRAM_SHA256)
                ? providers_1.AuthMechanism.MONGODB_SCRAM_SHA256
                : providers_1.AuthMechanism.MONGODB_SCRAM_SHA1;
        }
        // Fallback to legacy selection method. If wire version >= 3, use scram-sha-1
        if (hello.maxWireVersion >= 3) {
            return providers_1.AuthMechanism.MONGODB_SCRAM_SHA1;
        }
    }
    // Default for wireprotocol < 3
    return providers_1.AuthMechanism.MONGODB_CR;
}
/**
 * A representation of the credentials used by MongoDB
 * @public
 */
class MongoCredentials {
    constructor(options) {
        this.username = options.username;
        this.password = options.password;
        this.source = options.source;
        if (!this.source && options.db) {
            this.source = options.db;
        }
        this.mechanism = options.mechanism || providers_1.AuthMechanism.MONGODB_DEFAULT;
        this.mechanismProperties = options.mechanismProperties || {};
        if (this.mechanism.match(/MONGODB-AWS/i)) {
            if (!this.username && process.env.AWS_ACCESS_KEY_ID) {
                this.username = process.env.AWS_ACCESS_KEY_ID;
            }
            if (!this.password && process.env.AWS_SECRET_ACCESS_KEY) {
                this.password = process.env.AWS_SECRET_ACCESS_KEY;
            }
            if (this.mechanismProperties.AWS_SESSION_TOKEN == null &&
                process.env.AWS_SESSION_TOKEN != null) {
                this.mechanismProperties = {
                    ...this.mechanismProperties,
                    AWS_SESSION_TOKEN: process.env.AWS_SESSION_TOKEN
                };
            }
        }
        Object.freeze(this.mechanismProperties);
        Object.freeze(this);
    }
    /** Determines if two MongoCredentials objects are equivalent */
    equals(other) {
        return (this.mechanism === other.mechanism &&
            this.username === other.username &&
            this.password === other.password &&
            this.source === other.source);
    }
    /**
     * If the authentication mechanism is set to "default", resolves the authMechanism
     * based on the server version and server supported sasl mechanisms.
     *
     * @param hello - A hello response from the server
     */
    resolveAuthMechanism(hello) {
        // If the mechanism is not "default", then it does not need to be resolved
        if (this.mechanism.match(/DEFAULT/i)) {
            return new MongoCredentials({
                username: this.username,
                password: this.password,
                source: this.source,
                mechanism: getDefaultAuthMechanism(hello),
                mechanismProperties: this.mechanismProperties
            });
        }
        return this;
    }
    validate() {
        if ((this.mechanism === providers_1.AuthMechanism.MONGODB_GSSAPI ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_CR ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_PLAIN ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_SCRAM_SHA1 ||
            this.mechanism === providers_1.AuthMechanism.MONGODB_SCRAM_SHA256) &&
            !this.username) {
            throw new error_1.MongoMissingCredentialsError(`Username required for mechanism '${this.mechanism}'`);
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_OIDC) {
            if (this.username) {
                throw new error_1.MongoInvalidArgumentError(`Username not permitted for mechanism '${this.mechanism}'. Use PRINCIPAL_NAME instead.`);
            }
            if (this.mechanismProperties.PRINCIPAL_NAME && this.mechanismProperties.DEVICE_NAME) {
                throw new error_1.MongoInvalidArgumentError(`PRINCIPAL_NAME and DEVICE_NAME may not be used together for mechanism '${this.mechanism}'.`);
            }
            if (this.mechanismProperties.DEVICE_NAME && this.mechanismProperties.DEVICE_NAME !== 'aws') {
                throw new error_1.MongoInvalidArgumentError(`Currently only a DEVICE_NAME of 'aws' is supported for mechanism '${this.mechanism}'.`);
            }
            if (this.mechanismProperties.REFRESH_TOKEN_CALLBACK &&
                !this.mechanismProperties.REQUEST_TOKEN_CALLBACK) {
                throw new error_1.MongoInvalidArgumentError(`A REQUEST_TOKEN_CALLBACK must be provided when using a REFRESH_TOKEN_CALLBACK for mechanism '${this.mechanism}'`);
            }
            if (!this.mechanismProperties.DEVICE_NAME &&
                !this.mechanismProperties.REQUEST_TOKEN_CALLBACK) {
                throw new error_1.MongoInvalidArgumentError(`Either a DEVICE_NAME or a REQUEST_TOKEN_CALLBACK must be specified for mechanism '${this.mechanism}'.`);
            }
        }
        if (providers_1.AUTH_MECHS_AUTH_SRC_EXTERNAL.has(this.mechanism)) {
            if (this.source != null && this.source !== '$external') {
                // TODO(NODE-3485): Replace this with a MongoAuthValidationError
                throw new error_1.MongoAPIError(`Invalid source '${this.source}' for mechanism '${this.mechanism}' specified.`);
            }
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_PLAIN && this.source == null) {
            // TODO(NODE-3485): Replace this with a MongoAuthValidationError
            throw new error_1.MongoAPIError('PLAIN Authentication Mechanism needs an auth source');
        }
        if (this.mechanism === providers_1.AuthMechanism.MONGODB_X509 && this.password != null) {
            if (this.password === '') {
                Reflect.set(this, 'password', undefined);
                return;
            }
            // TODO(NODE-3485): Replace this with a MongoAuthValidationError
            throw new error_1.MongoAPIError(`Password not allowed for mechanism MONGODB-X509`);
        }
        const canonicalization = this.mechanismProperties.CANONICALIZE_HOST_NAME ?? false;
        if (!Object.values(gssapi_1.GSSAPICanonicalizationValue).includes(canonicalization)) {
            throw new error_1.MongoAPIError(`Invalid CANONICALIZE_HOST_NAME value: ${canonicalization}`);
        }
    }
    static merge(creds, options) {
        return new MongoCredentials({
            username: options.username ?? creds?.username ?? '',
            password: options.password ?? creds?.password ?? '',
            mechanism: options.mechanism ?? creds?.mechanism ?? providers_1.AuthMechanism.MONGODB_DEFAULT,
            mechanismProperties: options.mechanismProperties ?? creds?.mechanismProperties ?? {},
            source: options.source ?? options.db ?? creds?.source ?? 'admin'
        });
    }
}
exports.MongoCredentials = MongoCredentials;
//# sourceMappingURL=mongo_credentials.js.map
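resolveAuthMechanism above only rewrites credentials whose mechanism is still DEFAULT, using the ladder in getDefaultAuthMechanism: advertised SASL mechanisms first, then wire version, then the legacy MONGODB-CR fallback. A sketch of the three outcomes, run from a script alongside these files (the hello documents are fabricated):

const { MongoCredentials } = require('./mongo_credentials');

const creds = new MongoCredentials({ username: 'app', password: 'secret', source: 'admin' });

// Server advertises SCRAM-SHA-256, so it wins over SCRAM-SHA-1:
creds.resolveAuthMechanism({ saslSupportedMechs: ['SCRAM-SHA-1', 'SCRAM-SHA-256'] }).mechanism;
// -> 'SCRAM-SHA-256'

// No saslSupportedMechs, but wire version >= 3:
creds.resolveAuthMechanism({ maxWireVersion: 8 }).mechanism;
// -> 'SCRAM-SHA-1'

// No hello response at all:
creds.resolveAuthMechanism(undefined).mechanism;
// -> 'MONGODB-CR'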
node_modules/mongodb/lib/cmap/auth/mongo_credentials.js.map (generated, vendored, normal file; 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/cmap/auth/mongocr.js (generated, vendored, normal file; 44 lines added)
@@ -0,0 +1,44 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoCR = void 0;
const crypto = require("crypto");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
class MongoCR extends auth_provider_1.AuthProvider {
    auth(authContext, callback) {
        const { connection, credentials } = authContext;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        const username = credentials.username;
        const password = credentials.password;
        const source = credentials.source;
        connection.command((0, utils_1.ns)(`${source}.$cmd`), { getnonce: 1 }, undefined, (err, r) => {
            let nonce = null;
            let key = null;
            // Get nonce
            if (err == null) {
                nonce = r.nonce;
                // Use node md5 generator
                let md5 = crypto.createHash('md5');
                // Generate keys used for authentication
                md5.update(`${username}:mongo:${password}`, 'utf8');
                const hash_password = md5.digest('hex');
                // Final key
                md5 = crypto.createHash('md5');
                md5.update(nonce + username + hash_password, 'utf8');
                key = md5.digest('hex');
            }
            const authenticateCommand = {
                authenticate: 1,
                user: username,
                nonce,
                key
            };
            connection.command((0, utils_1.ns)(`${source}.$cmd`), authenticateCommand, undefined, callback);
        });
    }
}
exports.MongoCR = MongoCR;
//# sourceMappingURL=mongocr.js.map
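The legacy MONGODB-CR exchange above boils down to two MD5 passes: a password digest of 'user:mongo:password', then a digest over the server nonce, the username, and that hex digest. A self-contained restatement of the derivation (nonce and credentials are placeholder values):

const crypto = require('crypto');

function mongoCRKey(nonce, username, password) {
    // hash_password = md5('<username>:mongo:<password>'), hex-encoded
    const hashPassword = crypto.createHash('md5')
        .update(`${username}:mongo:${password}`, 'utf8')
        .digest('hex');
    // key = md5(nonce + username + hash_password), hex-encoded
    return crypto.createHash('md5')
        .update(nonce + username + hashPassword, 'utf8')
        .digest('hex');
}

// mongoCRKey('2375531c32080ae8', 'app', 'secret') -> 32-char hex key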
node_modules/mongodb/lib/cmap/auth/mongocr.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"mongocr.js","sourceRoot":"","sources":["../../../src/cmap/auth/mongocr.ts"],"names":[],"mappings":";;;AAAA,iCAAiC;AAEjC,uCAA2D;AAC3D,uCAA2C;AAC3C,mDAA4D;AAE5D,MAAa,OAAQ,SAAQ,4BAAY;IAC9B,IAAI,CAAC,WAAwB,EAAE,QAAkB;QACxD,MAAM,EAAE,UAAU,EAAE,WAAW,EAAE,GAAG,WAAW,CAAC;QAChD,IAAI,CAAC,WAAW,EAAE;YAChB,OAAO,QAAQ,CAAC,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC,CAAC;SAC5F;QACD,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QACtC,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QACtC,MAAM,MAAM,GAAG,WAAW,CAAC,MAAM,CAAC;QAClC,UAAU,CAAC,OAAO,CAAC,IAAA,UAAE,EAAC,GAAG,MAAM,OAAO,CAAC,EAAE,EAAE,QAAQ,EAAE,CAAC,EAAE,EAAE,SAAS,EAAE,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE;YAC9E,IAAI,KAAK,GAAG,IAAI,CAAC;YACjB,IAAI,GAAG,GAAG,IAAI,CAAC;YAEf,YAAY;YACZ,IAAI,GAAG,IAAI,IAAI,EAAE;gBACf,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC;gBAEhB,yBAAyB;gBACzB,IAAI,GAAG,GAAG,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;gBAEnC,wCAAwC;gBACxC,GAAG,CAAC,MAAM,CAAC,GAAG,QAAQ,UAAU,QAAQ,EAAE,EAAE,MAAM,CAAC,CAAC;gBACpD,MAAM,aAAa,GAAG,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;gBAExC,YAAY;gBACZ,GAAG,GAAG,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;gBAC/B,GAAG,CAAC,MAAM,CAAC,KAAK,GAAG,QAAQ,GAAG,aAAa,EAAE,MAAM,CAAC,CAAC;gBACrD,GAAG,GAAG,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;aACzB;YAED,MAAM,mBAAmB,GAAG;gBAC1B,YAAY,EAAE,CAAC;gBACf,IAAI,EAAE,QAAQ;gBACd,KAAK;gBACL,GAAG;aACJ,CAAC;YAEF,UAAU,CAAC,OAAO,CAAC,IAAA,UAAE,EAAC,GAAG,MAAM,OAAO,CAAC,EAAE,mBAAmB,EAAE,SAAS,EAAE,QAAQ,CAAC,CAAC;QACrF,CAAC,CAAC,CAAC;IACL,CAAC;CACF;AAxCD,0BAwCC"}
node_modules/mongodb/lib/cmap/auth/mongodb_aws.js (generated, vendored, normal file; 238 lines added)
@@ -0,0 +1,238 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoDBAWS = void 0;
const crypto = require("crypto");
const http = require("http");
const url = require("url");
const BSON = require("../../bson");
const deps_1 = require("../../deps");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
const mongo_credentials_1 = require("./mongo_credentials");
const providers_1 = require("./providers");
const ASCII_N = 110;
const AWS_RELATIVE_URI = 'http://169.254.170.2';
const AWS_EC2_URI = 'http://169.254.169.254';
const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials';
const bsonOptions = {
    useBigInt64: false,
    promoteLongs: true,
    promoteValues: true,
    promoteBuffers: false,
    bsonRegExp: false
};
class MongoDBAWS extends auth_provider_1.AuthProvider {
    auth(authContext, callback) {
        const { connection, credentials } = authContext;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        if ('kModuleError' in deps_1.aws4) {
            return callback(deps_1.aws4['kModuleError']);
        }
        const { sign } = deps_1.aws4;
        if ((0, utils_1.maxWireVersion)(connection) < 9) {
            callback(new error_1.MongoCompatibilityError('MONGODB-AWS authentication requires MongoDB version 4.4 or later'));
            return;
        }
        if (!credentials.username) {
            makeTempCredentials(credentials, (err, tempCredentials) => {
                if (err || !tempCredentials)
                    return callback(err);
                authContext.credentials = tempCredentials;
                this.auth(authContext, callback);
            });
            return;
        }
        const accessKeyId = credentials.username;
        const secretAccessKey = credentials.password;
        const sessionToken = credentials.mechanismProperties.AWS_SESSION_TOKEN;
        // If all three defined, include sessionToken, else include username and pass, else no credentials
        const awsCredentials = accessKeyId && secretAccessKey && sessionToken
            ? { accessKeyId, secretAccessKey, sessionToken }
            : accessKeyId && secretAccessKey
                ? { accessKeyId, secretAccessKey }
                : undefined;
        const db = credentials.source;
        crypto.randomBytes(32, (err, nonce) => {
            if (err) {
                callback(err);
                return;
            }
            const saslStart = {
                saslStart: 1,
                mechanism: 'MONGODB-AWS',
                payload: BSON.serialize({ r: nonce, p: ASCII_N }, bsonOptions)
            };
            connection.command((0, utils_1.ns)(`${db}.$cmd`), saslStart, undefined, (err, res) => {
                if (err)
                    return callback(err);
                const serverResponse = BSON.deserialize(res.payload.buffer, bsonOptions);
                const host = serverResponse.h;
                const serverNonce = serverResponse.s.buffer;
                if (serverNonce.length !== 64) {
                    callback(
                    // TODO(NODE-3483)
                    new error_1.MongoRuntimeError(`Invalid server nonce length ${serverNonce.length}, expected 64`));
                    return;
                }
                if (!utils_1.ByteUtils.equals(serverNonce.subarray(0, nonce.byteLength), nonce)) {
                    // throw because the serverNonce's leading 32 bytes must equal the client nonce's 32 bytes
                    // https://github.com/mongodb/specifications/blob/875446db44aade414011731840831f38a6c668df/source/auth/auth.rst#id11
                    // TODO(NODE-3483)
                    callback(new error_1.MongoRuntimeError('Server nonce does not begin with client nonce'));
                    return;
                }
                if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) {
                    // TODO(NODE-3483)
                    callback(new error_1.MongoRuntimeError(`Server returned an invalid host: "${host}"`));
                    return;
                }
                const body = 'Action=GetCallerIdentity&Version=2011-06-15';
                const options = sign({
                    method: 'POST',
                    host,
                    region: deriveRegion(serverResponse.h),
                    service: 'sts',
                    headers: {
                        'Content-Type': 'application/x-www-form-urlencoded',
                        'Content-Length': body.length,
                        'X-MongoDB-Server-Nonce': utils_1.ByteUtils.toBase64(serverNonce),
                        'X-MongoDB-GS2-CB-Flag': 'n'
                    },
                    path: '/',
                    body
                }, awsCredentials);
                const payload = {
                    a: options.headers.Authorization,
                    d: options.headers['X-Amz-Date']
                };
                if (sessionToken) {
                    payload.t = sessionToken;
                }
                const saslContinue = {
                    saslContinue: 1,
                    conversationId: 1,
                    payload: BSON.serialize(payload, bsonOptions)
                };
                connection.command((0, utils_1.ns)(`${db}.$cmd`), saslContinue, undefined, callback);
            });
        });
    }
}
exports.MongoDBAWS = MongoDBAWS;
function makeTempCredentials(credentials, callback) {
    function done(creds) {
        if (!creds.AccessKeyId || !creds.SecretAccessKey || !creds.Token) {
            callback(new error_1.MongoMissingCredentialsError('Could not obtain temporary MONGODB-AWS credentials'));
            return;
        }
        callback(undefined, new mongo_credentials_1.MongoCredentials({
            username: creds.AccessKeyId,
            password: creds.SecretAccessKey,
            source: credentials.source,
            mechanism: providers_1.AuthMechanism.MONGODB_AWS,
            mechanismProperties: {
                AWS_SESSION_TOKEN: creds.Token
            }
        }));
    }
    const credentialProvider = (0, deps_1.getAwsCredentialProvider)();
    // Check if the AWS credential provider from the SDK is present. If not,
    // use the old method.
    if ('kModuleError' in credentialProvider) {
        // If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
        // is set then drivers MUST assume that it was set by an AWS ECS agent
        if (process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI) {
            request(`${AWS_RELATIVE_URI}${process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI}`, undefined, (err, res) => {
                if (err)
                    return callback(err);
                done(res);
            });
            return;
        }
        // Otherwise assume we are on an EC2 instance
        // get a token
        request(`${AWS_EC2_URI}/latest/api/token`, { method: 'PUT', json: false, headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 } }, (err, token) => {
            if (err)
                return callback(err);
            // get role name
            request(`${AWS_EC2_URI}/${AWS_EC2_PATH}`, { json: false, headers: { 'X-aws-ec2-metadata-token': token } }, (err, roleName) => {
                if (err)
                    return callback(err);
                // get temp credentials
                request(`${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`, { headers: { 'X-aws-ec2-metadata-token': token } }, (err, creds) => {
                    if (err)
                        return callback(err);
                    done(creds);
                });
            });
        });
    }
    else {
        /*
         * Creates a credential provider that will attempt to find credentials from the
         * following sources (listed in order of precedence):
         *
         * - Environment variables exposed via process.env
         * - SSO credentials from token cache
         * - Web identity token credentials
         * - Shared credentials and config ini files
         * - The EC2/ECS Instance Metadata Service
         */
        const { fromNodeProviderChain } = credentialProvider;
        const provider = fromNodeProviderChain();
        provider()
            .then((creds) => {
            done({
                AccessKeyId: creds.accessKeyId,
                SecretAccessKey: creds.secretAccessKey,
                Token: creds.sessionToken,
                Expiration: creds.expiration
            });
        })
            .catch((error) => {
            callback(new error_1.MongoAWSError(error.message));
        });
    }
}
function deriveRegion(host) {
    const parts = host.split('.');
    if (parts.length === 1 || parts[1] === 'amazonaws') {
        return 'us-east-1';
    }
    return parts[1];
}
function request(uri, _options, callback) {
    const options = Object.assign({
        method: 'GET',
        timeout: 10000,
        json: true
    }, url.parse(uri), _options);
    const req = http.request(options, res => {
        res.setEncoding('utf8');
        let data = '';
        res.on('data', d => (data += d));
        res.on('end', () => {
            if (options.json === false) {
                callback(undefined, data);
                return;
            }
            try {
                const parsed = JSON.parse(data);
                callback(undefined, parsed);
            }
            catch (err) {
                // TODO(NODE-3483)
                callback(new error_1.MongoRuntimeError(`Invalid JSON response: "${data}"`));
            }
        });
    });
    req.on('timeout', () => {
        req.destroy(new error_1.MongoAWSError(`AWS request to ${uri} timed out after ${options.timeout} ms`));
    });
    req.on('error', err => callback(err));
    req.end();
}
//# sourceMappingURL=mongodb_aws.js.map
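deriveRegion above infers the STS region from the host the server returns in the first SASL round: the second dot-separated label is the region unless the host is a single label or the global sts.amazonaws.com endpoint. Restated with illustrative hostnames:

// 'sts.amazonaws.com'           -> 'us-east-1'  (global endpoint)
// 'sts'                         -> 'us-east-1'  (single label)
// 'sts.us-west-2.amazonaws.com' -> 'us-west-2'
function deriveRegionSketch(host) {
    const parts = host.split('.');
    return parts.length === 1 || parts[1] === 'amazonaws' ? 'us-east-1' : parts[1];
}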
node_modules/mongodb/lib/cmap/auth/mongodb_aws.js.map (generated, vendored, normal file; 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/cmap/auth/mongodb_oidc.js (generated, vendored, normal file; 3 lines added)
@@ -0,0 +1,3 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=mongodb_oidc.js.map
node_modules/mongodb/lib/cmap/auth/mongodb_oidc.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"mongodb_oidc.js","sourceRoot":"","sources":["../../../src/cmap/auth/mongodb_oidc.ts"],"names":[],"mappings":""}
node_modules/mongodb/lib/cmap/auth/plain.js (generated, vendored, normal file; 27 lines added)
@@ -0,0 +1,27 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Plain = void 0;
const bson_1 = require("../../bson");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
class Plain extends auth_provider_1.AuthProvider {
    auth(authContext, callback) {
        const { connection, credentials } = authContext;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        const username = credentials.username;
        const password = credentials.password;
        const payload = new bson_1.Binary(Buffer.from(`\x00${username}\x00${password}`));
        const command = {
            saslStart: 1,
            mechanism: 'PLAIN',
            payload: payload,
            autoAuthorize: 1
        };
        connection.command((0, utils_1.ns)('$external.$cmd'), command, undefined, callback);
    }
}
exports.Plain = Plain;
//# sourceMappingURL=plain.js.map
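The PLAIN payload built above is the RFC 4616 SASL PLAIN message: authorization identity (left empty), authentication identity, and password, joined by NUL bytes and wrapped in a BSON Binary. A quick look at the raw bytes, with placeholder credentials:

const payload = Buffer.from('\x00app\x00secret');
// <Buffer 00 61 70 70 00 73 65 63 72 65 74>
//  empty authzid ^, then 'app', NUL, 'secret'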
node_modules/mongodb/lib/cmap/auth/plain.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"plain.js","sourceRoot":"","sources":["../../../src/cmap/auth/plain.ts"],"names":[],"mappings":";;;AAAA,qCAAoC;AACpC,uCAA2D;AAC3D,uCAA2C;AAC3C,mDAA4D;AAE5D,MAAa,KAAM,SAAQ,4BAAY;IAC5B,IAAI,CAAC,WAAwB,EAAE,QAAkB;QACxD,MAAM,EAAE,UAAU,EAAE,WAAW,EAAE,GAAG,WAAW,CAAC;QAChD,IAAI,CAAC,WAAW,EAAE;YAChB,OAAO,QAAQ,CAAC,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC,CAAC;SAC5F;QACD,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QACtC,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QAEtC,MAAM,OAAO,GAAG,IAAI,aAAM,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,QAAQ,OAAO,QAAQ,EAAE,CAAC,CAAC,CAAC;QAC1E,MAAM,OAAO,GAAG;YACd,SAAS,EAAE,CAAC;YACZ,SAAS,EAAE,OAAO;YAClB,OAAO,EAAE,OAAO;YAChB,aAAa,EAAE,CAAC;SACjB,CAAC;QAEF,UAAU,CAAC,OAAO,CAAC,IAAA,UAAE,EAAC,gBAAgB,CAAC,EAAE,OAAO,EAAE,SAAS,EAAE,QAAQ,CAAC,CAAC;IACzE,CAAC;CACF;AAnBD,sBAmBC"}
node_modules/mongodb/lib/cmap/auth/providers.js (generated, vendored, normal file; 24 lines added)
@@ -0,0 +1,24 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AUTH_MECHS_AUTH_SRC_EXTERNAL = exports.AuthMechanism = void 0;
/** @public */
exports.AuthMechanism = Object.freeze({
    MONGODB_AWS: 'MONGODB-AWS',
    MONGODB_CR: 'MONGODB-CR',
    MONGODB_DEFAULT: 'DEFAULT',
    MONGODB_GSSAPI: 'GSSAPI',
    MONGODB_PLAIN: 'PLAIN',
    MONGODB_SCRAM_SHA1: 'SCRAM-SHA-1',
    MONGODB_SCRAM_SHA256: 'SCRAM-SHA-256',
    MONGODB_X509: 'MONGODB-X509',
    /** @internal TODO: NODE-5035: Make mechanism public. */
    MONGODB_OIDC: 'MONGODB-OIDC'
});
/** @internal */
exports.AUTH_MECHS_AUTH_SRC_EXTERNAL = new Set([
    exports.AuthMechanism.MONGODB_GSSAPI,
    exports.AuthMechanism.MONGODB_AWS,
    exports.AuthMechanism.MONGODB_OIDC,
    exports.AuthMechanism.MONGODB_X509
]);
//# sourceMappingURL=providers.js.map
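These frozen constants are the values matched against the authMechanism connection-string option, and the external-source set backs the validate() check in mongo_credentials.js above. A quick membership check, run from a script alongside these files:

const { AuthMechanism, AUTH_MECHS_AUTH_SRC_EXTERNAL } = require('./providers');

AUTH_MECHS_AUTH_SRC_EXTERNAL.has(AuthMechanism.MONGODB_X509);         // true: must use $external
AUTH_MECHS_AUTH_SRC_EXTERNAL.has(AuthMechanism.MONGODB_SCRAM_SHA256); // false: uses the auth db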
node_modules/mongodb/lib/cmap/auth/providers.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"providers.js","sourceRoot":"","sources":["../../../src/cmap/auth/providers.ts"],"names":[],"mappings":";;;AAAA,cAAc;AACD,QAAA,aAAa,GAAG,MAAM,CAAC,MAAM,CAAC;IACzC,WAAW,EAAE,aAAa;IAC1B,UAAU,EAAE,YAAY;IACxB,eAAe,EAAE,SAAS;IAC1B,cAAc,EAAE,QAAQ;IACxB,aAAa,EAAE,OAAO;IACtB,kBAAkB,EAAE,aAAa;IACjC,oBAAoB,EAAE,eAAe;IACrC,YAAY,EAAE,cAAc;IAC5B,wDAAwD;IACxD,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAKZ,gBAAgB;AACH,QAAA,4BAA4B,GAAG,IAAI,GAAG,CAAgB;IACjE,qBAAa,CAAC,cAAc;IAC5B,qBAAa,CAAC,WAAW;IACzB,qBAAa,CAAC,YAAY;IAC1B,qBAAa,CAAC,YAAY;CAC3B,CAAC,CAAC"}
node_modules/mongodb/lib/cmap/auth/scram.js (generated, vendored, normal file; 288 lines added)
@@ -0,0 +1,288 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ScramSHA256 = exports.ScramSHA1 = void 0;
const crypto = require("crypto");
const bson_1 = require("../../bson");
const deps_1 = require("../../deps");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
const providers_1 = require("./providers");
class ScramSHA extends auth_provider_1.AuthProvider {
    constructor(cryptoMethod) {
        super();
        this.cryptoMethod = cryptoMethod || 'sha1';
    }
    prepare(handshakeDoc, authContext, callback) {
        const cryptoMethod = this.cryptoMethod;
        const credentials = authContext.credentials;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        if (cryptoMethod === 'sha256' && deps_1.saslprep == null) {
            (0, utils_1.emitWarning)('Warning: no saslprep library specified. Passwords will not be sanitized');
        }
        crypto.randomBytes(24, (err, nonce) => {
            if (err) {
                return callback(err);
            }
            // store the nonce for later use
            Object.assign(authContext, { nonce });
            const request = Object.assign({}, handshakeDoc, {
                speculativeAuthenticate: Object.assign(makeFirstMessage(cryptoMethod, credentials, nonce), {
                    db: credentials.source
                })
            });
            callback(undefined, request);
        });
    }
    auth(authContext, callback) {
        const response = authContext.response;
        if (response && response.speculativeAuthenticate) {
            continueScramConversation(this.cryptoMethod, response.speculativeAuthenticate, authContext, callback);
            return;
        }
        executeScram(this.cryptoMethod, authContext, callback);
    }
}
function cleanUsername(username) {
    return username.replace('=', '=3D').replace(',', '=2C');
}
function clientFirstMessageBare(username, nonce) {
    // NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
    // Since the username is not sasl-prep-d, we need to do this here.
    return Buffer.concat([
        Buffer.from('n=', 'utf8'),
        Buffer.from(username, 'utf8'),
        Buffer.from(',r=', 'utf8'),
        Buffer.from(nonce.toString('base64'), 'utf8')
    ]);
}
function makeFirstMessage(cryptoMethod, credentials, nonce) {
    const username = cleanUsername(credentials.username);
    const mechanism = cryptoMethod === 'sha1' ? providers_1.AuthMechanism.MONGODB_SCRAM_SHA1 : providers_1.AuthMechanism.MONGODB_SCRAM_SHA256;
    // NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
    // Since the username is not sasl-prep-d, we need to do this here.
    return {
        saslStart: 1,
        mechanism,
        payload: new bson_1.Binary(Buffer.concat([Buffer.from('n,,', 'utf8'), clientFirstMessageBare(username, nonce)])),
        autoAuthorize: 1,
        options: { skipEmptyExchange: true }
    };
}
function executeScram(cryptoMethod, authContext, callback) {
    const { connection, credentials } = authContext;
    if (!credentials) {
        return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
    }
    if (!authContext.nonce) {
        return callback(new error_1.MongoInvalidArgumentError('AuthContext must contain a valid nonce property'));
    }
    const nonce = authContext.nonce;
    const db = credentials.source;
    const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce);
    connection.command((0, utils_1.ns)(`${db}.$cmd`), saslStartCmd, undefined, (_err, result) => {
        const err = resolveError(_err, result);
        if (err) {
            return callback(err);
        }
        continueScramConversation(cryptoMethod, result, authContext, callback);
    });
}
function continueScramConversation(cryptoMethod, response, authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;
    if (!credentials) {
        return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
    }
    if (!authContext.nonce) {
        return callback(new error_1.MongoInvalidArgumentError('Unable to continue SCRAM without valid nonce'));
    }
    const nonce = authContext.nonce;
    const db = credentials.source;
    const username = cleanUsername(credentials.username);
    const password = credentials.password;
    let processedPassword;
    if (cryptoMethod === 'sha256') {
        processedPassword = 'kModuleError' in deps_1.saslprep ? password : (0, deps_1.saslprep)(password);
    }
    else {
        try {
            processedPassword = passwordDigest(username, password);
        }
        catch (e) {
            return callback(e);
        }
    }
    const payload = Buffer.isBuffer(response.payload)
        ? new bson_1.Binary(response.payload)
        : response.payload;
    const dict = parsePayload(payload.value());
    const iterations = parseInt(dict.i, 10);
    if (iterations && iterations < 4096) {
        callback(
        // TODO(NODE-3483)
        new error_1.MongoRuntimeError(`Server returned an invalid iteration count ${iterations}`), false);
        return;
    }
    const salt = dict.s;
    const rnonce = dict.r;
    if (rnonce.startsWith('nonce')) {
        // TODO(NODE-3483)
        callback(new error_1.MongoRuntimeError(`Server returned an invalid nonce: ${rnonce}`), false);
        return;
    }
    // Set up start of proof
    const withoutProof = `c=biws,r=${rnonce}`;
    const saltedPassword = HI(processedPassword, Buffer.from(salt, 'base64'), iterations, cryptoMethod);
    const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
    const serverKey = HMAC(cryptoMethod, saltedPassword, 'Server Key');
    const storedKey = H(cryptoMethod, clientKey);
    const authMessage = [clientFirstMessageBare(username, nonce), payload.value(), withoutProof].join(',');
    const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
    const clientProof = `p=${xor(clientKey, clientSignature)}`;
    const clientFinal = [withoutProof, clientProof].join(',');
    const serverSignature = HMAC(cryptoMethod, serverKey, authMessage);
    const saslContinueCmd = {
        saslContinue: 1,
        conversationId: response.conversationId,
        payload: new bson_1.Binary(Buffer.from(clientFinal))
    };
    connection.command((0, utils_1.ns)(`${db}.$cmd`), saslContinueCmd, undefined, (_err, r) => {
        const err = resolveError(_err, r);
        if (err) {
            return callback(err);
        }
        const parsedResponse = parsePayload(r.payload.value());
        if (!compareDigest(Buffer.from(parsedResponse.v, 'base64'), serverSignature)) {
            callback(new error_1.MongoRuntimeError('Server returned an invalid signature'));
            return;
        }
        if (!r || r.done !== false) {
            return callback(err, r);
        }
        const retrySaslContinueCmd = {
            saslContinue: 1,
            conversationId: r.conversationId,
            payload: Buffer.alloc(0)
        };
        connection.command((0, utils_1.ns)(`${db}.$cmd`), retrySaslContinueCmd, undefined, callback);
    });
}
function parsePayload(payload) {
    const dict = {};
    const parts = payload.split(',');
    for (let i = 0; i < parts.length; i++) {
        const valueParts = parts[i].split('=');
        dict[valueParts[0]] = valueParts[1];
    }
    return dict;
}
function passwordDigest(username, password) {
    if (typeof username !== 'string') {
        throw new error_1.MongoInvalidArgumentError('Username must be a string');
    }
    if (typeof password !== 'string') {
        throw new error_1.MongoInvalidArgumentError('Password must be a string');
    }
    if (password.length === 0) {
        throw new error_1.MongoInvalidArgumentError('Password cannot be empty');
    }
    let md5;
    try {
        md5 = crypto.createHash('md5');
    }
    catch (err) {
        if (crypto.getFips()) {
            // This error is (slightly) more helpful than what comes from OpenSSL directly, e.g.
            // 'Error: error:060800C8:digital envelope routines:EVP_DigestInit_ex:disabled for FIPS'
            throw new Error('Auth mechanism SCRAM-SHA-1 is not supported in FIPS mode');
        }
        throw err;
    }
    md5.update(`${username}:mongo:${password}`, 'utf8');
    return md5.digest('hex');
}
// XOR two buffers
function xor(a, b) {
    if (!Buffer.isBuffer(a)) {
        a = Buffer.from(a);
    }
    if (!Buffer.isBuffer(b)) {
        b = Buffer.from(b);
    }
    const length = Math.max(a.length, b.length);
    const res = [];
    for (let i = 0; i < length; i += 1) {
        res.push(a[i] ^ b[i]);
    }
    return Buffer.from(res).toString('base64');
}
function H(method, text) {
    return crypto.createHash(method).update(text).digest();
}
function HMAC(method, key, text) {
    return crypto.createHmac(method, key).update(text).digest();
}
let _hiCache = {};
let _hiCacheCount = 0;
function _hiCachePurge() {
    _hiCache = {};
    _hiCacheCount = 0;
}
const hiLengthMap = {
    sha256: 32,
    sha1: 20
};
function HI(data, salt, iterations, cryptoMethod) {
    // omit the work if already generated
    const key = [data, salt.toString('base64'), iterations].join('_');
    if (_hiCache[key] != null) {
        return _hiCache[key];
    }
    // generate the salt
    const saltedData = crypto.pbkdf2Sync(data, salt, iterations, hiLengthMap[cryptoMethod], cryptoMethod);
    // cache a copy to speed up the next lookup, but prevent unbounded cache growth
    if (_hiCacheCount >= 200) {
        _hiCachePurge();
    }
    _hiCache[key] = saltedData;
    _hiCacheCount += 1;
    return saltedData;
}
function compareDigest(lhs, rhs) {
    if (lhs.length !== rhs.length) {
        return false;
    }
    if (typeof crypto.timingSafeEqual === 'function') {
        return crypto.timingSafeEqual(lhs, rhs);
    }
    let result = 0;
    for (let i = 0; i < lhs.length; i++) {
        result |= lhs[i] ^ rhs[i];
    }
    return result === 0;
}
function resolveError(err, result) {
    if (err)
        return err;
    if (result) {
        if (result.$err || result.errmsg)
            return new error_1.MongoServerError(result);
    }
    return;
}
class ScramSHA1 extends ScramSHA {
    constructor() {
        super('sha1');
    }
}
exports.ScramSHA1 = ScramSHA1;
class ScramSHA256 extends ScramSHA {
    constructor() {
        super('sha256');
    }
}
exports.ScramSHA256 = ScramSHA256;
//# sourceMappingURL=scram.js.map
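The proof math in continueScramConversation follows RFC 5802: SaltedPassword = PBKDF2(password, salt, i), ClientKey = HMAC(SaltedPassword, 'Client Key'), StoredKey = H(ClientKey), and ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage). A condensed, runnable restatement using the same crypto primitives (all inputs are placeholders, not a real exchange):

const crypto = require('crypto');

const method = 'sha256';                                // 'sha1' for SCRAM-SHA-1
const password = 'prepped-password';                    // saslprep'd for SCRAM-SHA-256
const salt = Buffer.from('QSXCR+Q6sek8bf92', 'base64'); // server-provided
const iterations = 4096;                                // server-provided, must be >= 4096
const authMessage = 'client-first,server-first,c=biws,r=...'; // concatenation built above

const saltedPassword = crypto.pbkdf2Sync(password, salt, iterations, 32, method);
const clientKey = crypto.createHmac(method, saltedPassword).update('Client Key').digest();
const storedKey = crypto.createHash(method).update(clientKey).digest();
const clientSignature = crypto.createHmac(method, storedKey).update(authMessage).digest();
const proof = Buffer.alloc(clientKey.length);
for (let i = 0; i < clientKey.length; i++) {
    proof[i] = clientKey[i] ^ clientSignature[i]; // byte-wise XOR
}
const clientProof = `p=${proof.toString('base64')}`; // sent in the saslContinue payload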
node_modules/mongodb/lib/cmap/auth/scram.js.map (generated, vendored, normal file; 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/cmap/auth/x509.js (generated, vendored, normal file; 39 lines added)
@@ -0,0 +1,39 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.X509 = void 0;
const error_1 = require("../../error");
const utils_1 = require("../../utils");
const auth_provider_1 = require("./auth_provider");
class X509 extends auth_provider_1.AuthProvider {
    prepare(handshakeDoc, authContext, callback) {
        const { credentials } = authContext;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        Object.assign(handshakeDoc, {
            speculativeAuthenticate: x509AuthenticateCommand(credentials)
        });
        callback(undefined, handshakeDoc);
    }
    auth(authContext, callback) {
        const connection = authContext.connection;
        const credentials = authContext.credentials;
        if (!credentials) {
            return callback(new error_1.MongoMissingCredentialsError('AuthContext must provide credentials.'));
        }
        const response = authContext.response;
        if (response && response.speculativeAuthenticate) {
            return callback();
        }
        connection.command((0, utils_1.ns)('$external.$cmd'), x509AuthenticateCommand(credentials), undefined, callback);
    }
}
exports.X509 = X509;
function x509AuthenticateCommand(credentials) {
    const command = { authenticate: 1, mechanism: 'MONGODB-X509' };
    if (credentials.username) {
        command.user = credentials.username;
    }
    return command;
}
//# sourceMappingURL=x509.js.map
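X509 is the one provider here that normally completes inside the handshake: prepare attaches the authenticate command as speculativeAuthenticate, and auth becomes a no-op once the server has answered it. The command itself is tiny; a sketch of both shapes it can take (the subject DN is a placeholder):

function x509CommandSketch(username) {
    const command = { authenticate: 1, mechanism: 'MONGODB-X509' };
    if (username) {
        command.user = username; // certificate subject DN, e.g. 'CN=app,OU=example'
    }
    return command;
}

// x509CommandSketch()                      -> server derives the user from the client certificate
// x509CommandSketch('CN=app,OU=example')   -> explicit user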
node_modules/mongodb/lib/cmap/auth/x509.js.map (generated, vendored, normal file; 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"x509.js","sourceRoot":"","sources":["../../../src/cmap/auth/x509.ts"],"names":[],"mappings":";;;AACA,uCAA2D;AAC3D,uCAA2C;AAE3C,mDAA4D;AAG5D,MAAa,IAAK,SAAQ,4BAAY;IAC3B,OAAO,CACd,YAA+B,EAC/B,WAAwB,EACxB,QAAkB;QAElB,MAAM,EAAE,WAAW,EAAE,GAAG,WAAW,CAAC;QACpC,IAAI,CAAC,WAAW,EAAE;YAChB,OAAO,QAAQ,CAAC,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC,CAAC;SAC5F;QACD,MAAM,CAAC,MAAM,CAAC,YAAY,EAAE;YAC1B,uBAAuB,EAAE,uBAAuB,CAAC,WAAW,CAAC;SAC9D,CAAC,CAAC;QAEH,QAAQ,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;IACpC,CAAC;IAEQ,IAAI,CAAC,WAAwB,EAAE,QAAkB;QACxD,MAAM,UAAU,GAAG,WAAW,CAAC,UAAU,CAAC;QAC1C,MAAM,WAAW,GAAG,WAAW,CAAC,WAAW,CAAC;QAC5C,IAAI,CAAC,WAAW,EAAE;YAChB,OAAO,QAAQ,CAAC,IAAI,oCAA4B,CAAC,uCAAuC,CAAC,CAAC,CAAC;SAC5F;QACD,MAAM,QAAQ,GAAG,WAAW,CAAC,QAAQ,CAAC;QAEtC,IAAI,QAAQ,IAAI,QAAQ,CAAC,uBAAuB,EAAE;YAChD,OAAO,QAAQ,EAAE,CAAC;SACnB;QAED,UAAU,CAAC,OAAO,CAChB,IAAA,UAAE,EAAC,gBAAgB,CAAC,EACpB,uBAAuB,CAAC,WAAW,CAAC,EACpC,SAAS,EACT,QAAQ,CACT,CAAC;IACJ,CAAC;CACF;AApCD,oBAoCC;AAED,SAAS,uBAAuB,CAAC,WAA6B;IAC5D,MAAM,OAAO,GAAa,EAAE,YAAY,EAAE,CAAC,EAAE,SAAS,EAAE,cAAc,EAAE,CAAC;IACzE,IAAI,WAAW,CAAC,QAAQ,EAAE;QACxB,OAAO,CAAC,IAAI,GAAG,WAAW,CAAC,QAAQ,CAAC;KACrC;IAED,OAAO,OAAO,CAAC;AACjB,CAAC"}
node_modules/mongodb/lib/cmap/command_monitoring_events.js (generated, vendored, normal file; 242 lines added)
|
|
@ -0,0 +1,242 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.CommandFailedEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = void 0;
|
||||
const constants_1 = require("../constants");
|
||||
const utils_1 = require("../utils");
|
||||
const commands_1 = require("./commands");
|
||||
/**
|
||||
* An event indicating the start of a given
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class CommandStartedEvent {
|
||||
/**
|
||||
* Create a started event
|
||||
*
|
||||
* @internal
|
||||
* @param pool - the pool that originated the command
|
||||
* @param command - the command
|
||||
*/
|
||||
constructor(connection, command) {
|
||||
const cmd = extractCommand(command);
|
||||
const commandName = extractCommandName(cmd);
|
||||
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
|
||||
// TODO: remove in major revision, this is not spec behavior
|
||||
if (SENSITIVE_COMMANDS.has(commandName)) {
|
||||
this.commandObj = {};
|
||||
this.commandObj[commandName] = true;
|
||||
}
|
||||
this.address = address;
|
||||
this.connectionId = connectionId;
|
||||
this.serviceId = serviceId;
|
||||
this.requestId = command.requestId;
|
||||
this.databaseName = databaseName(command);
|
||||
this.commandName = commandName;
|
||||
this.command = maybeRedact(commandName, cmd, cmd);
|
||||
}
|
||||
/* @internal */
|
||||
get hasServiceId() {
|
||||
return !!this.serviceId;
|
||||
}
|
||||
}
|
||||
exports.CommandStartedEvent = CommandStartedEvent;
/**
 * An event indicating the success of a given command
 * @public
 * @category Event
 */
class CommandSucceededEvent {
    /**
     * Create a succeeded event
     *
     * @internal
     * @param pool - the pool that originated the command
     * @param command - the command
     * @param reply - the reply for this command from the server
     * @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
     */
    constructor(connection, command, reply, started) {
        const cmd = extractCommand(command);
        const commandName = extractCommandName(cmd);
        const { address, connectionId, serviceId } = extractConnectionDetails(connection);
        this.address = address;
        this.connectionId = connectionId;
        this.serviceId = serviceId;
        this.requestId = command.requestId;
        this.commandName = commandName;
        this.duration = (0, utils_1.calculateDurationInMs)(started);
        this.reply = maybeRedact(commandName, cmd, extractReply(command, reply));
    }
    /* @internal */
    get hasServiceId() {
        return !!this.serviceId;
    }
}
exports.CommandSucceededEvent = CommandSucceededEvent;
/**
 * An event indicating the failure of a given command
 * @public
 * @category Event
 */
class CommandFailedEvent {
    /**
     * Create a failure event
     *
     * @internal
     * @param pool - the pool that originated the command
     * @param command - the command
     * @param error - the generated error or a server error response
     * @param started - a high resolution tuple timestamp of when the command was first sent, to calculate duration
     */
    constructor(connection, command, error, started) {
        const cmd = extractCommand(command);
        const commandName = extractCommandName(cmd);
        const { address, connectionId, serviceId } = extractConnectionDetails(connection);
        this.address = address;
        this.connectionId = connectionId;
        this.serviceId = serviceId;
        this.requestId = command.requestId;
        this.commandName = commandName;
        this.duration = (0, utils_1.calculateDurationInMs)(started);
        this.failure = maybeRedact(commandName, cmd, error);
    }
    /* @internal */
    get hasServiceId() {
        return !!this.serviceId;
    }
}
exports.CommandFailedEvent = CommandFailedEvent;
/** Commands that we want to redact because of the sensitive nature of their contents */
const SENSITIVE_COMMANDS = new Set([
    'authenticate',
    'saslStart',
    'saslContinue',
    'getnonce',
    'createUser',
    'updateUser',
    'copydbgetnonce',
    'copydbsaslstart',
    'copydb'
]);
const HELLO_COMMANDS = new Set(['hello', constants_1.LEGACY_HELLO_COMMAND, constants_1.LEGACY_HELLO_COMMAND_CAMEL_CASE]);
// helper methods
const extractCommandName = (commandDoc) => Object.keys(commandDoc)[0];
const namespace = (command) => command.ns;
const databaseName = (command) => command.ns.split('.')[0];
const collectionName = (command) => command.ns.split('.')[1];
const maybeRedact = (commandName, commandDoc, result) => SENSITIVE_COMMANDS.has(commandName) ||
    (HELLO_COMMANDS.has(commandName) && commandDoc.speculativeAuthenticate)
    ? {}
    : result;
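/*
 * Illustration (not part of the source): maybeRedact() swaps the payload of
 * sensitive commands for an empty document while passing others through:
 *
 *   maybeRedact('saslStart', { saslStart: 1 }, { saslStart: 1, payload: '...' }); // => {}
 *   maybeRedact('find', { find: 'users' }, { find: 'users' });                   // => { find: 'users' }
 *
 * A hello command is only redacted when it carries speculativeAuthenticate.
 */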
const LEGACY_FIND_QUERY_MAP = {
    $query: 'filter',
    $orderby: 'sort',
    $hint: 'hint',
    $comment: 'comment',
    $maxScan: 'maxScan',
    $max: 'max',
    $min: 'min',
    $returnKey: 'returnKey',
    $showDiskLoc: 'showRecordId',
    $maxTimeMS: 'maxTimeMS',
    $snapshot: 'snapshot'
};
const LEGACY_FIND_OPTIONS_MAP = {
    numberToSkip: 'skip',
    numberToReturn: 'batchSize',
    returnFieldSelector: 'projection'
};
const OP_QUERY_KEYS = [
    'tailable',
    'oplogReplay',
    'noCursorTimeout',
    'awaitData',
    'partial',
    'exhaust'
];
/** Extract the actual command from the query, possibly up-converting if it's a legacy format */
function extractCommand(command) {
    if (command instanceof commands_1.Msg) {
        return (0, utils_1.deepCopy)(command.command);
    }
    if (command.query?.$query) {
        let result;
        if (command.ns === 'admin.$cmd') {
            // up-convert legacy command
            result = Object.assign({}, command.query.$query);
        }
        else {
            // up-convert legacy find command
            result = { find: collectionName(command) };
            Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
                if (command.query[key] != null) {
                    result[LEGACY_FIND_QUERY_MAP[key]] = (0, utils_1.deepCopy)(command.query[key]);
                }
            });
        }
        Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
            const legacyKey = key;
            if (command[legacyKey] != null) {
                result[LEGACY_FIND_OPTIONS_MAP[legacyKey]] = (0, utils_1.deepCopy)(command[legacyKey]);
            }
        });
        OP_QUERY_KEYS.forEach(key => {
            if (command[key]) {
                result[key] = command[key];
            }
        });
        if (command.pre32Limit != null) {
            result.limit = command.pre32Limit;
        }
        if (command.query.$explain) {
            return { explain: result };
        }
        return result;
    }
    const clonedQuery = {};
    const clonedCommand = {};
    if (command.query) {
        for (const k in command.query) {
            clonedQuery[k] = (0, utils_1.deepCopy)(command.query[k]);
        }
        clonedCommand.query = clonedQuery;
    }
    for (const k in command) {
        if (k === 'query')
            continue;
        clonedCommand[k] = (0, utils_1.deepCopy)(command[k]);
    }
    return command.query ? clonedQuery : clonedCommand;
}
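/*
 * Illustration (not part of the source): a legacy OP_QUERY find such as
 *
 *   { ns: 'app.users', query: { $query: { age: { $gt: 21 } }, $orderby: { age: 1 } }, numberToReturn: 10 }
 *
 * is up-converted to the modern command shape
 *
 *   { find: 'users', filter: { age: { $gt: 21 } }, sort: { age: 1 }, batchSize: 10 }
 */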
function extractReply(command, reply) {
    if (!reply) {
        return reply;
    }
    if (command instanceof commands_1.Msg) {
        return (0, utils_1.deepCopy)(reply.result ? reply.result : reply);
    }
    // is this a legacy find command?
    if (command.query && command.query.$query != null) {
        return {
            ok: 1,
            cursor: {
                id: (0, utils_1.deepCopy)(reply.cursorId),
                ns: namespace(command),
                firstBatch: (0, utils_1.deepCopy)(reply.documents)
            }
        };
    }
    return (0, utils_1.deepCopy)(reply.result ? reply.result : reply);
}
function extractConnectionDetails(connection) {
    let connectionId;
    if ('id' in connection) {
        connectionId = connection.id;
    }
    return {
        address: connection.address,
        serviceId: connection.serviceId,
        connectionId
    };
}
//# sourceMappingURL=command_monitoring_events.js.map
1
node_modules/mongodb/lib/cmap/command_monitoring_events.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
487
node_modules/mongodb/lib/cmap/commands.js
generated
vendored
Normal file
@ -0,0 +1,487 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.BinMsg = exports.Msg = exports.Response = exports.Query = void 0;
const BSON = require("../bson");
const error_1 = require("../error");
const read_preference_1 = require("../read_preference");
const utils_1 = require("../utils");
const constants_1 = require("./wire_protocol/constants");
// Incrementing request id
let _requestId = 0;
// Query flags
const OPTS_TAILABLE_CURSOR = 2;
const OPTS_SECONDARY = 4;
const OPTS_OPLOG_REPLAY = 8;
const OPTS_NO_CURSOR_TIMEOUT = 16;
const OPTS_AWAIT_DATA = 32;
const OPTS_EXHAUST = 64;
const OPTS_PARTIAL = 128;
// Response flags
const CURSOR_NOT_FOUND = 1;
const QUERY_FAILURE = 2;
const SHARD_CONFIG_STALE = 4;
const AWAIT_CAPABLE = 8;
/**************************************************************
 * QUERY
 **************************************************************/
/** @internal */
class Query {
    constructor(ns, query, options) {
        // Basic options needed to be passed in
        // TODO(NODE-3483): Replace with MongoCommandError
        if (ns == null)
            throw new error_1.MongoRuntimeError('Namespace must be specified for query');
        // TODO(NODE-3483): Replace with MongoCommandError
        if (query == null)
            throw new error_1.MongoRuntimeError('A query document must be specified for query');
        // Validate that we are not passing 0x00 in the collection name
        if (ns.indexOf('\x00') !== -1) {
            // TODO(NODE-3483): Use MongoNamespace static method
            throw new error_1.MongoRuntimeError('Namespace cannot contain a null character');
        }
        // Basic options
        this.ns = ns;
        this.query = query;
        // Additional options
        this.numberToSkip = options.numberToSkip || 0;
        this.numberToReturn = options.numberToReturn || 0;
        this.returnFieldSelector = options.returnFieldSelector || undefined;
        this.requestId = Query.getRequestId();
        // special case for pre-3.2 find commands, delete ASAP
        this.pre32Limit = options.pre32Limit;
        // Serialization option
        this.serializeFunctions =
            typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
        this.ignoreUndefined =
            typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
        this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
        this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
        this.batchSize = this.numberToReturn;
        // Flags
        this.tailable = false;
        this.secondaryOk = typeof options.secondaryOk === 'boolean' ? options.secondaryOk : false;
        this.oplogReplay = false;
        this.noCursorTimeout = false;
        this.awaitData = false;
        this.exhaust = false;
        this.partial = false;
    }
    /** Assign next request Id. */
    incRequestId() {
        this.requestId = _requestId++;
    }
    /** Peek next request Id. */
    nextRequestId() {
        return _requestId + 1;
    }
    /** Increment then return next request Id. */
    static getRequestId() {
        return ++_requestId;
    }
    // Uses a single allocated buffer for the process, avoiding multiple memory allocations
    toBin() {
        const buffers = [];
        let projection = null;
        // Set up the flags
        let flags = 0;
        if (this.tailable) {
            flags |= OPTS_TAILABLE_CURSOR;
        }
        if (this.secondaryOk) {
            flags |= OPTS_SECONDARY;
        }
        if (this.oplogReplay) {
            flags |= OPTS_OPLOG_REPLAY;
        }
        if (this.noCursorTimeout) {
            flags |= OPTS_NO_CURSOR_TIMEOUT;
        }
        if (this.awaitData) {
            flags |= OPTS_AWAIT_DATA;
        }
        if (this.exhaust) {
            flags |= OPTS_EXHAUST;
        }
        if (this.partial) {
            flags |= OPTS_PARTIAL;
        }
        // If batchSize is different to this.numberToReturn
        if (this.batchSize !== this.numberToReturn)
            this.numberToReturn = this.batchSize;
        // Allocate write protocol header buffer
        const header = Buffer.alloc(4 * 4 + // Header
            4 + // Flags
            Buffer.byteLength(this.ns) +
            1 + // namespace
            4 + // numberToSkip
            4 // numberToReturn
        );
        // Add header to buffers
        buffers.push(header);
        // Serialize the query
        const query = BSON.serialize(this.query, {
            checkKeys: this.checkKeys,
            serializeFunctions: this.serializeFunctions,
            ignoreUndefined: this.ignoreUndefined
        });
        // Add query document
        buffers.push(query);
        if (this.returnFieldSelector && Object.keys(this.returnFieldSelector).length > 0) {
            // Serialize the projection document
            projection = BSON.serialize(this.returnFieldSelector, {
                checkKeys: this.checkKeys,
                serializeFunctions: this.serializeFunctions,
                ignoreUndefined: this.ignoreUndefined
            });
            // Add projection document
            buffers.push(projection);
        }
        // Total message size
        const totalLength = header.length + query.length + (projection ? projection.length : 0);
        // Set up the index
        let index = 4;
        // Write total document length
        header[3] = (totalLength >> 24) & 0xff;
        header[2] = (totalLength >> 16) & 0xff;
        header[1] = (totalLength >> 8) & 0xff;
        header[0] = totalLength & 0xff;
        // Write header information requestId
        header[index + 3] = (this.requestId >> 24) & 0xff;
        header[index + 2] = (this.requestId >> 16) & 0xff;
        header[index + 1] = (this.requestId >> 8) & 0xff;
        header[index] = this.requestId & 0xff;
        index = index + 4;
        // Write header information responseTo
        header[index + 3] = (0 >> 24) & 0xff;
        header[index + 2] = (0 >> 16) & 0xff;
        header[index + 1] = (0 >> 8) & 0xff;
        header[index] = 0 & 0xff;
        index = index + 4;
        // Write header information OP_QUERY
        header[index + 3] = (constants_1.OP_QUERY >> 24) & 0xff;
        header[index + 2] = (constants_1.OP_QUERY >> 16) & 0xff;
        header[index + 1] = (constants_1.OP_QUERY >> 8) & 0xff;
        header[index] = constants_1.OP_QUERY & 0xff;
        index = index + 4;
        // Write header information flags
        header[index + 3] = (flags >> 24) & 0xff;
        header[index + 2] = (flags >> 16) & 0xff;
        header[index + 1] = (flags >> 8) & 0xff;
        header[index] = flags & 0xff;
        index = index + 4;
        // Write collection name
        index = index + header.write(this.ns, index, 'utf8') + 1;
        header[index - 1] = 0;
        // Write header information flags numberToSkip
        header[index + 3] = (this.numberToSkip >> 24) & 0xff;
        header[index + 2] = (this.numberToSkip >> 16) & 0xff;
        header[index + 1] = (this.numberToSkip >> 8) & 0xff;
        header[index] = this.numberToSkip & 0xff;
        index = index + 4;
        // Write header information flags numberToReturn
        header[index + 3] = (this.numberToReturn >> 24) & 0xff;
        header[index + 2] = (this.numberToReturn >> 16) & 0xff;
        header[index + 1] = (this.numberToReturn >> 8) & 0xff;
        header[index] = this.numberToReturn & 0xff;
        index = index + 4;
        // Return the buffers
        return buffers;
    }
}
exports.Query = Query;
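/*
 * Note (illustrative, not part of the source): the manual byte stores in
 * toBin() are a little-endian int32 encoding, byte-for-byte equivalent to
 *
 *   header.writeInt32LE(totalLength, 0);
 *   header.writeInt32LE(this.requestId, 4);
 *   header.writeInt32LE(0, 8);                     // responseTo
 *   header.writeInt32LE(constants_1.OP_QUERY, 12); // opCode
 *
 * which is the form Msg.toBin() below uses.
 */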
/** @internal */
class Response {
    constructor(message, msgHeader, msgBody, opts) {
        this.documents = new Array(0);
        this.parsed = false;
        this.raw = message;
        this.data = msgBody;
        this.opts = opts ?? {
            useBigInt64: false,
            promoteLongs: true,
            promoteValues: true,
            promoteBuffers: false,
            bsonRegExp: false
        };
        // Read the message header
        this.length = msgHeader.length;
        this.requestId = msgHeader.requestId;
        this.responseTo = msgHeader.responseTo;
        this.opCode = msgHeader.opCode;
        this.fromCompressed = msgHeader.fromCompressed;
        // Flag values
        this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
        this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
        this.promoteValues =
            typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
        this.promoteBuffers =
            typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
        this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
    }
    isParsed() {
        return this.parsed;
    }
    parse(options) {
        // Don't parse again if not needed
        if (this.parsed)
            return;
        options = options ?? {};
        // Allow the return of raw documents instead of parsing
        const raw = options.raw || false;
        const documentsReturnedIn = options.documentsReturnedIn || null;
        const useBigInt64 = options.useBigInt64 ?? this.opts.useBigInt64;
        const promoteLongs = options.promoteLongs ?? this.opts.promoteLongs;
        const promoteValues = options.promoteValues ?? this.opts.promoteValues;
        const promoteBuffers = options.promoteBuffers ?? this.opts.promoteBuffers;
        const bsonRegExp = options.bsonRegExp ?? this.opts.bsonRegExp;
        let bsonSize;
        // Set up the options
        const _options = {
            useBigInt64,
            promoteLongs,
            promoteValues,
            promoteBuffers,
            bsonRegExp
        };
        // Position within OP_REPLY at which documents start
        // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply)
        this.index = 20;
        // Read the message body
        this.responseFlags = this.data.readInt32LE(0);
        this.cursorId = new BSON.Long(this.data.readInt32LE(4), this.data.readInt32LE(8));
        this.startingFrom = this.data.readInt32LE(12);
        this.numberReturned = this.data.readInt32LE(16);
        // Preallocate document array
        this.documents = new Array(this.numberReturned);
        this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
        this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
        this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
        this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;
        // Parse Body
        for (let i = 0; i < this.numberReturned; i++) {
            bsonSize =
                this.data[this.index] |
                    (this.data[this.index + 1] << 8) |
                    (this.data[this.index + 2] << 16) |
                    (this.data[this.index + 3] << 24);
            // If we have raw results specified slice the return document
            if (raw) {
                this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
            }
            else {
                this.documents[i] = BSON.deserialize(this.data.slice(this.index, this.index + bsonSize), _options);
            }
            // Adjust the index
            this.index = this.index + bsonSize;
        }
        if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
            const fieldsAsRaw = {};
            fieldsAsRaw[documentsReturnedIn] = true;
            _options.fieldsAsRaw = fieldsAsRaw;
            const doc = BSON.deserialize(this.documents[0], _options);
            this.documents = [doc];
        }
        // Set parsed
        this.parsed = true;
    }
}
exports.Response = Response;
// Implementation of OP_MSG spec:
// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst
//
// struct Section {
//   uint8 payloadType;
//   union payload {
//       document document; // payloadType == 0
//       struct sequence { // payloadType == 1
//           int32 size;
//           cstring identifier;
//           document* documents;
//       };
//   };
// };
// struct OP_MSG {
//   struct MsgHeader {
//       int32 messageLength;
//       int32 requestID;
//       int32 responseTo;
//       int32 opCode = 2013;
//   };
//   uint32 flagBits;
//   Section+ sections;
//   [uint32 checksum;]
// };
// Msg Flags
const OPTS_CHECKSUM_PRESENT = 1;
const OPTS_MORE_TO_COME = 2;
const OPTS_EXHAUST_ALLOWED = 1 << 16;
/** @internal */
class Msg {
    constructor(ns, command, options) {
        // Basic options needed to be passed in
        if (command == null)
            throw new error_1.MongoInvalidArgumentError('Query document must be specified for query');
        // Basic options
        this.ns = ns;
        this.command = command;
        this.command.$db = (0, utils_1.databaseNamespace)(ns);
        if (options.readPreference && options.readPreference.mode !== read_preference_1.ReadPreference.PRIMARY) {
            this.command.$readPreference = options.readPreference.toJSON();
        }
        // Ensure empty options
        this.options = options ?? {};
        // Additional options
        this.requestId = options.requestId ? options.requestId : Msg.getRequestId();
        // Serialization option
        this.serializeFunctions =
            typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
        this.ignoreUndefined =
            typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
        this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
        this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
        // flags
        this.checksumPresent = false;
        this.moreToCome = options.moreToCome || false;
        this.exhaustAllowed =
            typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;
    }
    toBin() {
        const buffers = [];
        let flags = 0;
        if (this.checksumPresent) {
            flags |= OPTS_CHECKSUM_PRESENT;
        }
        if (this.moreToCome) {
            flags |= OPTS_MORE_TO_COME;
        }
        if (this.exhaustAllowed) {
            flags |= OPTS_EXHAUST_ALLOWED;
        }
        const header = Buffer.alloc(4 * 4 + // Header
            4 // Flags
        );
        buffers.push(header);
        let totalLength = header.length;
        const command = this.command;
        totalLength += this.makeDocumentSegment(buffers, command);
        header.writeInt32LE(totalLength, 0); // messageLength
        header.writeInt32LE(this.requestId, 4); // requestID
        header.writeInt32LE(0, 8); // responseTo
        header.writeInt32LE(constants_1.OP_MSG, 12); // opCode
        header.writeUInt32LE(flags, 16); // flags
        return buffers;
    }
    makeDocumentSegment(buffers, document) {
        const payloadTypeBuffer = Buffer.alloc(1);
        payloadTypeBuffer[0] = 0;
        const documentBuffer = this.serializeBson(document);
        buffers.push(payloadTypeBuffer);
        buffers.push(documentBuffer);
        return payloadTypeBuffer.length + documentBuffer.length;
    }
    serializeBson(document) {
        return BSON.serialize(document, {
            checkKeys: this.checkKeys,
            serializeFunctions: this.serializeFunctions,
            ignoreUndefined: this.ignoreUndefined
        });
    }
    static getRequestId() {
        _requestId = (_requestId + 1) & 0x7fffffff;
        return _requestId;
    }
}
exports.Msg = Msg;
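/*
 * Note (illustrative, not part of the source): Msg.getRequestId() masks with
 * 0x7fffffff so request ids wrap within the positive int32 range instead of
 * overflowing, e.g. with _requestId === 0x7fffffff the next call returns 0.
 */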
/** @internal */
class BinMsg {
    constructor(message, msgHeader, msgBody, opts) {
        this.parsed = false;
        this.raw = message;
        this.data = msgBody;
        this.opts = opts ?? {
            useBigInt64: false,
            promoteLongs: true,
            promoteValues: true,
            promoteBuffers: false,
            bsonRegExp: false
        };
        // Read the message header
        this.length = msgHeader.length;
        this.requestId = msgHeader.requestId;
        this.responseTo = msgHeader.responseTo;
        this.opCode = msgHeader.opCode;
        this.fromCompressed = msgHeader.fromCompressed;
        // Read response flags
        this.responseFlags = msgBody.readInt32LE(0);
        this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0;
        this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0;
        this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0;
        this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
        this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
        this.promoteValues =
            typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
        this.promoteBuffers =
            typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
        this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
        this.documents = [];
    }
    isParsed() {
        return this.parsed;
    }
    parse(options) {
        // Don't parse again if not needed
        if (this.parsed)
            return;
        options = options ?? {};
        this.index = 4;
        // Allow the return of raw documents instead of parsing
        const raw = options.raw || false;
        const documentsReturnedIn = options.documentsReturnedIn || null;
        const useBigInt64 = options.useBigInt64 ?? this.opts.useBigInt64;
        const promoteLongs = options.promoteLongs ?? this.opts.promoteLongs;
        const promoteValues = options.promoteValues ?? this.opts.promoteValues;
        const promoteBuffers = options.promoteBuffers ?? this.opts.promoteBuffers;
        const bsonRegExp = options.bsonRegExp ?? this.opts.bsonRegExp;
        const validation = this.parseBsonSerializationOptions(options);
        // Set up the options
        const bsonOptions = {
            useBigInt64,
            promoteLongs,
            promoteValues,
            promoteBuffers,
            bsonRegExp,
            validation
            // Due to the strictness of the BSON library's validation option we need this cast
        };
        while (this.index < this.data.length) {
            const payloadType = this.data.readUInt8(this.index++);
            if (payloadType === 0) {
                const bsonSize = this.data.readUInt32LE(this.index);
                const bin = this.data.slice(this.index, this.index + bsonSize);
                this.documents.push(raw ? bin : BSON.deserialize(bin, bsonOptions));
                this.index += bsonSize;
            }
            else if (payloadType === 1) {
                // It was decided that no driver makes use of payload type 1
                // TODO(NODE-3483): Replace with MongoDeprecationError
                throw new error_1.MongoRuntimeError('OP_MSG Payload Type 1 detected unsupported protocol');
            }
        }
        if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
            const fieldsAsRaw = {};
            fieldsAsRaw[documentsReturnedIn] = true;
            bsonOptions.fieldsAsRaw = fieldsAsRaw;
            const doc = BSON.deserialize(this.documents[0], bsonOptions);
            this.documents = [doc];
        }
        this.parsed = true;
    }
    parseBsonSerializationOptions({ enableUtf8Validation }) {
        if (enableUtf8Validation === false) {
            return { utf8: false };
        }
        return { utf8: { writeErrors: false } };
    }
}
exports.BinMsg = BinMsg;
//# sourceMappingURL=commands.js.map
1
node_modules/mongodb/lib/cmap/commands.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
394
node_modules/mongodb/lib/cmap/connect.js
generated
vendored
Normal file
@ -0,0 +1,394 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LEGAL_TCP_SOCKET_OPTIONS = exports.LEGAL_TLS_SOCKET_OPTIONS = exports.prepareHandshakeDocument = exports.connect = void 0;
const net = require("net");
const socks_1 = require("socks");
const tls = require("tls");
const bson_1 = require("../bson");
const constants_1 = require("../constants");
const error_1 = require("../error");
const utils_1 = require("../utils");
const auth_provider_1 = require("./auth/auth_provider");
const gssapi_1 = require("./auth/gssapi");
const mongocr_1 = require("./auth/mongocr");
const mongodb_aws_1 = require("./auth/mongodb_aws");
const plain_1 = require("./auth/plain");
const providers_1 = require("./auth/providers");
const scram_1 = require("./auth/scram");
const x509_1 = require("./auth/x509");
const connection_1 = require("./connection");
const constants_2 = require("./wire_protocol/constants");
const AUTH_PROVIDERS = new Map([
    [providers_1.AuthMechanism.MONGODB_AWS, new mongodb_aws_1.MongoDBAWS()],
    [providers_1.AuthMechanism.MONGODB_CR, new mongocr_1.MongoCR()],
    [providers_1.AuthMechanism.MONGODB_GSSAPI, new gssapi_1.GSSAPI()],
    [providers_1.AuthMechanism.MONGODB_PLAIN, new plain_1.Plain()],
    [providers_1.AuthMechanism.MONGODB_SCRAM_SHA1, new scram_1.ScramSHA1()],
    [providers_1.AuthMechanism.MONGODB_SCRAM_SHA256, new scram_1.ScramSHA256()],
    [providers_1.AuthMechanism.MONGODB_X509, new x509_1.X509()]
]);
function connect(options, callback) {
    makeConnection({ ...options, existingSocket: undefined }, (err, socket) => {
        if (err || !socket) {
            return callback(err);
        }
        let ConnectionType = options.connectionType ?? connection_1.Connection;
        if (options.autoEncrypter) {
            ConnectionType = connection_1.CryptoConnection;
        }
        performInitialHandshake(new ConnectionType(socket, options), options, callback);
    });
}
exports.connect = connect;
function checkSupportedServer(hello, options) {
    const serverVersionHighEnough = hello &&
        (typeof hello.maxWireVersion === 'number' || hello.maxWireVersion instanceof bson_1.Int32) &&
        hello.maxWireVersion >= constants_2.MIN_SUPPORTED_WIRE_VERSION;
    const serverVersionLowEnough = hello &&
        (typeof hello.minWireVersion === 'number' || hello.minWireVersion instanceof bson_1.Int32) &&
        hello.minWireVersion <= constants_2.MAX_SUPPORTED_WIRE_VERSION;
    if (serverVersionHighEnough) {
        if (serverVersionLowEnough) {
            return null;
        }
        const message = `Server at ${options.hostAddress} reports minimum wire version ${JSON.stringify(hello.minWireVersion)}, but this version of the Node.js Driver requires at most ${constants_2.MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${constants_2.MAX_SUPPORTED_SERVER_VERSION})`;
        return new error_1.MongoCompatibilityError(message);
    }
    const message = `Server at ${options.hostAddress} reports maximum wire version ${JSON.stringify(hello.maxWireVersion) ?? 0}, but this version of the Node.js Driver requires at least ${constants_2.MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${constants_2.MIN_SUPPORTED_SERVER_VERSION})`;
    return new error_1.MongoCompatibilityError(message);
}
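/*
 * Illustration (not part of the source): a server is compatible when its
 * [minWireVersion, maxWireVersion] range overlaps the driver's supported
 * range. For example, assuming the driver supports wire versions 6 through 17:
 *
 *   checkSupportedServer({ minWireVersion: 0, maxWireVersion: 13 }, options);  // => null (compatible)
 *   checkSupportedServer({ minWireVersion: 21, maxWireVersion: 25 }, options); // => MongoCompatibilityError
 */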
function performInitialHandshake(conn, options, _callback) {
    const callback = function (err, ret) {
        if (err && conn) {
            conn.destroy({ force: false });
        }
        _callback(err, ret);
    };
    const credentials = options.credentials;
    if (credentials) {
        if (!(credentials.mechanism === providers_1.AuthMechanism.MONGODB_DEFAULT) &&
            !AUTH_PROVIDERS.get(credentials.mechanism)) {
            callback(new error_1.MongoInvalidArgumentError(`AuthMechanism '${credentials.mechanism}' not supported`));
            return;
        }
    }
    const authContext = new auth_provider_1.AuthContext(conn, credentials, options);
    prepareHandshakeDocument(authContext, (err, handshakeDoc) => {
        if (err || !handshakeDoc) {
            return callback(err);
        }
        const handshakeOptions = Object.assign({}, options);
        if (typeof options.connectTimeoutMS === 'number') {
            // The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS
            handshakeOptions.socketTimeoutMS = options.connectTimeoutMS;
        }
        const start = new Date().getTime();
        conn.command((0, utils_1.ns)('admin.$cmd'), handshakeDoc, handshakeOptions, (err, response) => {
            if (err) {
                callback(err);
                return;
            }
            if (response?.ok === 0) {
                callback(new error_1.MongoServerError(response));
                return;
            }
            if (!('isWritablePrimary' in response)) {
                // Provide hello-style response document.
                response.isWritablePrimary = response[constants_1.LEGACY_HELLO_COMMAND];
            }
            if (response.helloOk) {
                conn.helloOk = true;
            }
            const supportedServerErr = checkSupportedServer(response, options);
            if (supportedServerErr) {
                callback(supportedServerErr);
                return;
            }
            if (options.loadBalanced) {
                if (!response.serviceId) {
                    return callback(new error_1.MongoCompatibilityError('Driver attempted to initialize in load balancing mode, ' +
                        'but the server does not support this mode.'));
                }
            }
            // NOTE: This is metadata attached to the connection while porting away from
            // handshake being done in the `Server` class. Likely, it should be
            // relocated, or at very least restructured.
            conn.hello = response;
            conn.lastHelloMS = new Date().getTime() - start;
            if (!response.arbiterOnly && credentials) {
                // store the response on auth context
                authContext.response = response;
                const resolvedCredentials = credentials.resolveAuthMechanism(response);
                const provider = AUTH_PROVIDERS.get(resolvedCredentials.mechanism);
                if (!provider) {
                    return callback(new error_1.MongoInvalidArgumentError(`No AuthProvider for ${resolvedCredentials.mechanism} defined.`));
                }
                provider.auth(authContext, err => {
                    if (err) {
                        if (err instanceof error_1.MongoError) {
                            err.addErrorLabel(error_1.MongoErrorLabel.HandshakeError);
                            if ((0, error_1.needsRetryableWriteLabel)(err, response.maxWireVersion)) {
                                err.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
                            }
                        }
                        return callback(err);
                    }
                    callback(undefined, conn);
                });
                return;
            }
            callback(undefined, conn);
        });
    });
}
/**
 * @internal
 *
 * This function is only exposed for testing purposes.
 */
function prepareHandshakeDocument(authContext, callback) {
    const options = authContext.options;
    const compressors = options.compressors ? options.compressors : [];
    const { serverApi } = authContext.connection;
    const handshakeDoc = {
        [serverApi?.version ? 'hello' : constants_1.LEGACY_HELLO_COMMAND]: 1,
        helloOk: true,
        client: options.metadata || (0, utils_1.makeClientMetadata)(options),
        compression: compressors
    };
    if (options.loadBalanced === true) {
        handshakeDoc.loadBalanced = true;
    }
    const credentials = authContext.credentials;
    if (credentials) {
        if (credentials.mechanism === providers_1.AuthMechanism.MONGODB_DEFAULT && credentials.username) {
            handshakeDoc.saslSupportedMechs = `${credentials.source}.${credentials.username}`;
            const provider = AUTH_PROVIDERS.get(providers_1.AuthMechanism.MONGODB_SCRAM_SHA256);
            if (!provider) {
                // This auth mechanism is always present.
                return callback(new error_1.MongoInvalidArgumentError(`No AuthProvider for ${providers_1.AuthMechanism.MONGODB_SCRAM_SHA256} defined.`));
            }
            return provider.prepare(handshakeDoc, authContext, callback);
        }
        const provider = AUTH_PROVIDERS.get(credentials.mechanism);
        if (!provider) {
            return callback(new error_1.MongoInvalidArgumentError(`No AuthProvider for ${credentials.mechanism} defined.`));
        }
        return provider.prepare(handshakeDoc, authContext, callback);
    }
    callback(undefined, handshakeDoc);
}
exports.prepareHandshakeDocument = prepareHandshakeDocument;
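/*
 * Illustration (not part of the source): for an unauthenticated connection
 * with no serverApi version configured, the prepared handshake document is
 * roughly
 *
 *   { [LEGACY_HELLO_COMMAND]: 1, helloOk: true, client: { driver: {...}, os: {...} }, compression: [] }
 *
 * and uses { hello: 1, ... } instead when a stable API version is set.
 */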
/** @public */
exports.LEGAL_TLS_SOCKET_OPTIONS = [
    'ALPNProtocols',
    'ca',
    'cert',
    'checkServerIdentity',
    'ciphers',
    'crl',
    'ecdhCurve',
    'key',
    'minDHSize',
    'passphrase',
    'pfx',
    'rejectUnauthorized',
    'secureContext',
    'secureProtocol',
    'servername',
    'session'
];
/** @public */
exports.LEGAL_TCP_SOCKET_OPTIONS = [
    'family',
    'hints',
    'localAddress',
    'localPort',
    'lookup'
];
function parseConnectOptions(options) {
    const hostAddress = options.hostAddress;
    if (!hostAddress)
        throw new error_1.MongoInvalidArgumentError('Option "hostAddress" is required');
    const result = {};
    for (const name of exports.LEGAL_TCP_SOCKET_OPTIONS) {
        if (options[name] != null) {
            result[name] = options[name];
        }
    }
    if (typeof hostAddress.socketPath === 'string') {
        result.path = hostAddress.socketPath;
        return result;
    }
    else if (typeof hostAddress.host === 'string') {
        result.host = hostAddress.host;
        result.port = hostAddress.port;
        return result;
    }
    else {
        // This should never happen since we set up HostAddresses
        // But if we don't throw here the socket could hang until timeout
        // TODO(NODE-3483)
        throw new error_1.MongoRuntimeError(`Unexpected HostAddress ${JSON.stringify(hostAddress)}`);
    }
}
function parseSslOptions(options) {
    const result = parseConnectOptions(options);
    // Merge in valid SSL options
    for (const name of exports.LEGAL_TLS_SOCKET_OPTIONS) {
        if (options[name] != null) {
            result[name] = options[name];
        }
    }
    if (options.existingSocket) {
        result.socket = options.existingSocket;
    }
    // Set default sni servername to be the same as host
    if (result.servername == null && result.host && !net.isIP(result.host)) {
        result.servername = result.host;
    }
    return result;
}
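/*
 * Illustration (not part of the source): the SNI default applies only to
 * hostnames, never IP literals, because net.isIP() is truthy for addresses.
 * Given a HostAddress-like value (hypothetical shapes for illustration):
 *
 *   parseSslOptions({ hostAddress: { host: 'db.example.com', port: 27017 } }).servername // => 'db.example.com'
 *   parseSslOptions({ hostAddress: { host: '10.0.0.5', port: 27017 } }).servername      // => undefined
 */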
const SOCKET_ERROR_EVENT_LIST = ['error', 'close', 'timeout', 'parseError'];
const SOCKET_ERROR_EVENTS = new Set(SOCKET_ERROR_EVENT_LIST);
function makeConnection(options, _callback) {
    const useTLS = options.tls ?? false;
    const keepAlive = options.keepAlive ?? true;
    const socketTimeoutMS = options.socketTimeoutMS ?? Reflect.get(options, 'socketTimeout') ?? 0;
    const noDelay = options.noDelay ?? true;
    const connectTimeoutMS = options.connectTimeoutMS ?? 30000;
    const rejectUnauthorized = options.rejectUnauthorized ?? true;
    const keepAliveInitialDelay = ((options.keepAliveInitialDelay ?? 120000) > socketTimeoutMS
        ? Math.round(socketTimeoutMS / 2)
        : options.keepAliveInitialDelay) ?? 120000;
    const existingSocket = options.existingSocket;
    let socket;
    const callback = function (err, ret) {
        if (err && socket) {
            socket.destroy();
        }
        _callback(err, ret);
    };
    if (options.proxyHost != null) {
        // Currently, only Socks5 is supported.
        return makeSocks5Connection({
            ...options,
            connectTimeoutMS // Should always be present for Socks5
        }, callback);
    }
    if (useTLS) {
        const tlsSocket = tls.connect(parseSslOptions(options));
        if (typeof tlsSocket.disableRenegotiation === 'function') {
            tlsSocket.disableRenegotiation();
        }
        socket = tlsSocket;
    }
    else if (existingSocket) {
        // In the TLS case, parseSslOptions() sets options.socket to existingSocket,
        // so we only need to handle the non-TLS case here (where existingSocket
        // gives us all we need out of the box).
        socket = existingSocket;
    }
    else {
        socket = net.createConnection(parseConnectOptions(options));
    }
    socket.setKeepAlive(keepAlive, keepAliveInitialDelay);
    socket.setTimeout(connectTimeoutMS);
    socket.setNoDelay(noDelay);
    const connectEvent = useTLS ? 'secureConnect' : 'connect';
    let cancellationHandler;
    function errorHandler(eventName) {
        return (err) => {
            SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
            if (cancellationHandler && options.cancellationToken) {
                options.cancellationToken.removeListener('cancel', cancellationHandler);
            }
            socket.removeListener(connectEvent, connectHandler);
            callback(connectionFailureError(eventName, err));
        };
    }
    function connectHandler() {
        SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
        if (cancellationHandler && options.cancellationToken) {
            options.cancellationToken.removeListener('cancel', cancellationHandler);
        }
        if ('authorizationError' in socket) {
            if (socket.authorizationError && rejectUnauthorized) {
                return callback(socket.authorizationError);
            }
        }
        socket.setTimeout(socketTimeoutMS);
        callback(undefined, socket);
    }
    SOCKET_ERROR_EVENTS.forEach(event => socket.once(event, errorHandler(event)));
    if (options.cancellationToken) {
        cancellationHandler = errorHandler('cancel');
        options.cancellationToken.once('cancel', cancellationHandler);
    }
    if (existingSocket) {
        process.nextTick(connectHandler);
    }
    else {
        socket.once(connectEvent, connectHandler);
    }
}
function makeSocks5Connection(options, callback) {
    const hostAddress = utils_1.HostAddress.fromHostPort(options.proxyHost ?? '', // proxyHost is guaranteed to be set here
    options.proxyPort ?? 1080);
    // First, connect to the proxy server itself:
    makeConnection({
        ...options,
        hostAddress,
        tls: false,
        proxyHost: undefined
    }, (err, rawSocket) => {
        if (err) {
            return callback(err);
        }
        const destination = parseConnectOptions(options);
        if (typeof destination.host !== 'string' || typeof destination.port !== 'number') {
            return callback(new error_1.MongoInvalidArgumentError('Can only make Socks5 connections to TCP hosts'));
        }
        // Then, establish the Socks5 proxy connection:
        socks_1.SocksClient.createConnection({
            existing_socket: rawSocket,
            timeout: options.connectTimeoutMS,
            command: 'connect',
            destination: {
                host: destination.host,
                port: destination.port
            },
            proxy: {
                // host and port are ignored because we pass existing_socket
                host: 'iLoveJavaScript',
                port: 0,
                type: 5,
                userId: options.proxyUsername || undefined,
                password: options.proxyPassword || undefined
            }
        }).then(({ socket }) => {
            // Finally, now treat the resulting duplex stream as the
            // socket over which we send and receive wire protocol messages:
            makeConnection({
                ...options,
                existingSocket: socket,
                proxyHost: undefined
            }, callback);
        }, error => callback(connectionFailureError('error', error)));
    });
}
function connectionFailureError(type, err) {
    switch (type) {
        case 'error':
            return new error_1.MongoNetworkError(err);
        case 'timeout':
            return new error_1.MongoNetworkTimeoutError('connection timed out');
        case 'close':
            return new error_1.MongoNetworkError('connection closed');
        case 'cancel':
            return new error_1.MongoNetworkError('connection establishment was cancelled');
        default:
            return new error_1.MongoNetworkError('unknown network error');
    }
}
//# sourceMappingURL=connect.js.map
1
node_modules/mongodb/lib/cmap/connect.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
496
node_modules/mongodb/lib/cmap/connection.js
generated
vendored
Normal file
@ -0,0 +1,496 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.hasSessionSupport = exports.CryptoConnection = exports.Connection = void 0;
const timers_1 = require("timers");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const sessions_1 = require("../sessions");
const utils_1 = require("../utils");
const command_monitoring_events_1 = require("./command_monitoring_events");
const commands_1 = require("./commands");
const message_stream_1 = require("./message_stream");
const stream_description_1 = require("./stream_description");
const shared_1 = require("./wire_protocol/shared");
/** @internal */
const kStream = Symbol('stream');
/** @internal */
const kQueue = Symbol('queue');
/** @internal */
const kMessageStream = Symbol('messageStream');
/** @internal */
const kGeneration = Symbol('generation');
/** @internal */
const kLastUseTime = Symbol('lastUseTime');
/** @internal */
const kClusterTime = Symbol('clusterTime');
/** @internal */
const kDescription = Symbol('description');
/** @internal */
const kHello = Symbol('hello');
/** @internal */
const kAutoEncrypter = Symbol('autoEncrypter');
/** @internal */
const kDelayedTimeoutId = Symbol('delayedTimeoutId');
const INVALID_QUEUE_SIZE = 'Connection internal queue contains more than 1 operation description';
/** @internal */
class Connection extends mongo_types_1.TypedEventEmitter {
    constructor(stream, options) {
        super();
        this.id = options.id;
        this.address = streamIdentifier(stream, options);
        this.socketTimeoutMS = options.socketTimeoutMS ?? 0;
        this.monitorCommands = options.monitorCommands;
        this.serverApi = options.serverApi;
        this.closed = false;
        this[kHello] = null;
        this[kClusterTime] = null;
        this[kDescription] = new stream_description_1.StreamDescription(this.address, options);
        this[kGeneration] = options.generation;
        this[kLastUseTime] = (0, utils_1.now)();
        // setup parser stream and message handling
        this[kQueue] = new Map();
        this[kMessageStream] = new message_stream_1.MessageStream({
            ...options,
            maxBsonMessageSize: this.hello?.maxBsonMessageSize
        });
        this[kStream] = stream;
        this[kDelayedTimeoutId] = null;
        this[kMessageStream].on('message', message => this.onMessage(message));
        this[kMessageStream].on('error', error => this.onError(error));
        this[kStream].on('close', () => this.onClose());
        this[kStream].on('timeout', () => this.onTimeout());
        this[kStream].on('error', () => {
            /* ignore errors, listen to `close` instead */
        });
        // hook the message stream up to the passed in stream
        this[kStream].pipe(this[kMessageStream]);
        this[kMessageStream].pipe(this[kStream]);
    }
    get description() {
        return this[kDescription];
    }
    get hello() {
        return this[kHello];
    }
    // the `connect` method stores the result of the handshake hello on the connection
    set hello(response) {
        this[kDescription].receiveResponse(response);
        this[kDescription] = Object.freeze(this[kDescription]);
        // TODO: remove this, and only use the `StreamDescription` in the future
        this[kHello] = response;
    }
    // Set whether the message stream is for a monitoring connection.
    set isMonitoringConnection(value) {
        this[kMessageStream].isMonitoringConnection = value;
    }
    get isMonitoringConnection() {
        return this[kMessageStream].isMonitoringConnection;
    }
    get serviceId() {
        return this.hello?.serviceId;
    }
    get loadBalanced() {
        return this.description.loadBalanced;
    }
    get generation() {
        return this[kGeneration] || 0;
    }
    set generation(generation) {
        this[kGeneration] = generation;
    }
    get idleTime() {
        return (0, utils_1.calculateDurationInMs)(this[kLastUseTime]);
    }
    get clusterTime() {
        return this[kClusterTime];
    }
    get stream() {
        return this[kStream];
    }
    markAvailable() {
        this[kLastUseTime] = (0, utils_1.now)();
    }
    onError(error) {
        this.cleanup(true, error);
    }
    onClose() {
        const message = `connection ${this.id} to ${this.address} closed`;
        this.cleanup(true, new error_1.MongoNetworkError(message));
    }
    onTimeout() {
        this[kDelayedTimeoutId] = (0, timers_1.setTimeout)(() => {
            const message = `connection ${this.id} to ${this.address} timed out`;
            const beforeHandshake = this.hello == null;
            this.cleanup(true, new error_1.MongoNetworkTimeoutError(message, { beforeHandshake }));
        }, 1).unref(); // No need for this timer to hold the event loop open
    }
    onMessage(message) {
        const delayedTimeoutId = this[kDelayedTimeoutId];
        if (delayedTimeoutId != null) {
            (0, timers_1.clearTimeout)(delayedTimeoutId);
            this[kDelayedTimeoutId] = null;
        }
        // always emit the message, in case we are streaming
        this.emit('message', message);
        let operationDescription = this[kQueue].get(message.responseTo);
        if (!operationDescription && this.isMonitoringConnection) {
            // This is how we recover when the initial hello's requestId is not
            // the responseTo when hello responses have been skipped:
            // First check if the map is of invalid size
            if (this[kQueue].size > 1) {
                this.cleanup(true, new error_1.MongoRuntimeError(INVALID_QUEUE_SIZE));
            }
            else {
                // Get the first orphaned operation description.
                const entry = this[kQueue].entries().next();
                if (entry.value != null) {
                    const [requestId, orphaned] = entry.value;
                    // If the orphaned operation description exists then set it.
                    operationDescription = orphaned;
                    // Remove the entry with the bad request id from the queue.
                    this[kQueue].delete(requestId);
                }
            }
        }
        if (!operationDescription) {
            return;
        }
        const callback = operationDescription.cb;
        // SERVER-45775: For exhaust responses we should be able to use the same requestId to
        // track response, however the server currently synthetically produces remote requests
        // making the `responseTo` change on each response
        this[kQueue].delete(message.responseTo);
        if ('moreToCome' in message && message.moreToCome) {
            // If the operation description check above does find an orphaned
            // description and sets the operationDescription then this line will put one
            // back in the queue with the correct requestId and will resolve not being able
            // to find the next one via the responseTo of the next streaming hello.
            this[kQueue].set(message.requestId, operationDescription);
        }
        else if (operationDescription.socketTimeoutOverride) {
            this[kStream].setTimeout(this.socketTimeoutMS);
        }
        try {
            // Pass in the entire description because it has BSON parsing options
            message.parse(operationDescription);
        }
        catch (err) {
            // If this error is generated by our own code, it will already have the correct class applied
            // if it is not, then it is coming from a catastrophic data parse failure or the BSON library
            // in either case, it should not be wrapped
            callback(err);
            return;
        }
        if (message.documents[0]) {
            const document = message.documents[0];
            const session = operationDescription.session;
            if (session) {
                (0, sessions_1.updateSessionFromResponse)(session, document);
            }
            if (document.$clusterTime) {
                this[kClusterTime] = document.$clusterTime;
                this.emit(Connection.CLUSTER_TIME_RECEIVED, document.$clusterTime);
            }
            if (operationDescription.command) {
                if (document.writeConcernError) {
                    callback(new error_1.MongoWriteConcernError(document.writeConcernError, document), document);
                    return;
                }
                if (document.ok === 0 || document.$err || document.errmsg || document.code) {
                    callback(new error_1.MongoServerError(document));
                    return;
                }
            }
            else {
                // Pre 3.2 support
                if (document.ok === 0 || document.$err || document.errmsg) {
                    callback(new error_1.MongoServerError(document));
                    return;
                }
            }
        }
        callback(undefined, message.documents[0]);
    }
    destroy(options, callback) {
        if (this.closed) {
            process.nextTick(() => callback?.());
            return;
        }
        if (typeof callback === 'function') {
            this.once('close', () => process.nextTick(() => callback()));
        }
        // load balanced mode requires that these listeners remain on the connection
        // after cleanup on timeouts, errors or close so we remove them before calling
        // cleanup.
        this.removeAllListeners(Connection.PINNED);
        this.removeAllListeners(Connection.UNPINNED);
        const message = `connection ${this.id} to ${this.address} closed`;
        this.cleanup(options.force, new error_1.MongoNetworkError(message));
    }
    /**
     * A method that cleans up the connection. When `force` is true, this method
     * forcibly destroys the socket.
     *
     * If an error is provided, any in-flight operations will be closed with the error.
     *
     * This method does nothing if the connection is already closed.
     */
    cleanup(force, error) {
        if (this.closed) {
            return;
        }
        this.closed = true;
        const completeCleanup = () => {
            for (const op of this[kQueue].values()) {
                op.cb(error);
            }
            this[kQueue].clear();
            this.emit(Connection.CLOSE);
        };
        this[kStream].removeAllListeners();
        this[kMessageStream].removeAllListeners();
        this[kMessageStream].destroy();
        if (force) {
            this[kStream].destroy();
            completeCleanup();
            return;
        }
        if (!this[kStream].writableEnded) {
            this[kStream].end(() => {
                this[kStream].destroy();
                completeCleanup();
            });
        }
        else {
            completeCleanup();
        }
    }
    command(ns, cmd, options, callback) {
        const readPreference = (0, shared_1.getReadPreference)(cmd, options);
        const shouldUseOpMsg = supportsOpMsg(this);
        const session = options?.session;
        let clusterTime = this.clusterTime;
        let finalCmd = Object.assign({}, cmd);
        if (this.serverApi) {
            const { version, strict, deprecationErrors } = this.serverApi;
            finalCmd.apiVersion = version;
            if (strict != null)
                finalCmd.apiStrict = strict;
            if (deprecationErrors != null)
                finalCmd.apiDeprecationErrors = deprecationErrors;
        }
        if (hasSessionSupport(this) && session) {
            if (session.clusterTime &&
                clusterTime &&
                session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)) {
                clusterTime = session.clusterTime;
            }
            const err = (0, sessions_1.applySession)(session, finalCmd, options);
            if (err) {
                return callback(err);
            }
        }
        // if we have a known cluster time, gossip it
        if (clusterTime) {
            finalCmd.$clusterTime = clusterTime;
        }
        if ((0, shared_1.isSharded)(this) && !shouldUseOpMsg && readPreference && readPreference.mode !== 'primary') {
            finalCmd = {
                $query: finalCmd,
                $readPreference: readPreference.toJSON()
            };
        }
        const commandOptions = Object.assign({
            command: true,
            numberToSkip: 0,
            numberToReturn: -1,
            checkKeys: false,
            // This value is not overridable
            secondaryOk: readPreference.secondaryOk()
        }, options);
        const cmdNs = `${ns.db}.$cmd`;
        const message = shouldUseOpMsg
            ? new commands_1.Msg(cmdNs, finalCmd, commandOptions)
            : new commands_1.Query(cmdNs, finalCmd, commandOptions);
        try {
            write(this, message, commandOptions, callback);
        }
        catch (err) {
            callback(err);
        }
    }
}
exports.Connection = Connection;
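/*
 * Illustration (not part of the source): with a stable API configured, a
 * session in play, and a known cluster time, command() decorates the user's
 * command before serialization, roughly
 *
 *   { ping: 1 }
 *   // becomes
 *   { ping: 1, apiVersion: '1', lsid: { id: <UUID> }, $clusterTime: { clusterTime: <Timestamp>, ... } }
 */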
/** @event */
Connection.COMMAND_STARTED = constants_1.COMMAND_STARTED;
/** @event */
Connection.COMMAND_SUCCEEDED = constants_1.COMMAND_SUCCEEDED;
/** @event */
Connection.COMMAND_FAILED = constants_1.COMMAND_FAILED;
/** @event */
Connection.CLUSTER_TIME_RECEIVED = constants_1.CLUSTER_TIME_RECEIVED;
/** @event */
Connection.CLOSE = constants_1.CLOSE;
/** @event */
Connection.MESSAGE = constants_1.MESSAGE;
/** @event */
Connection.PINNED = constants_1.PINNED;
/** @event */
Connection.UNPINNED = constants_1.UNPINNED;
|
||||
/** @internal */
class CryptoConnection extends Connection {
    constructor(stream, options) {
        super(stream, options);
        this[kAutoEncrypter] = options.autoEncrypter;
    }
    /** @internal @override */
    command(ns, cmd, options, callback) {
        const autoEncrypter = this[kAutoEncrypter];
        if (!autoEncrypter) {
            return callback(new error_1.MongoMissingDependencyError('No AutoEncrypter available for encryption'));
        }
        const serverWireVersion = (0, utils_1.maxWireVersion)(this);
        if (serverWireVersion === 0) {
            // This means the initial handshake hasn't happened yet
            return super.command(ns, cmd, options, callback);
        }
        if (serverWireVersion < 8) {
            callback(new error_1.MongoCompatibilityError('Auto-encryption requires a minimum MongoDB version of 4.2'));
            return;
        }
        // Save sort or indexKeys based on the command being run.
        // The encrypt API serializes our JS objects to BSON to pass to the native code layer
        // and then deserializes the encrypted result; the protocol-level components
        // of the command (ex. sort) are then converted to JS objects, potentially losing
        // important key order information. These fields are never encrypted, so we can save
        // the values from before encryption and replace them after encryption has been performed.
        const sort = cmd.find || cmd.findAndModify ? cmd.sort : null;
        const indexKeys = cmd.createIndexes
            ? cmd.indexes.map((index) => index.key)
            : null;
        autoEncrypter.encrypt(ns.toString(), cmd, options, (err, encrypted) => {
            if (err || encrypted == null) {
                callback(err, null);
                return;
            }
            // Replace the saved values
            if (sort != null && (cmd.find || cmd.findAndModify)) {
                encrypted.sort = sort;
            }
            if (indexKeys != null && cmd.createIndexes) {
                for (const [offset, index] of indexKeys.entries()) {
                    encrypted.indexes[offset].key = index;
                }
            }
            super.command(ns, encrypted, options, (err, response) => {
                if (err || response == null) {
                    callback(err, response);
                    return;
                }
                autoEncrypter.decrypt(response, options, callback);
            });
        });
    }
}
exports.CryptoConnection = CryptoConnection;
/** @internal */
function hasSessionSupport(conn) {
    const description = conn.description;
    return description.logicalSessionTimeoutMinutes != null || !!description.loadBalanced;
}
exports.hasSessionSupport = hasSessionSupport;
function supportsOpMsg(conn) {
    const description = conn.description;
    if (description == null) {
        return false;
    }
    return (0, utils_1.maxWireVersion)(conn) >= 6 && !description.__nodejs_mock_server__;
}
function streamIdentifier(stream, options) {
    if (options.proxyHost) {
        // If proxy options are specified, the properties of `stream` itself
        // will not accurately reflect what endpoint this is connected to.
        return options.hostAddress.toString();
    }
    const { remoteAddress, remotePort } = stream;
    if (typeof remoteAddress === 'string' && typeof remotePort === 'number') {
        return utils_1.HostAddress.fromHostPort(remoteAddress, remotePort).toString();
    }
    return (0, utils_1.uuidV4)().toString('hex');
}
function write(conn, command, options, callback) {
    options = options ?? {};
    const operationDescription = {
        requestId: command.requestId,
        cb: callback,
        session: options.session,
        noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false,
        documentsReturnedIn: options.documentsReturnedIn,
        command: !!options.command,
        // for BSON parsing
        useBigInt64: typeof options.useBigInt64 === 'boolean' ? options.useBigInt64 : false,
        promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
        promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
        promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
        bsonRegExp: typeof options.bsonRegExp === 'boolean' ? options.bsonRegExp : false,
        enableUtf8Validation: typeof options.enableUtf8Validation === 'boolean' ? options.enableUtf8Validation : true,
        raw: typeof options.raw === 'boolean' ? options.raw : false,
        started: 0
    };
    if (conn[kDescription] && conn[kDescription].compressor) {
        operationDescription.agreedCompressor = conn[kDescription].compressor;
        if (conn[kDescription].zlibCompressionLevel) {
            operationDescription.zlibCompressionLevel = conn[kDescription].zlibCompressionLevel;
        }
    }
    if (typeof options.socketTimeoutMS === 'number') {
        operationDescription.socketTimeoutOverride = true;
        conn[kStream].setTimeout(options.socketTimeoutMS);
    }
    // if command monitoring is enabled we need to modify the callback here
    if (conn.monitorCommands) {
        conn.emit(Connection.COMMAND_STARTED, new command_monitoring_events_1.CommandStartedEvent(conn, command));
        operationDescription.started = (0, utils_1.now)();
        operationDescription.cb = (err, reply) => {
            // Command monitoring spec states that if ok is 1, then we must always emit
            // a command succeeded event, even if there's an error. Write concern errors
            // will have an ok: 1 in their reply.
            if (err && reply?.ok !== 1) {
                conn.emit(Connection.COMMAND_FAILED, new command_monitoring_events_1.CommandFailedEvent(conn, command, err, operationDescription.started));
            }
            else {
                if (reply && (reply.ok === 0 || reply.$err)) {
                    conn.emit(Connection.COMMAND_FAILED, new command_monitoring_events_1.CommandFailedEvent(conn, command, reply, operationDescription.started));
                }
                else {
                    conn.emit(Connection.COMMAND_SUCCEEDED, new command_monitoring_events_1.CommandSucceededEvent(conn, command, reply, operationDescription.started));
                }
            }
            if (typeof callback === 'function') {
                // Since we're passing through the reply with the write concern error now, we
                // need it not to be provided to the original callback in this case so
                // retryability does not get tricked into thinking the command actually
                // succeeded.
                callback(err, err instanceof error_1.MongoWriteConcernError ? undefined : reply);
            }
        };
    }
    if (!operationDescription.noResponse) {
        conn[kQueue].set(operationDescription.requestId, operationDescription);
    }
    try {
        conn[kMessageStream].writeCommand(command, operationDescription);
    }
    catch (e) {
        if (!operationDescription.noResponse) {
            conn[kQueue].delete(operationDescription.requestId);
            operationDescription.cb(e);
            return;
        }
    }
    if (operationDescription.noResponse) {
        operationDescription.cb();
    }
}
//# sourceMappingURL=connection.js.map
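For illustration (a sketch, not part of the vendored file): the COMMAND_STARTED/COMMAND_SUCCEEDED/COMMAND_FAILED events wired up in write() surface on the public MongoClient when monitorCommands is enabled. The connection string below is a placeholder.

const { MongoClient } = require('mongodb');
// monitorCommands: true makes connections emit the COMMAND_* events set up in write()
const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });
client.on('commandStarted', ev => console.log('started:', ev.commandName, ev.requestId));
client.on('commandSucceeded', ev => console.log('succeeded:', ev.commandName, ev.duration));
client.on('commandFailed', ev => console.log('failed:', ev.commandName, ev.failure));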
1
node_modules/mongodb/lib/cmap/connection.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/connection.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
591
node_modules/mongodb/lib/cmap/connection_pool.js
generated
vendored
Normal file
591
node_modules/mongodb/lib/cmap/connection_pool.js
generated
vendored
Normal file
|
|
@ -0,0 +1,591 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPool = exports.PoolState = void 0;
|
||||
const timers_1 = require("timers");
|
||||
const constants_1 = require("../constants");
|
||||
const error_1 = require("../error");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const utils_1 = require("../utils");
|
||||
const connect_1 = require("./connect");
|
||||
const connection_1 = require("./connection");
|
||||
const connection_pool_events_1 = require("./connection_pool_events");
|
||||
const errors_1 = require("./errors");
|
||||
const metrics_1 = require("./metrics");
|
||||
/** @internal */
|
||||
const kServer = Symbol('server');
|
||||
/** @internal */
|
||||
const kConnections = Symbol('connections');
|
||||
/** @internal */
|
||||
const kPending = Symbol('pending');
|
||||
/** @internal */
|
||||
const kCheckedOut = Symbol('checkedOut');
|
||||
/** @internal */
|
||||
const kMinPoolSizeTimer = Symbol('minPoolSizeTimer');
|
||||
/** @internal */
|
||||
const kGeneration = Symbol('generation');
|
||||
/** @internal */
|
||||
const kServiceGenerations = Symbol('serviceGenerations');
|
||||
/** @internal */
|
||||
const kConnectionCounter = Symbol('connectionCounter');
|
||||
/** @internal */
|
||||
const kCancellationToken = Symbol('cancellationToken');
|
||||
/** @internal */
|
||||
const kWaitQueue = Symbol('waitQueue');
|
||||
/** @internal */
|
||||
const kCancelled = Symbol('cancelled');
|
||||
/** @internal */
|
||||
const kMetrics = Symbol('metrics');
|
||||
/** @internal */
|
||||
const kProcessingWaitQueue = Symbol('processingWaitQueue');
|
||||
/** @internal */
|
||||
const kPoolState = Symbol('poolState');
|
||||
/** @internal */
|
||||
exports.PoolState = Object.freeze({
|
||||
paused: 'paused',
|
||||
ready: 'ready',
|
||||
closed: 'closed'
|
||||
});
|
||||
/**
 * A pool of connections which dynamically resizes, and emits events related to pool activity
 * @internal
 */
class ConnectionPool extends mongo_types_1.TypedEventEmitter {
    constructor(server, options) {
        super();
        this.options = Object.freeze({
            ...options,
            connectionType: connection_1.Connection,
            maxPoolSize: options.maxPoolSize ?? 100,
            minPoolSize: options.minPoolSize ?? 0,
            maxConnecting: options.maxConnecting ?? 2,
            maxIdleTimeMS: options.maxIdleTimeMS ?? 0,
            waitQueueTimeoutMS: options.waitQueueTimeoutMS ?? 0,
            minPoolSizeCheckFrequencyMS: options.minPoolSizeCheckFrequencyMS ?? 100,
            autoEncrypter: options.autoEncrypter,
            metadata: options.metadata
        });
        if (this.options.minPoolSize > this.options.maxPoolSize) {
            throw new error_1.MongoInvalidArgumentError('Connection pool minimum size must not be greater than maximum pool size');
        }
        this[kPoolState] = exports.PoolState.paused;
        this[kServer] = server;
        this[kConnections] = new utils_1.List();
        this[kPending] = 0;
        this[kCheckedOut] = new Set();
        this[kMinPoolSizeTimer] = undefined;
        this[kGeneration] = 0;
        this[kServiceGenerations] = new Map();
        this[kConnectionCounter] = (0, utils_1.makeCounter)(1);
        this[kCancellationToken] = new mongo_types_1.CancellationToken();
        this[kCancellationToken].setMaxListeners(Infinity);
        this[kWaitQueue] = new utils_1.List();
        this[kMetrics] = new metrics_1.ConnectionPoolMetrics();
        this[kProcessingWaitQueue] = false;
        process.nextTick(() => {
            this.emit(ConnectionPool.CONNECTION_POOL_CREATED, new connection_pool_events_1.ConnectionPoolCreatedEvent(this));
        });
    }
    /** The address of the endpoint the pool is connected to */
    get address() {
        return this.options.hostAddress.toString();
    }
    /**
     * Check if the pool has been closed
     *
     * TODO(NODE-3263): We can remove this property once shell no longer needs it
     */
    get closed() {
        return this[kPoolState] === exports.PoolState.closed;
    }
    /** An integer representing the SDAM generation of the pool */
    get generation() {
        return this[kGeneration];
    }
    /** An integer expressing how many total connections (available + pending + in use) the pool currently has */
    get totalConnectionCount() {
        return (this.availableConnectionCount + this.pendingConnectionCount + this.currentCheckedOutCount);
    }
    /** An integer expressing how many connections are currently available in the pool. */
    get availableConnectionCount() {
        return this[kConnections].length;
    }
    get pendingConnectionCount() {
        return this[kPending];
    }
    get currentCheckedOutCount() {
        return this[kCheckedOut].size;
    }
    get waitQueueSize() {
        return this[kWaitQueue].length;
    }
    get loadBalanced() {
        return this.options.loadBalanced;
    }
    get serviceGenerations() {
        return this[kServiceGenerations];
    }
    get serverError() {
        return this[kServer].description.error;
    }
    /**
     * This is exposed ONLY for use in mongosh, to enable
     * killing all connections if a user quits the shell with
     * operations in progress.
     *
     * This property may be removed as a part of NODE-3263.
     */
    get checkedOutConnections() {
        return this[kCheckedOut];
    }
    /**
     * Get the metrics information for the pool when a wait queue timeout occurs.
     */
    waitQueueErrorMetrics() {
        return this[kMetrics].info(this.options.maxPoolSize);
    }
    /**
     * Set the pool state to "ready"
     */
    ready() {
        if (this[kPoolState] !== exports.PoolState.paused) {
            return;
        }
        this[kPoolState] = exports.PoolState.ready;
        this.emit(ConnectionPool.CONNECTION_POOL_READY, new connection_pool_events_1.ConnectionPoolReadyEvent(this));
        (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
        this.ensureMinPoolSize();
    }
    /**
     * Check a connection out of this pool. The connection will continue to be tracked, but no reference to it
     * will be held by the pool. This means that if a connection is checked out it MUST be checked back in or
     * explicitly destroyed by the new owner.
     */
    checkOut(callback) {
        this.emit(ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new connection_pool_events_1.ConnectionCheckOutStartedEvent(this));
        const waitQueueMember = { callback };
        const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS;
        if (waitQueueTimeoutMS) {
            waitQueueMember.timer = (0, timers_1.setTimeout)(() => {
                waitQueueMember[kCancelled] = true;
                waitQueueMember.timer = undefined;
                this.emit(ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, 'timeout'));
                waitQueueMember.callback(new errors_1.WaitQueueTimeoutError(this.loadBalanced
                    ? this.waitQueueErrorMetrics()
                    : 'Timed out while checking out a connection from connection pool', this.address));
            }, waitQueueTimeoutMS);
        }
        this[kWaitQueue].push(waitQueueMember);
        process.nextTick(() => this.processWaitQueue());
    }
    /**
     * Check a connection into the pool.
     *
     * @param connection - The connection to check in
     */
    checkIn(connection) {
        if (!this[kCheckedOut].has(connection)) {
            return;
        }
        const poolClosed = this.closed;
        const stale = this.connectionIsStale(connection);
        const willDestroy = !!(poolClosed || stale || connection.closed);
        if (!willDestroy) {
            connection.markAvailable();
            this[kConnections].unshift(connection);
        }
        this[kCheckedOut].delete(connection);
        this.emit(ConnectionPool.CONNECTION_CHECKED_IN, new connection_pool_events_1.ConnectionCheckedInEvent(this, connection));
        if (willDestroy) {
            const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale';
            this.destroyConnection(connection, reason);
        }
        process.nextTick(() => this.processWaitQueue());
    }
    /**
     * Clear the pool
     *
     * Pool reset is handled by incrementing the pool's generation count. Any existing connection of a
     * previous generation will eventually be pruned during subsequent checkouts.
     */
    clear(options = {}) {
        if (this.closed) {
            return;
        }
        // handle load balanced case
        if (this.loadBalanced) {
            const { serviceId } = options;
            if (!serviceId) {
                throw new error_1.MongoRuntimeError('ConnectionPool.clear() called in load balanced mode with no serviceId.');
            }
            const sid = serviceId.toHexString();
            const generation = this.serviceGenerations.get(sid);
            // Only need to worry if the generation exists, since it should
            // always be there but typescript needs the check.
            if (generation == null) {
                throw new error_1.MongoRuntimeError('Service generations are required in load balancer mode.');
            }
            else {
                // Increment the generation for the service id.
                this.serviceGenerations.set(sid, generation + 1);
            }
            this.emit(ConnectionPool.CONNECTION_POOL_CLEARED, new connection_pool_events_1.ConnectionPoolClearedEvent(this, { serviceId }));
            return;
        }
        // handle non load-balanced case
        const interruptInUseConnections = options.interruptInUseConnections ?? false;
        const oldGeneration = this[kGeneration];
        this[kGeneration] += 1;
        const alreadyPaused = this[kPoolState] === exports.PoolState.paused;
        this[kPoolState] = exports.PoolState.paused;
        this.clearMinPoolSizeTimer();
        if (!alreadyPaused) {
            this.emit(ConnectionPool.CONNECTION_POOL_CLEARED, new connection_pool_events_1.ConnectionPoolClearedEvent(this, { interruptInUseConnections }));
        }
        if (interruptInUseConnections) {
            process.nextTick(() => this.interruptInUseConnections(oldGeneration));
        }
        this.processWaitQueue();
    }
    /**
     * Closes all stale in-use connections in the pool with a resumable PoolClearedOnNetworkError.
     *
     * Only connections where `connection.generation <= minGeneration` are killed.
     */
    interruptInUseConnections(minGeneration) {
        for (const connection of this[kCheckedOut]) {
            if (connection.generation <= minGeneration) {
                this.checkIn(connection);
                connection.onError(new errors_1.PoolClearedOnNetworkError(this));
            }
        }
    }
    close(_options, _cb) {
        let options = _options;
        const callback = (_cb ?? _options);
        if (typeof options === 'function') {
            options = {};
        }
        options = Object.assign({ force: false }, options);
        if (this.closed) {
            return callback();
        }
        // immediately cancel any in-flight connections
        this[kCancellationToken].emit('cancel');
        // end the connection counter
        if (typeof this[kConnectionCounter].return === 'function') {
            this[kConnectionCounter].return(undefined);
        }
        this[kPoolState] = exports.PoolState.closed;
        this.clearMinPoolSizeTimer();
        this.processWaitQueue();
        (0, utils_1.eachAsync)(this[kConnections].toArray(), (conn, cb) => {
            this.emit(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, conn, 'poolClosed'));
            conn.destroy({ force: !!options.force }, cb);
        }, err => {
            this[kConnections].clear();
            this.emit(ConnectionPool.CONNECTION_POOL_CLOSED, new connection_pool_events_1.ConnectionPoolClosedEvent(this));
            callback(err);
        });
    }
    /**
     * Runs a lambda with an implicitly checked out connection, checking that connection back in when the lambda
     * has completed by calling back.
     *
     * NOTE: please note the required signature of `fn`
     *
     * @remarks When in load balancer mode, connections can be pinned to cursors or transactions.
     * In these cases we pass the connection in to this method to ensure it is used and a new
     * connection is not checked out.
     *
     * @param conn - A pinned connection for use in load balancing mode.
     * @param fn - A function which operates on a managed connection
     * @param callback - The original callback
     */
    withConnection(conn, fn, callback) {
        if (conn) {
            // use the provided connection, and do _not_ check it in after execution
            fn(undefined, conn, (fnErr, result) => {
                if (typeof callback === 'function') {
                    if (fnErr) {
                        callback(fnErr);
                    }
                    else {
                        callback(undefined, result);
                    }
                }
            });
            return;
        }
        this.checkOut((err, conn) => {
            // don't callback with `err` here, we might want to act upon it inside `fn`
            fn(err, conn, (fnErr, result) => {
                if (typeof callback === 'function') {
                    if (fnErr) {
                        callback(fnErr);
                    }
                    else {
                        callback(undefined, result);
                    }
                }
                if (conn) {
                    this.checkIn(conn);
                }
            });
        });
    }
    /** Clear the min pool size timer */
    clearMinPoolSizeTimer() {
        const minPoolSizeTimer = this[kMinPoolSizeTimer];
        if (minPoolSizeTimer) {
            (0, timers_1.clearTimeout)(minPoolSizeTimer);
        }
    }
    destroyConnection(connection, reason) {
        this.emit(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, connection, reason));
        // destroy the connection
        process.nextTick(() => connection.destroy({ force: false }));
    }
    connectionIsStale(connection) {
        const serviceId = connection.serviceId;
        if (this.loadBalanced && serviceId) {
            const sid = serviceId.toHexString();
            const generation = this.serviceGenerations.get(sid);
            return connection.generation !== generation;
        }
        return connection.generation !== this[kGeneration];
    }
    connectionIsIdle(connection) {
        return !!(this.options.maxIdleTimeMS && connection.idleTime > this.options.maxIdleTimeMS);
    }
    /**
     * Destroys a connection if the connection is perished.
     *
     * @returns `true` if the connection was destroyed, `false` otherwise.
     */
    destroyConnectionIfPerished(connection) {
        const isStale = this.connectionIsStale(connection);
        const isIdle = this.connectionIsIdle(connection);
        if (!isStale && !isIdle && !connection.closed) {
            return false;
        }
        const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle';
        this.destroyConnection(connection, reason);
        return true;
    }
    createConnection(callback) {
        const connectOptions = {
            ...this.options,
            id: this[kConnectionCounter].next().value,
            generation: this[kGeneration],
            cancellationToken: this[kCancellationToken]
        };
        this[kPending]++;
        // This is our version of a "virtual" no-I/O connection as the spec requires
        this.emit(ConnectionPool.CONNECTION_CREATED, new connection_pool_events_1.ConnectionCreatedEvent(this, { id: connectOptions.id }));
        (0, connect_1.connect)(connectOptions, (err, connection) => {
            if (err || !connection) {
                this[kPending]--;
                this.emit(ConnectionPool.CONNECTION_CLOSED, new connection_pool_events_1.ConnectionClosedEvent(this, { id: connectOptions.id, serviceId: undefined }, 'error'));
                if (err instanceof error_1.MongoNetworkError || err instanceof error_1.MongoServerError) {
                    err.connectionGeneration = connectOptions.generation;
                }
                callback(err ?? new error_1.MongoRuntimeError('Connection creation failed without error'));
                return;
            }
            // The pool might have closed since we started trying to create a connection
            if (this[kPoolState] !== exports.PoolState.ready) {
                this[kPending]--;
                connection.destroy({ force: true });
                callback(this.closed ? new errors_1.PoolClosedError(this) : new errors_1.PoolClearedError(this));
                return;
            }
            // forward all events from the connection to the pool
            for (const event of [...constants_1.APM_EVENTS, connection_1.Connection.CLUSTER_TIME_RECEIVED]) {
                connection.on(event, (e) => this.emit(event, e));
            }
            if (this.loadBalanced) {
                connection.on(connection_1.Connection.PINNED, pinType => this[kMetrics].markPinned(pinType));
                connection.on(connection_1.Connection.UNPINNED, pinType => this[kMetrics].markUnpinned(pinType));
                const serviceId = connection.serviceId;
                if (serviceId) {
                    let generation;
                    const sid = serviceId.toHexString();
                    if ((generation = this.serviceGenerations.get(sid))) {
                        connection.generation = generation;
                    }
                    else {
                        this.serviceGenerations.set(sid, 0);
                        connection.generation = 0;
                    }
                }
            }
            connection.markAvailable();
            this.emit(ConnectionPool.CONNECTION_READY, new connection_pool_events_1.ConnectionReadyEvent(this, connection));
            this[kPending]--;
            callback(undefined, connection);
            return;
        });
    }
    ensureMinPoolSize() {
        const minPoolSize = this.options.minPoolSize;
        if (this[kPoolState] !== exports.PoolState.ready || minPoolSize === 0) {
            return;
        }
        this[kConnections].prune(connection => this.destroyConnectionIfPerished(connection));
        if (this.totalConnectionCount < minPoolSize &&
            this.pendingConnectionCount < this.options.maxConnecting) {
            // NOTE: ensureMinPoolSize should not try to get all the pending
            // connection permits because that potentially delays the availability of
            // the connection to a checkout request
            this.createConnection((err, connection) => {
                if (err) {
                    this[kServer].handleError(err);
                }
                if (!err && connection) {
                    this[kConnections].push(connection);
                    process.nextTick(() => this.processWaitQueue());
                }
                if (this[kPoolState] === exports.PoolState.ready) {
                    (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
                    this[kMinPoolSizeTimer] = (0, timers_1.setTimeout)(() => this.ensureMinPoolSize(), this.options.minPoolSizeCheckFrequencyMS);
                }
            });
        }
        else {
            (0, timers_1.clearTimeout)(this[kMinPoolSizeTimer]);
            this[kMinPoolSizeTimer] = (0, timers_1.setTimeout)(() => this.ensureMinPoolSize(), this.options.minPoolSizeCheckFrequencyMS);
        }
    }
    processWaitQueue() {
        if (this[kProcessingWaitQueue]) {
            return;
        }
        this[kProcessingWaitQueue] = true;
        while (this.waitQueueSize) {
            const waitQueueMember = this[kWaitQueue].first();
            if (!waitQueueMember) {
                this[kWaitQueue].shift();
                continue;
            }
            if (waitQueueMember[kCancelled]) {
                this[kWaitQueue].shift();
                continue;
            }
            if (this[kPoolState] !== exports.PoolState.ready) {
                const reason = this.closed ? 'poolClosed' : 'connectionError';
                const error = this.closed ? new errors_1.PoolClosedError(this) : new errors_1.PoolClearedError(this);
                this.emit(ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, reason));
                if (waitQueueMember.timer) {
                    (0, timers_1.clearTimeout)(waitQueueMember.timer);
                }
                this[kWaitQueue].shift();
                waitQueueMember.callback(error);
                continue;
            }
            if (!this.availableConnectionCount) {
                break;
            }
            const connection = this[kConnections].shift();
            if (!connection) {
                break;
            }
            if (!this.destroyConnectionIfPerished(connection)) {
                this[kCheckedOut].add(connection);
                this.emit(ConnectionPool.CONNECTION_CHECKED_OUT, new connection_pool_events_1.ConnectionCheckedOutEvent(this, connection));
                if (waitQueueMember.timer) {
                    (0, timers_1.clearTimeout)(waitQueueMember.timer);
                }
                this[kWaitQueue].shift();
                waitQueueMember.callback(undefined, connection);
            }
        }
        const { maxPoolSize, maxConnecting } = this.options;
        while (this.waitQueueSize > 0 &&
            this.pendingConnectionCount < maxConnecting &&
            (maxPoolSize === 0 || this.totalConnectionCount < maxPoolSize)) {
            const waitQueueMember = this[kWaitQueue].shift();
            if (!waitQueueMember || waitQueueMember[kCancelled]) {
                continue;
            }
            this.createConnection((err, connection) => {
                if (waitQueueMember[kCancelled]) {
                    if (!err && connection) {
                        this[kConnections].push(connection);
                    }
                }
                else {
                    if (err) {
                        this.emit(ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new connection_pool_events_1.ConnectionCheckOutFailedEvent(this, 'connectionError'));
                    }
                    else if (connection) {
                        this[kCheckedOut].add(connection);
                        this.emit(ConnectionPool.CONNECTION_CHECKED_OUT, new connection_pool_events_1.ConnectionCheckedOutEvent(this, connection));
                    }
                    if (waitQueueMember.timer) {
                        (0, timers_1.clearTimeout)(waitQueueMember.timer);
                    }
                    waitQueueMember.callback(err, connection);
                }
                process.nextTick(() => this.processWaitQueue());
            });
        }
        this[kProcessingWaitQueue] = false;
    }
}
exports.ConnectionPool = ConnectionPool;
/**
 * Emitted when the connection pool is created.
 * @event
 */
ConnectionPool.CONNECTION_POOL_CREATED = constants_1.CONNECTION_POOL_CREATED;
/**
 * Emitted once when the connection pool is closed
 * @event
 */
ConnectionPool.CONNECTION_POOL_CLOSED = constants_1.CONNECTION_POOL_CLOSED;
/**
 * Emitted each time the connection pool is cleared and its generation incremented
 * @event
 */
ConnectionPool.CONNECTION_POOL_CLEARED = constants_1.CONNECTION_POOL_CLEARED;
/**
 * Emitted each time the connection pool is marked ready
 * @event
 */
ConnectionPool.CONNECTION_POOL_READY = constants_1.CONNECTION_POOL_READY;
/**
 * Emitted when a connection is created.
 * @event
 */
ConnectionPool.CONNECTION_CREATED = constants_1.CONNECTION_CREATED;
/**
 * Emitted when a connection becomes established, and is ready to use
 * @event
 */
ConnectionPool.CONNECTION_READY = constants_1.CONNECTION_READY;
/**
 * Emitted when a connection is closed
 * @event
 */
ConnectionPool.CONNECTION_CLOSED = constants_1.CONNECTION_CLOSED;
/**
 * Emitted when an attempt to check out a connection begins
 * @event
 */
ConnectionPool.CONNECTION_CHECK_OUT_STARTED = constants_1.CONNECTION_CHECK_OUT_STARTED;
/**
 * Emitted when an attempt to check out a connection fails
 * @event
 */
ConnectionPool.CONNECTION_CHECK_OUT_FAILED = constants_1.CONNECTION_CHECK_OUT_FAILED;
/**
 * Emitted each time a connection is successfully checked out of the connection pool
 * @event
 */
ConnectionPool.CONNECTION_CHECKED_OUT = constants_1.CONNECTION_CHECKED_OUT;
/**
 * Emitted each time a connection is successfully checked into the connection pool
 * @event
 */
ConnectionPool.CONNECTION_CHECKED_IN = constants_1.CONNECTION_CHECKED_IN;
//# sourceMappingURL=connection_pool.js.map
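For illustration (a sketch, not part of the vendored file): the CMAP events emitted by ConnectionPool above are forwarded to the public MongoClient, so pool behavior can be observed without touching internals. The connection string and pool sizes are placeholders.

const { MongoClient } = require('mongodb');
const client = new MongoClient('mongodb://localhost:27017', { maxPoolSize: 10, minPoolSize: 2 });
// Every pool event carries `time` and `address`; connection-level events also carry `connectionId`.
client.on('connectionPoolCreated', ev => console.log('pool created for', ev.address));
client.on('connectionCheckedOut', ev => console.log('checked out connection', ev.connectionId));
client.on('connectionCheckedIn', ev => console.log('checked in connection', ev.connectionId));
client.on('connectionPoolCleared', ev => console.log('pool cleared', ev.address));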
1
node_modules/mongodb/lib/cmap/connection_pool.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/connection_pool.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
160
node_modules/mongodb/lib/cmap/connection_pool_events.js
generated
vendored
Normal file
160
node_modules/mongodb/lib/cmap/connection_pool_events.js
generated
vendored
Normal file
|
|
@ -0,0 +1,160 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPoolClearedEvent = exports.ConnectionCheckedInEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionCreatedEvent = exports.ConnectionPoolClosedEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolCreatedEvent = exports.ConnectionPoolMonitoringEvent = void 0;
|
||||
/**
|
||||
* The base export class for all monitoring events published from the connection pool
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
this.time = new Date();
|
||||
this.address = pool.address;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolMonitoringEvent = ConnectionPoolMonitoringEvent;
|
||||
/**
|
||||
* An event published when a connection pool is created
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
this.options = pool.options;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolCreatedEvent = ConnectionPoolCreatedEvent;
|
||||
/**
|
||||
* An event published when a connection pool is ready
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolReadyEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolReadyEvent = ConnectionPoolReadyEvent;
|
||||
/**
|
||||
* An event published when a connection pool is closed
|
||||
* @public
|
||||
* @category Event
|
||||
*/
|
||||
class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent {
|
||||
/** @internal */
|
||||
constructor(pool) {
|
||||
super(pool);
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolClosedEvent = ConnectionPoolClosedEvent;
|
||||
/**
 * An event published when a connection pool creates a new connection
 * @public
 * @category Event
 */
class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, connection) {
        super(pool);
        this.connectionId = connection.id;
    }
}
exports.ConnectionCreatedEvent = ConnectionCreatedEvent;
/**
 * An event published when a connection is ready for use
 * @public
 * @category Event
 */
class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, connection) {
        super(pool);
        this.connectionId = connection.id;
    }
}
exports.ConnectionReadyEvent = ConnectionReadyEvent;
/**
 * An event published when a connection is closed
 * @public
 * @category Event
 */
class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, connection, reason) {
        super(pool);
        this.connectionId = connection.id;
        this.reason = reason || 'unknown';
        this.serviceId = connection.serviceId;
    }
}
exports.ConnectionClosedEvent = ConnectionClosedEvent;
/**
 * An event published when a request to check a connection out begins
 * @public
 * @category Event
 */
class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool) {
        super(pool);
    }
}
exports.ConnectionCheckOutStartedEvent = ConnectionCheckOutStartedEvent;
/**
 * An event published when a request to check a connection out fails
 * @public
 * @category Event
 */
class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, reason) {
        super(pool);
        this.reason = reason;
    }
}
exports.ConnectionCheckOutFailedEvent = ConnectionCheckOutFailedEvent;
/**
 * An event published when a connection is checked out of the connection pool
 * @public
 * @category Event
 */
class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, connection) {
        super(pool);
        this.connectionId = connection.id;
    }
}
exports.ConnectionCheckedOutEvent = ConnectionCheckedOutEvent;
/**
 * An event published when a connection is checked into the connection pool
 * @public
 * @category Event
 */
class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, connection) {
        super(pool);
        this.connectionId = connection.id;
    }
}
exports.ConnectionCheckedInEvent = ConnectionCheckedInEvent;
/**
 * An event published when a connection pool is cleared
 * @public
 * @category Event
 */
class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent {
    /** @internal */
    constructor(pool, options = {}) {
        super(pool);
        this.serviceId = options.serviceId;
        this.interruptInUseConnections = options.interruptInUseConnections;
    }
}
exports.ConnectionPoolClearedEvent = ConnectionPoolClearedEvent;
//# sourceMappingURL=connection_pool_events.js.map
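Since every pool event above subclasses ConnectionPoolMonitoringEvent, a generic listener can branch on the concrete class. A hypothetical handler sketch (not part of the vendored file; it assumes these event classes are re-exported from the package root, as they are in driver v4+):

const { ConnectionCheckOutFailedEvent, ConnectionClosedEvent } = require('mongodb');
// hypothetical helper: summarize any pool monitoring event
function describePoolEvent(event) {
    if (event instanceof ConnectionCheckOutFailedEvent) {
        return `checkout failed (${event.reason}) at ${event.address}`;
    }
    if (event instanceof ConnectionClosedEvent) {
        return `connection ${event.connectionId} closed: ${event.reason}`;
    }
    return `pool event at ${event.address} (${event.time.toISOString()})`;
}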
1
node_modules/mongodb/lib/cmap/connection_pool_events.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/connection_pool_events.js.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"connection_pool_events.js","sourceRoot":"","sources":["../../src/cmap/connection_pool_events.ts"],"names":[],"mappings":";;;AAKA;;;;GAIG;AACH,MAAa,6BAA6B;IAMxC,gBAAgB;IAChB,YAAY,IAAoB;QAC9B,IAAI,CAAC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QACvB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,OAAO,CAAC;IAC9B,CAAC;CACF;AAXD,sEAWC;AAED;;;;GAIG;AACH,MAAa,0BAA2B,SAAQ,6BAA6B;IAI3E,gBAAgB;IAChB,YAAY,IAAoB;QAC9B,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,OAAO,CAAC;IAC9B,CAAC;CACF;AATD,gEASC;AAED;;;;GAIG;AACH,MAAa,wBAAyB,SAAQ,6BAA6B;IACzE,gBAAgB;IAChB,YAAY,IAAoB;QAC9B,KAAK,CAAC,IAAI,CAAC,CAAC;IACd,CAAC;CACF;AALD,4DAKC;AAED;;;;GAIG;AACH,MAAa,yBAA0B,SAAQ,6BAA6B;IAC1E,gBAAgB;IAChB,YAAY,IAAoB;QAC9B,KAAK,CAAC,IAAI,CAAC,CAAC;IACd,CAAC;CACF;AALD,8DAKC;AAED;;;;GAIG;AACH,MAAa,sBAAuB,SAAQ,6BAA6B;IAIvE,gBAAgB;IAChB,YAAY,IAAoB,EAAE,UAAwC;QACxE,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,YAAY,GAAG,UAAU,CAAC,EAAE,CAAC;IACpC,CAAC;CACF;AATD,wDASC;AAED;;;;GAIG;AACH,MAAa,oBAAqB,SAAQ,6BAA6B;IAIrE,gBAAgB;IAChB,YAAY,IAAoB,EAAE,UAAsB;QACtD,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,YAAY,GAAG,UAAU,CAAC,EAAE,CAAC;IACpC,CAAC;CACF;AATD,oDASC;AAED;;;;GAIG;AACH,MAAa,qBAAsB,SAAQ,6BAA6B;IAOtE,gBAAgB;IAChB,YACE,IAAoB,EACpB,UAAgD,EAChD,MAAc;QAEd,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,YAAY,GAAG,UAAU,CAAC,EAAE,CAAC;QAClC,IAAI,CAAC,MAAM,GAAG,MAAM,IAAI,SAAS,CAAC;QAClC,IAAI,CAAC,SAAS,GAAG,UAAU,CAAC,SAAS,CAAC;IACxC,CAAC;CACF;AAlBD,sDAkBC;AAED;;;;GAIG;AACH,MAAa,8BAA+B,SAAQ,6BAA6B;IAC/E,gBAAgB;IAChB,YAAY,IAAoB;QAC9B,KAAK,CAAC,IAAI,CAAC,CAAC;IACd,CAAC;CACF;AALD,wEAKC;AAED;;;;GAIG;AACH,MAAa,6BAA8B,SAAQ,6BAA6B;IAI9E,gBAAgB;IAChB,YAAY,IAAoB,EAAE,MAAyB;QACzD,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;IACvB,CAAC;CACF;AATD,sEASC;AAED;;;;GAIG;AACH,MAAa,yBAA0B,SAAQ,6BAA6B;IAI1E,gBAAgB;IAChB,YAAY,IAAoB,EAAE,UAAsB;QACtD,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,YAAY,GAAG,UAAU,CAAC,EAAE,CAAC;IACpC,CAAC;CACF;AATD,8DASC;AAED;;;;GAIG;AACH,MAAa,wBAAyB,SAAQ,6BAA6B;IAIzE,gBAAgB;IAChB,YAAY,IAAoB,EAAE,UAAsB;QACtD,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,YAAY,GAAG,UAAU,CAAC,EAAE,CAAC;IACpC,CAAC;CACF;AATD,4DASC;AAED;;;;GAIG;AACH,MAAa,0BAA2B,SAAQ,6BAA6B;IAM3E,gBAAgB;IAChB,YACE,IAAoB,EACpB,UAAyE,EAAE;QAE3E,KAAK,CAAC,IAAI,CAAC,CAAC;QACZ,IAAI,CAAC,SAAS,GAAG,OAAO,CAAC,SAAS,CAAC;QACnC,IAAI,CAAC,yBAAyB,GAAG,OAAO,CAAC,yBAAyB,CAAC;IACrE,CAAC;CACF;AAfD,gEAeC"}
64
node_modules/mongodb/lib/cmap/errors.js
generated
vendored
Normal file
64
node_modules/mongodb/lib/cmap/errors.js
generated
vendored
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.WaitQueueTimeoutError = exports.PoolClearedOnNetworkError = exports.PoolClearedError = exports.PoolClosedError = void 0;
|
||||
const error_1 = require("../error");
|
||||
/**
|
||||
* An error indicating a connection pool is closed
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClosedError extends error_1.MongoDriverError {
|
||||
constructor(pool) {
|
||||
super('Attempted to check out a connection from closed connection pool');
|
||||
this.address = pool.address;
|
||||
}
|
||||
get name() {
|
||||
return 'MongoPoolClosedError';
|
||||
}
|
||||
}
|
||||
exports.PoolClosedError = PoolClosedError;
|
||||
/**
|
||||
* An error indicating a connection pool is currently paused
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClearedError extends error_1.MongoNetworkError {
|
||||
constructor(pool, message) {
|
||||
const errorMessage = message
|
||||
? message
|
||||
: `Connection pool for ${pool.address} was cleared because another operation failed with: "${pool.serverError?.message}"`;
|
||||
super(errorMessage);
|
||||
this.address = pool.address;
|
||||
this.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
|
||||
}
|
||||
get name() {
|
||||
return 'MongoPoolClearedError';
|
||||
}
|
||||
}
|
||||
exports.PoolClearedError = PoolClearedError;
|
||||
/**
|
||||
* An error indicating that a connection pool has been cleared after the monitor for that server timed out.
|
||||
* @category Error
|
||||
*/
|
||||
class PoolClearedOnNetworkError extends PoolClearedError {
|
||||
constructor(pool) {
|
||||
super(pool, `Connection to ${pool.address} interrupted due to server monitor timeout`);
|
||||
}
|
||||
get name() {
|
||||
return 'PoolClearedOnNetworkError';
|
||||
}
|
||||
}
|
||||
exports.PoolClearedOnNetworkError = PoolClearedOnNetworkError;
|
||||
/**
|
||||
* An error thrown when a request to check out a connection times out
|
||||
* @category Error
|
||||
*/
|
||||
class WaitQueueTimeoutError extends error_1.MongoDriverError {
|
||||
constructor(message, address) {
|
||||
super(message);
|
||||
this.address = address;
|
||||
}
|
||||
get name() {
|
||||
return 'MongoWaitQueueTimeoutError';
|
||||
}
|
||||
}
|
||||
exports.WaitQueueTimeoutError = WaitQueueTimeoutError;
|
||||
//# sourceMappingURL=errors.js.map
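A hedged sketch (not part of the vendored file) of how application code might recognize these errors; note that PoolClearedError attaches the RetryableWriteError label in its constructor, which is what retryable writes key off of:

const { MongoError } = require('mongodb');
// hypothetical helper: decide whether a failed operation hit a cleared/paused pool and is retryable
function isRetryablePoolError(err) {
    if (!(err instanceof MongoError)) {
        return false;
    }
    return err.hasErrorLabel('RetryableWriteError') || err.name === 'MongoPoolClearedError';
}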
1
node_modules/mongodb/lib/cmap/errors.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/errors.js.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"errors.js","sourceRoot":"","sources":["../../src/cmap/errors.ts"],"names":[],"mappings":";;;AAAA,oCAAgF;AAGhF;;;GAGG;AACH,MAAa,eAAgB,SAAQ,wBAAgB;IAInD,YAAY,IAAoB;QAC9B,KAAK,CAAC,iEAAiE,CAAC,CAAC;QACzE,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,OAAO,CAAC;IAC9B,CAAC;IAED,IAAa,IAAI;QACf,OAAO,sBAAsB,CAAC;IAChC,CAAC;CACF;AAZD,0CAYC;AAED;;;GAGG;AACH,MAAa,gBAAiB,SAAQ,yBAAiB;IAIrD,YAAY,IAAoB,EAAE,OAAgB;QAChD,MAAM,YAAY,GAAG,OAAO;YAC1B,CAAC,CAAC,OAAO;YACT,CAAC,CAAC,uBAAuB,IAAI,CAAC,OAAO,wDAAwD,IAAI,CAAC,WAAW,EAAE,OAAO,GAAG,CAAC;QAC5H,KAAK,CAAC,YAAY,CAAC,CAAC;QACpB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,OAAO,CAAC;QAE5B,IAAI,CAAC,aAAa,CAAC,uBAAe,CAAC,mBAAmB,CAAC,CAAC;IAC1D,CAAC;IAED,IAAa,IAAI;QACf,OAAO,uBAAuB,CAAC;IACjC,CAAC;CACF;AAjBD,4CAiBC;AAED;;;GAGG;AACH,MAAa,yBAA0B,SAAQ,gBAAgB;IAC7D,YAAY,IAAoB;QAC9B,KAAK,CAAC,IAAI,EAAE,iBAAiB,IAAI,CAAC,OAAO,4CAA4C,CAAC,CAAC;IACzF,CAAC;IAED,IAAa,IAAI;QACf,OAAO,2BAA2B,CAAC;IACrC,CAAC;CACF;AARD,8DAQC;AAED;;;GAGG;AACH,MAAa,qBAAsB,SAAQ,wBAAgB;IAIzD,YAAY,OAAe,EAAE,OAAe;QAC1C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;IAED,IAAa,IAAI;QACf,OAAO,4BAA4B,CAAC;IACtC,CAAC;CACF;AAZD,sDAYC"}
156
node_modules/mongodb/lib/cmap/message_stream.js
generated
vendored
Normal file
156
node_modules/mongodb/lib/cmap/message_stream.js
generated
vendored
Normal file
|
|
@ -0,0 +1,156 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.MessageStream = void 0;
|
||||
const stream_1 = require("stream");
|
||||
const error_1 = require("../error");
|
||||
const utils_1 = require("../utils");
|
||||
const commands_1 = require("./commands");
|
||||
const compression_1 = require("./wire_protocol/compression");
|
||||
const constants_1 = require("./wire_protocol/constants");
|
||||
const MESSAGE_HEADER_SIZE = 16;
|
||||
const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID
|
||||
const kDefaultMaxBsonMessageSize = 1024 * 1024 * 16 * 4;
|
||||
/** @internal */
|
||||
const kBuffer = Symbol('buffer');
|
||||
/**
|
||||
* A duplex stream that is capable of reading and writing raw wire protocol messages, with
|
||||
* support for optional compression
|
||||
* @internal
|
||||
*/
|
||||
class MessageStream extends stream_1.Duplex {
|
||||
constructor(options = {}) {
|
||||
super(options);
|
||||
/** @internal */
|
||||
this.isMonitoringConnection = false;
|
||||
this.maxBsonMessageSize = options.maxBsonMessageSize || kDefaultMaxBsonMessageSize;
|
||||
this[kBuffer] = new utils_1.BufferPool();
|
||||
}
|
||||
get buffer() {
|
||||
return this[kBuffer];
|
||||
}
|
||||
_write(chunk, _, callback) {
|
||||
this[kBuffer].append(chunk);
|
||||
processIncomingData(this, callback);
|
||||
}
|
||||
_read( /* size */) {
|
||||
// NOTE: This implementation is empty because we explicitly push data to be read
|
||||
// when `writeMessage` is called.
|
||||
return;
|
||||
}
|
||||
    writeCommand(command, operationDescription) {
        const agreedCompressor = operationDescription.agreedCompressor ?? 'none';
        if (agreedCompressor === 'none' || !canCompress(command)) {
            const data = command.toBin();
            this.push(Array.isArray(data) ? Buffer.concat(data) : data);
            return;
        }
        // otherwise, compress the message
        const concatenatedOriginalCommandBuffer = Buffer.concat(command.toBin());
        const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE);
        // Extract information needed for OP_COMPRESSED from the uncompressed message
        const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12);
        const options = {
            agreedCompressor,
            zlibCompressionLevel: operationDescription.zlibCompressionLevel ?? 0
        };
        // Compress the message body
        (0, compression_1.compress)(options, messageToBeCompressed).then(compressedMessage => {
            // Create the msgHeader of OP_COMPRESSED
            const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
            msgHeader.writeInt32LE(MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length, 0); // messageLength
            msgHeader.writeInt32LE(command.requestId, 4); // requestID
            msgHeader.writeInt32LE(0, 8); // responseTo (zero)
            msgHeader.writeInt32LE(constants_1.OP_COMPRESSED, 12); // opCode
            // Create the compression details of OP_COMPRESSED
            const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
            compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode
            compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // size of the uncompressed message body, excluding the MsgHeader
            compressionDetails.writeUInt8(compression_1.Compressor[agreedCompressor], 8); // compressorID
            this.push(Buffer.concat([msgHeader, compressionDetails, compressedMessage]));
        }, error => {
            operationDescription.cb(error);
        });
    }
}
exports.MessageStream = MessageStream;
// Return whether a command contains an uncompressible command term
// Will return true if command contains no uncompressible command terms
function canCompress(command) {
    const commandDoc = command instanceof commands_1.Msg ? command.command : command.query;
    const commandName = Object.keys(commandDoc)[0];
    return !compression_1.uncompressibleCommands.has(commandName);
}
function processIncomingData(stream, callback) {
    const buffer = stream[kBuffer];
    const sizeOfMessage = buffer.getInt32();
    if (sizeOfMessage == null) {
        return callback();
    }
    if (sizeOfMessage < 0) {
        return callback(new error_1.MongoParseError(`Invalid message size: ${sizeOfMessage}`));
    }
    if (sizeOfMessage > stream.maxBsonMessageSize) {
        return callback(new error_1.MongoParseError(`Invalid message size: ${sizeOfMessage}, max allowed: ${stream.maxBsonMessageSize}`));
    }
    if (sizeOfMessage > buffer.length) {
        return callback();
    }
    const message = buffer.read(sizeOfMessage);
    const messageHeader = {
        length: message.readInt32LE(0),
        requestId: message.readInt32LE(4),
        responseTo: message.readInt32LE(8),
        opCode: message.readInt32LE(12)
    };
    const monitorHasAnotherHello = () => {
        if (stream.isMonitoringConnection) {
            // Can we read the next message size?
            const sizeOfMessage = buffer.getInt32();
            if (sizeOfMessage != null && sizeOfMessage <= buffer.length) {
                return true;
            }
        }
        return false;
    };
    let ResponseType = messageHeader.opCode === constants_1.OP_MSG ? commands_1.BinMsg : commands_1.Response;
    if (messageHeader.opCode !== constants_1.OP_COMPRESSED) {
        const messageBody = message.subarray(MESSAGE_HEADER_SIZE);
        // If we are a monitoring connection message stream and
        // there is more in the buffer that can be read, skip processing since we
        // want the last hello command response that is in the buffer.
        if (monitorHasAnotherHello()) {
            return processIncomingData(stream, callback);
        }
        stream.emit('message', new ResponseType(message, messageHeader, messageBody));
        if (buffer.length >= 4) {
            return processIncomingData(stream, callback);
        }
        return callback();
    }
    messageHeader.fromCompressed = true;
    messageHeader.opCode = message.readInt32LE(MESSAGE_HEADER_SIZE);
    messageHeader.length = message.readInt32LE(MESSAGE_HEADER_SIZE + 4);
    const compressorID = message[MESSAGE_HEADER_SIZE + 8];
    const compressedBuffer = message.slice(MESSAGE_HEADER_SIZE + 9);
    // recalculate based on wrapped opcode
    ResponseType = messageHeader.opCode === constants_1.OP_MSG ? commands_1.BinMsg : commands_1.Response;
    (0, compression_1.decompress)(compressorID, compressedBuffer).then(messageBody => {
        if (messageBody.length !== messageHeader.length) {
            return callback(new error_1.MongoDecompressionError('Message body and message header must be the same length'));
        }
        // If we are a monitoring connection message stream and
        // there is more in the buffer that can be read, skip processing since we
        // want the last hello command response that is in the buffer.
        if (monitorHasAnotherHello()) {
            return processIncomingData(stream, callback);
        }
        stream.emit('message', new ResponseType(message, messageHeader, messageBody));
        if (buffer.length >= 4) {
            return processIncomingData(stream, callback);
        }
        return callback();
    }, error => {
        return callback(error);
    });
}
//# sourceMappingURL=message_stream.js.map
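As a reading aid (a sketch, not part of the vendored file): the 16-byte header that processIncomingData() parses is four little-endian int32 fields. A standalone example with Node's Buffer; OP_MSG (2013) is the wire-protocol opcode:

// Layout: messageLength | requestID | responseTo | opCode, each a little-endian int32
const OP_MSG = 2013;
function encodeHeader(messageLength, requestId, responseTo, opCode) {
    const header = Buffer.alloc(16);
    header.writeInt32LE(messageLength, 0);
    header.writeInt32LE(requestId, 4);
    header.writeInt32LE(responseTo, 8);
    header.writeInt32LE(opCode, 12);
    return header;
}
const header = encodeHeader(16, 1, 0, OP_MSG);
console.log(header.readInt32LE(12) === OP_MSG); // true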
1
node_modules/mongodb/lib/cmap/message_stream.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/message_stream.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
62
node_modules/mongodb/lib/cmap/metrics.js
generated
vendored
Normal file
62
node_modules/mongodb/lib/cmap/metrics.js
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ConnectionPoolMetrics = void 0;
|
||||
/** @internal */
|
||||
class ConnectionPoolMetrics {
|
||||
constructor() {
|
||||
this.txnConnections = 0;
|
||||
this.cursorConnections = 0;
|
||||
this.otherConnections = 0;
|
||||
}
|
||||
/**
|
||||
* Mark a connection as pinned for a specific operation.
|
||||
*/
|
||||
markPinned(pinType) {
|
||||
if (pinType === ConnectionPoolMetrics.TXN) {
|
||||
this.txnConnections += 1;
|
||||
}
|
||||
else if (pinType === ConnectionPoolMetrics.CURSOR) {
|
||||
this.cursorConnections += 1;
|
||||
}
|
||||
else {
|
||||
this.otherConnections += 1;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Unmark a connection as pinned for an operation.
|
||||
*/
|
||||
markUnpinned(pinType) {
|
||||
if (pinType === ConnectionPoolMetrics.TXN) {
|
||||
this.txnConnections -= 1;
|
||||
}
|
||||
else if (pinType === ConnectionPoolMetrics.CURSOR) {
|
||||
this.cursorConnections -= 1;
|
||||
}
|
||||
else {
|
||||
this.otherConnections -= 1;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Return information about the cmap metrics as a string.
|
||||
*/
|
||||
info(maxPoolSize) {
|
||||
return ('Timed out while checking out a connection from connection pool: ' +
|
||||
`maxPoolSize: ${maxPoolSize}, ` +
|
||||
`connections in use by cursors: ${this.cursorConnections}, ` +
|
||||
`connections in use by transactions: ${this.txnConnections}, ` +
|
||||
`connections in use by other operations: ${this.otherConnections}`);
|
||||
}
|
||||
/**
|
||||
* Reset the metrics to the initial values.
|
||||
*/
|
||||
reset() {
|
||||
this.txnConnections = 0;
|
||||
this.cursorConnections = 0;
|
||||
this.otherConnections = 0;
|
||||
}
|
||||
}
|
||||
exports.ConnectionPoolMetrics = ConnectionPoolMetrics;
|
||||
ConnectionPoolMetrics.TXN = 'txn';
|
||||
ConnectionPoolMetrics.CURSOR = 'cursor';
|
||||
ConnectionPoolMetrics.OTHER = 'other';
|
||||
//# sourceMappingURL=metrics.js.map
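A brief illustration (not part of the vendored file) of the bookkeeping above: in load-balanced mode the pool calls markPinned/markUnpinned from its Connection.PINNED/UNPINNED listeners, and info() formats the counts for the wait-queue timeout message. The require path is internal and shown only for this sketch.

const { ConnectionPoolMetrics } = require('mongodb/lib/cmap/metrics'); // internal module path
const metrics = new ConnectionPoolMetrics();
metrics.markPinned(ConnectionPoolMetrics.TXN);    // connection pinned to a transaction
metrics.markPinned(ConnectionPoolMetrics.CURSOR); // connection pinned to a cursor
console.log(metrics.info(10)); // "...maxPoolSize: 10, connections in use by cursors: 1, ..."
metrics.markUnpinned(ConnectionPoolMetrics.CURSOR);
metrics.reset();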
1
node_modules/mongodb/lib/cmap/metrics.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/cmap/metrics.js.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"metrics.js","sourceRoot":"","sources":["../../src/cmap/metrics.ts"],"names":[],"mappings":";;;AAAA,gBAAgB;AAChB,MAAa,qBAAqB;IAAlC;QAKE,mBAAc,GAAG,CAAC,CAAC;QACnB,sBAAiB,GAAG,CAAC,CAAC;QACtB,qBAAgB,GAAG,CAAC,CAAC;IAiDvB,CAAC;IA/CC;;OAEG;IACH,UAAU,CAAC,OAAe;QACxB,IAAI,OAAO,KAAK,qBAAqB,CAAC,GAAG,EAAE;YACzC,IAAI,CAAC,cAAc,IAAI,CAAC,CAAC;SAC1B;aAAM,IAAI,OAAO,KAAK,qBAAqB,CAAC,MAAM,EAAE;YACnD,IAAI,CAAC,iBAAiB,IAAI,CAAC,CAAC;SAC7B;aAAM;YACL,IAAI,CAAC,gBAAgB,IAAI,CAAC,CAAC;SAC5B;IACH,CAAC;IAED;;OAEG;IACH,YAAY,CAAC,OAAe;QAC1B,IAAI,OAAO,KAAK,qBAAqB,CAAC,GAAG,EAAE;YACzC,IAAI,CAAC,cAAc,IAAI,CAAC,CAAC;SAC1B;aAAM,IAAI,OAAO,KAAK,qBAAqB,CAAC,MAAM,EAAE;YACnD,IAAI,CAAC,iBAAiB,IAAI,CAAC,CAAC;SAC7B;aAAM;YACL,IAAI,CAAC,gBAAgB,IAAI,CAAC,CAAC;SAC5B;IACH,CAAC;IAED;;OAEG;IACH,IAAI,CAAC,WAAmB;QACtB,OAAO,CACL,kEAAkE;YAClE,gBAAgB,WAAW,IAAI;YAC/B,kCAAkC,IAAI,CAAC,iBAAiB,IAAI;YAC5D,uCAAuC,IAAI,CAAC,cAAc,IAAI;YAC9D,2CAA2C,IAAI,CAAC,gBAAgB,EAAE,CACnE,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,KAAK;QACH,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC;QACxB,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC;QAC3B,IAAI,CAAC,gBAAgB,GAAG,CAAC,CAAC;IAC5B,CAAC;;AAvDH,sDAwDC;AAvDiB,yBAAG,GAAG,KAAc,CAAC;AACrB,4BAAM,GAAG,QAAiB,CAAC;AAC3B,2BAAK,GAAG,OAAgB,CAAC"}
51
node_modules/mongodb/lib/cmap/stream_description.js
generated
vendored
Normal file
51
node_modules/mongodb/lib/cmap/stream_description.js
generated
vendored
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.StreamDescription = void 0;
|
||||
const common_1 = require("../sdam/common");
|
||||
const server_description_1 = require("../sdam/server_description");
|
||||
const RESPONSE_FIELDS = [
|
||||
'minWireVersion',
|
||||
'maxWireVersion',
|
||||
'maxBsonObjectSize',
|
||||
'maxMessageSizeBytes',
|
||||
'maxWriteBatchSize',
|
||||
'logicalSessionTimeoutMinutes'
|
||||
];
|
||||
/** @public */
|
||||
class StreamDescription {
|
||||
constructor(address, options) {
|
||||
this.address = address;
|
||||
this.type = common_1.ServerType.Unknown;
|
||||
this.minWireVersion = undefined;
|
||||
this.maxWireVersion = undefined;
|
||||
this.maxBsonObjectSize = 16777216;
|
||||
this.maxMessageSizeBytes = 48000000;
|
||||
this.maxWriteBatchSize = 100000;
|
||||
this.logicalSessionTimeoutMinutes = options?.logicalSessionTimeoutMinutes;
|
||||
this.loadBalanced = !!options?.loadBalanced;
|
||||
this.compressors =
|
||||
options && options.compressors && Array.isArray(options.compressors)
|
||||
? options.compressors
|
||||
: [];
|
||||
}
|
||||
receiveResponse(response) {
|
||||
if (response == null) {
|
||||
return;
|
||||
}
|
||||
this.type = (0, server_description_1.parseServerType)(response);
|
||||
for (const field of RESPONSE_FIELDS) {
|
||||
if (response[field] != null) {
|
||||
this[field] = response[field];
|
||||
}
|
||||
// testing case
|
||||
if ('__nodejs_mock_server__' in response) {
|
||||
this.__nodejs_mock_server__ = response['__nodejs_mock_server__'];
|
||||
}
|
||||
}
|
||||
if (response.compression) {
|
||||
this.compressor = this.compressors.filter(c => response.compression?.includes(c))[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.StreamDescription = StreamDescription;
|
||||
//# sourceMappingURL=stream_description.js.map
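
receiveResponse copies only the whitelisted RESPONSE_FIELDS and picks the first configured compressor the server also advertises; a short sketch with a fabricated hello-style response (the response document here is illustrative, not a real server reply):

// Sketch only: fold an illustrative hello response into a StreamDescription.
const { StreamDescription } = require('mongodb/lib/cmap/stream_description');

const description = new StreamDescription('localhost:27017', { compressors: ['zlib', 'snappy'] });
description.receiveResponse({
    minWireVersion: 6,
    maxWireVersion: 17,
    compression: ['snappy'] // intersected with the configured compressors
});
console.log(description.maxWireVersion); // 17
console.log(description.compressor);     // 'snappy'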
1
node_modules/mongodb/lib/cmap/stream_description.js.map
generated
vendored
Normal file
@ -0,0 +1 @@
{"version":3,"file":"stream_description.js","sourceRoot":"","sources":["../../src/cmap/stream_description.ts"],"names":[],"mappings":";;;AACA,2CAA4C;AAC5C,mEAA6D;AAG7D,MAAM,eAAe,GAAG;IACtB,gBAAgB;IAChB,gBAAgB;IAChB,mBAAmB;IACnB,qBAAqB;IACrB,mBAAmB;IACnB,8BAA8B;CACtB,CAAC;AASX,cAAc;AACd,MAAa,iBAAiB;IAiB5B,YAAY,OAAe,EAAE,OAAkC;QAC7D,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,IAAI,GAAG,mBAAU,CAAC,OAAO,CAAC;QAC/B,IAAI,CAAC,cAAc,GAAG,SAAS,CAAC;QAChC,IAAI,CAAC,cAAc,GAAG,SAAS,CAAC;QAChC,IAAI,CAAC,iBAAiB,GAAG,QAAQ,CAAC;QAClC,IAAI,CAAC,mBAAmB,GAAG,QAAQ,CAAC;QACpC,IAAI,CAAC,iBAAiB,GAAG,MAAM,CAAC;QAChC,IAAI,CAAC,4BAA4B,GAAG,OAAO,EAAE,4BAA4B,CAAC;QAC1E,IAAI,CAAC,YAAY,GAAG,CAAC,CAAC,OAAO,EAAE,YAAY,CAAC;QAC5C,IAAI,CAAC,WAAW;YACd,OAAO,IAAI,OAAO,CAAC,WAAW,IAAI,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,WAAW,CAAC;gBAClE,CAAC,CAAC,OAAO,CAAC,WAAW;gBACrB,CAAC,CAAC,EAAE,CAAC;IACX,CAAC;IAED,eAAe,CAAC,QAAyB;QACvC,IAAI,QAAQ,IAAI,IAAI,EAAE;YACpB,OAAO;SACR;QACD,IAAI,CAAC,IAAI,GAAG,IAAA,oCAAe,EAAC,QAAQ,CAAC,CAAC;QACtC,KAAK,MAAM,KAAK,IAAI,eAAe,EAAE;YACnC,IAAI,QAAQ,CAAC,KAAK,CAAC,IAAI,IAAI,EAAE;gBAC3B,IAAI,CAAC,KAAK,CAAC,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC;aAC/B;YAED,eAAe;YACf,IAAI,wBAAwB,IAAI,QAAQ,EAAE;gBACxC,IAAI,CAAC,sBAAsB,GAAG,QAAQ,CAAC,wBAAwB,CAAC,CAAC;aAClE;SACF;QAED,IAAI,QAAQ,CAAC,WAAW,EAAE;YACxB,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,QAAQ,CAAC,WAAW,EAAE,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;SACtF;IACH,CAAC;CACF;AArDD,8CAqDC"}
81
node_modules/mongodb/lib/cmap/wire_protocol/compression.js
generated
vendored
Normal file
@ -0,0 +1,81 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.decompress = exports.compress = exports.uncompressibleCommands = exports.Compressor = void 0;
|
||||
const util_1 = require("util");
|
||||
const zlib = require("zlib");
|
||||
const constants_1 = require("../../constants");
|
||||
const deps_1 = require("../../deps");
|
||||
const error_1 = require("../../error");
|
||||
/** @public */
|
||||
exports.Compressor = Object.freeze({
|
||||
none: 0,
|
||||
snappy: 1,
|
||||
zlib: 2,
|
||||
zstd: 3
|
||||
});
|
||||
exports.uncompressibleCommands = new Set([
|
||||
constants_1.LEGACY_HELLO_COMMAND,
|
||||
'saslStart',
|
||||
'saslContinue',
|
||||
'getnonce',
|
||||
'authenticate',
|
||||
'createUser',
|
||||
'updateUser',
|
||||
'copydbSaslStart',
|
||||
'copydbgetnonce',
|
||||
'copydb'
|
||||
]);
|
||||
const ZSTD_COMPRESSION_LEVEL = 3;
|
||||
const zlibInflate = (0, util_1.promisify)(zlib.inflate.bind(zlib));
|
||||
const zlibDeflate = (0, util_1.promisify)(zlib.deflate.bind(zlib));
|
||||
// Facilitate compressing a message using an agreed compressor
|
||||
async function compress(options, dataToBeCompressed) {
|
||||
const zlibOptions = {};
|
||||
switch (options.agreedCompressor) {
|
||||
case 'snappy':
|
||||
if ('kModuleError' in deps_1.Snappy) {
|
||||
throw deps_1.Snappy['kModuleError'];
|
||||
}
|
||||
return deps_1.Snappy.compress(dataToBeCompressed);
|
||||
case 'zstd':
|
||||
if ('kModuleError' in deps_1.ZStandard) {
|
||||
throw deps_1.ZStandard['kModuleError'];
|
||||
}
|
||||
return deps_1.ZStandard.compress(dataToBeCompressed, ZSTD_COMPRESSION_LEVEL);
|
||||
case 'zlib':
|
||||
if (options.zlibCompressionLevel) {
|
||||
zlibOptions.level = options.zlibCompressionLevel;
|
||||
}
|
||||
return zlibDeflate(dataToBeCompressed, zlibOptions);
|
||||
default:
|
||||
throw new error_1.MongoInvalidArgumentError(`Unknown compressor ${options.agreedCompressor} failed to compress`);
|
||||
}
|
||||
}
|
||||
exports.compress = compress;
|
||||
// Decompress a message using the given compressor
|
||||
async function decompress(compressorID, compressedData) {
|
||||
if (compressorID !== exports.Compressor.snappy &&
|
||||
compressorID !== exports.Compressor.zstd &&
|
||||
compressorID !== exports.Compressor.zlib &&
|
||||
compressorID !== exports.Compressor.none) {
|
||||
throw new error_1.MongoDecompressionError(`Server sent message compressed using an unsupported compressor. (Received compressor ID ${compressorID})`);
|
||||
}
|
||||
switch (compressorID) {
|
||||
case exports.Compressor.snappy:
|
||||
if ('kModuleError' in deps_1.Snappy) {
|
||||
throw deps_1.Snappy['kModuleError'];
|
||||
}
|
||||
return deps_1.Snappy.uncompress(compressedData, { asBuffer: true });
|
||||
case exports.Compressor.zstd:
|
||||
if ('kModuleError' in deps_1.ZStandard) {
|
||||
throw deps_1.ZStandard['kModuleError'];
|
||||
}
|
||||
return deps_1.ZStandard.decompress(compressedData);
|
||||
case exports.Compressor.zlib:
|
||||
return zlibInflate(compressedData);
|
||||
default:
|
||||
return compressedData;
|
||||
}
|
||||
}
|
||||
exports.decompress = decompress;
|
||||
//# sourceMappingURL=compression.js.map
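
Snappy and zstd go through optional dependencies surfaced via deps_1 (hence the kModuleError checks), while zlib is built into Node.js, so a self-contained round trip is easiest to sketch through the zlib path:

// Sketch only: round-trip a payload through the zlib branch of compress/decompress.
const { compress, decompress, Compressor } = require('mongodb/lib/cmap/wire_protocol/compression');

async function roundTrip() {
    const payload = Buffer.from('{"ping":1}');
    const squeezed = await compress({ agreedCompressor: 'zlib', zlibCompressionLevel: 6 }, payload);
    const restored = await decompress(Compressor.zlib, squeezed);
    console.log(restored.equals(payload)); // true
}
roundTrip().catch(console.error);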
1
node_modules/mongodb/lib/cmap/wire_protocol/compression.js.map
generated
vendored
Normal file
@ -0,0 +1 @@
{"version":3,"file":"compression.js","sourceRoot":"","sources":["../../../src/cmap/wire_protocol/compression.ts"],"names":[],"mappings":";;;AAAA,+BAAiC;AACjC,6BAA6B;AAE7B,+CAAuD;AACvD,qCAA+C;AAC/C,uCAAiF;AAEjF,cAAc;AACD,QAAA,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IACtC,IAAI,EAAE,CAAC;IACP,MAAM,EAAE,CAAC;IACT,IAAI,EAAE,CAAC;IACP,IAAI,EAAE,CAAC;CACC,CAAC,CAAC;AAQC,QAAA,sBAAsB,GAAG,IAAI,GAAG,CAAC;IAC5C,gCAAoB;IACpB,WAAW;IACX,cAAc;IACd,UAAU;IACV,cAAc;IACd,YAAY;IACZ,YAAY;IACZ,iBAAiB;IACjB,gBAAgB;IAChB,QAAQ;CACT,CAAC,CAAC;AAEH,MAAM,sBAAsB,GAAG,CAAC,CAAC;AAEjC,MAAM,WAAW,GAAG,IAAA,gBAAS,EAAC,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AACvD,MAAM,WAAW,GAAG,IAAA,gBAAS,EAAC,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AAEvD,8DAA8D;AACvD,KAAK,UAAU,QAAQ,CAC5B,OAA2E,EAC3E,kBAA0B;IAE1B,MAAM,WAAW,GAAG,EAAsB,CAAC;IAC3C,QAAQ,OAAO,CAAC,gBAAgB,EAAE;QAChC,KAAK,QAAQ;YACX,IAAI,cAAc,IAAI,aAAM,EAAE;gBAC5B,MAAM,aAAM,CAAC,cAAc,CAAC,CAAC;aAC9B;YACD,OAAO,aAAM,CAAC,QAAQ,CAAC,kBAAkB,CAAC,CAAC;QAE7C,KAAK,MAAM;YACT,IAAI,cAAc,IAAI,gBAAS,EAAE;gBAC/B,MAAM,gBAAS,CAAC,cAAc,CAAC,CAAC;aACjC;YACD,OAAO,gBAAS,CAAC,QAAQ,CAAC,kBAAkB,EAAE,sBAAsB,CAAC,CAAC;QAExE,KAAK,MAAM;YACT,IAAI,OAAO,CAAC,oBAAoB,EAAE;gBAChC,WAAW,CAAC,KAAK,GAAG,OAAO,CAAC,oBAAoB,CAAC;aAClD;YACD,OAAO,WAAW,CAAC,kBAAkB,EAAE,WAAW,CAAC,CAAC;QAEtD;YACE,MAAM,IAAI,iCAAyB,CACjC,sBAAsB,OAAO,CAAC,gBAAgB,qBAAqB,CACpE,CAAC;KACL;AACH,CAAC;AA7BD,4BA6BC;AAED,kDAAkD;AAC3C,KAAK,UAAU,UAAU,CAAC,YAAoB,EAAE,cAAsB;IAC3E,IACE,YAAY,KAAK,kBAAU,CAAC,MAAM;QAClC,YAAY,KAAK,kBAAU,CAAC,IAAI;QAChC,YAAY,KAAK,kBAAU,CAAC,IAAI;QAChC,YAAY,KAAK,kBAAU,CAAC,IAAI,EAChC;QACA,MAAM,IAAI,+BAAuB,CAC/B,2FAA2F,YAAY,GAAG,CAC3G,CAAC;KACH;IAED,QAAQ,YAAY,EAAE;QACpB,KAAK,kBAAU,CAAC,MAAM;YACpB,IAAI,cAAc,IAAI,aAAM,EAAE;gBAC5B,MAAM,aAAM,CAAC,cAAc,CAAC,CAAC;aAC9B;YACD,OAAO,aAAM,CAAC,UAAU,CAAC,cAAc,EAAE,EAAE,QAAQ,EAAE,IAAI,EAAE,CAAC,CAAC;QAE/D,KAAK,kBAAU,CAAC,IAAI;YAClB,IAAI,cAAc,IAAI,gBAAS,EAAE;gBAC/B,MAAM,gBAAS,CAAC,cAAc,CAAC,CAAC;aACjC;YACD,OAAO,gBAAS,CAAC,UAAU,CAAC,cAAc,CAAC,CAAC;QAE9C,KAAK,kBAAU,CAAC,IAAI;YAClB,OAAO,WAAW,CAAC,cAAc,CAAC,CAAC;QAErC;YACE,OAAO,cAAc,CAAC;KACzB;AACH,CAAC;AA/BD,gCA+BC"}
15
node_modules/mongodb/lib/cmap/wire_protocol/constants.js
generated
vendored
Normal file
@ -0,0 +1,15 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.OP_MSG = exports.OP_COMPRESSED = exports.OP_DELETE = exports.OP_QUERY = exports.OP_INSERT = exports.OP_UPDATE = exports.OP_REPLY = exports.MAX_SUPPORTED_WIRE_VERSION = exports.MIN_SUPPORTED_WIRE_VERSION = exports.MAX_SUPPORTED_SERVER_VERSION = exports.MIN_SUPPORTED_SERVER_VERSION = void 0;
|
||||
exports.MIN_SUPPORTED_SERVER_VERSION = '3.6';
|
||||
exports.MAX_SUPPORTED_SERVER_VERSION = '6.0';
|
||||
exports.MIN_SUPPORTED_WIRE_VERSION = 6;
|
||||
exports.MAX_SUPPORTED_WIRE_VERSION = 17;
|
||||
exports.OP_REPLY = 1;
|
||||
exports.OP_UPDATE = 2001;
|
||||
exports.OP_INSERT = 2002;
|
||||
exports.OP_QUERY = 2004;
|
||||
exports.OP_DELETE = 2006;
|
||||
exports.OP_COMPRESSED = 2012;
|
||||
exports.OP_MSG = 2013;
|
||||
//# sourceMappingURL=constants.js.map
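
The wire-version pair bounds which servers this build will talk to; a hypothetical range check in the spirit of what the driver does during handshaking (isWireVersionSupported is illustrative, not part of this file):

// Hypothetical helper: a server is compatible when its advertised wire-version
// window overlaps the driver's supported window.
const {
    MIN_SUPPORTED_WIRE_VERSION,
    MAX_SUPPORTED_WIRE_VERSION
} = require('mongodb/lib/cmap/wire_protocol/constants');

function isWireVersionSupported(serverMin, serverMax) {
    return serverMin <= MAX_SUPPORTED_WIRE_VERSION && serverMax >= MIN_SUPPORTED_WIRE_VERSION;
}

console.log(isWireVersionSupported(0, 13));  // true  (windows overlap)
console.log(isWireVersionSupported(18, 20)); // false (server too new for this build)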
1
node_modules/mongodb/lib/cmap/wire_protocol/constants.js.map
generated
vendored
Normal file
@ -0,0 +1 @@
{"version":3,"file":"constants.js","sourceRoot":"","sources":["../../../src/cmap/wire_protocol/constants.ts"],"names":[],"mappings":";;;AAAa,QAAA,4BAA4B,GAAG,KAAK,CAAC;AACrC,QAAA,4BAA4B,GAAG,KAAK,CAAC;AACrC,QAAA,0BAA0B,GAAG,CAAC,CAAC;AAC/B,QAAA,0BAA0B,GAAG,EAAE,CAAC;AAChC,QAAA,QAAQ,GAAG,CAAC,CAAC;AACb,QAAA,SAAS,GAAG,IAAI,CAAC;AACjB,QAAA,SAAS,GAAG,IAAI,CAAC;AACjB,QAAA,QAAQ,GAAG,IAAI,CAAC;AAChB,QAAA,SAAS,GAAG,IAAI,CAAC;AACjB,QAAA,aAAa,GAAG,IAAI,CAAC;AACrB,QAAA,MAAM,GAAG,IAAI,CAAC"}
55
node_modules/mongodb/lib/cmap/wire_protocol/shared.js
generated
vendored
Normal file
@ -0,0 +1,55 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isSharded = exports.applyCommonQueryOptions = exports.getReadPreference = void 0;
|
||||
const error_1 = require("../../error");
|
||||
const read_preference_1 = require("../../read_preference");
|
||||
const common_1 = require("../../sdam/common");
|
||||
const topology_description_1 = require("../../sdam/topology_description");
|
||||
function getReadPreference(cmd, options) {
|
||||
// Default to command version of the readPreference
|
||||
let readPreference = cmd.readPreference || read_preference_1.ReadPreference.primary;
|
||||
// If we have an option readPreference override the command one
|
||||
if (options?.readPreference) {
|
||||
readPreference = options.readPreference;
|
||||
}
|
||||
if (typeof readPreference === 'string') {
|
||||
readPreference = read_preference_1.ReadPreference.fromString(readPreference);
|
||||
}
|
||||
if (!(readPreference instanceof read_preference_1.ReadPreference)) {
|
||||
throw new error_1.MongoInvalidArgumentError('Option "readPreference" must be a ReadPreference instance');
|
||||
}
|
||||
return readPreference;
|
||||
}
|
||||
exports.getReadPreference = getReadPreference;
|
||||
function applyCommonQueryOptions(queryOptions, options) {
|
||||
Object.assign(queryOptions, {
|
||||
raw: typeof options.raw === 'boolean' ? options.raw : false,
|
||||
promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
|
||||
promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
|
||||
promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
|
||||
bsonRegExp: typeof options.bsonRegExp === 'boolean' ? options.bsonRegExp : false,
|
||||
enableUtf8Validation: typeof options.enableUtf8Validation === 'boolean' ? options.enableUtf8Validation : true
|
||||
});
|
||||
if (options.session) {
|
||||
queryOptions.session = options.session;
|
||||
}
|
||||
return queryOptions;
|
||||
}
|
||||
exports.applyCommonQueryOptions = applyCommonQueryOptions;
|
||||
function isSharded(topologyOrServer) {
|
||||
if (topologyOrServer == null) {
|
||||
return false;
|
||||
}
|
||||
if (topologyOrServer.description && topologyOrServer.description.type === common_1.ServerType.Mongos) {
|
||||
return true;
|
||||
}
|
||||
// NOTE: This is incredibly inefficient, and should be removed once command construction
|
||||
// happens based on `Server` not `Topology`.
|
||||
if (topologyOrServer.description && topologyOrServer.description instanceof topology_description_1.TopologyDescription) {
|
||||
const servers = Array.from(topologyOrServer.description.servers.values());
|
||||
return servers.some((server) => server.type === common_1.ServerType.Mongos);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
exports.isSharded = isSharded;
|
||||
//# sourceMappingURL=shared.js.map
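
getReadPreference gives the options-level read preference precedence over the command document's, and coerces mode strings into ReadPreference instances; a small sketch (values illustrative):

// Sketch only: option-level readPreference overrides the command's, and
// a mode string is promoted to a ReadPreference instance.
const { getReadPreference } = require('mongodb/lib/cmap/wire_protocol/shared');
const { ReadPreference } = require('mongodb/lib/read_preference');

const cmd = { find: 'users', readPreference: ReadPreference.primary };
const rp = getReadPreference(cmd, { readPreference: 'secondaryPreferred' });
console.log(rp.mode); // 'secondaryPreferred'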
1
node_modules/mongodb/lib/cmap/wire_protocol/shared.js.map
generated
vendored
Normal file
@ -0,0 +1 @@
{"version":3,"file":"shared.js","sourceRoot":"","sources":["../../../src/cmap/wire_protocol/shared.ts"],"names":[],"mappings":";;;AACA,uCAAwD;AAExD,2DAAuD;AACvD,8CAA+C;AAI/C,0EAAsE;AAQtE,SAAgB,iBAAiB,CAAC,GAAa,EAAE,OAA8B;IAC7E,mDAAmD;IACnD,IAAI,cAAc,GAAG,GAAG,CAAC,cAAc,IAAI,gCAAc,CAAC,OAAO,CAAC;IAClE,+DAA+D;IAC/D,IAAI,OAAO,EAAE,cAAc,EAAE;QAC3B,cAAc,GAAG,OAAO,CAAC,cAAc,CAAC;KACzC;IAED,IAAI,OAAO,cAAc,KAAK,QAAQ,EAAE;QACtC,cAAc,GAAG,gCAAc,CAAC,UAAU,CAAC,cAAc,CAAC,CAAC;KAC5D;IAED,IAAI,CAAC,CAAC,cAAc,YAAY,gCAAc,CAAC,EAAE;QAC/C,MAAM,IAAI,iCAAyB,CACjC,2DAA2D,CAC5D,CAAC;KACH;IAED,OAAO,cAAc,CAAC;AACxB,CAAC;AAnBD,8CAmBC;AAED,SAAgB,uBAAuB,CACrC,YAA4B,EAC5B,OAAuB;IAEvB,MAAM,CAAC,MAAM,CAAC,YAAY,EAAE;QAC1B,GAAG,EAAE,OAAO,OAAO,CAAC,GAAG,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK;QAC3D,YAAY,EAAE,OAAO,OAAO,CAAC,YAAY,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,YAAY,CAAC,CAAC,CAAC,IAAI;QACrF,aAAa,EAAE,OAAO,OAAO,CAAC,aAAa,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI;QACxF,cAAc,EAAE,OAAO,OAAO,CAAC,cAAc,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC,CAAC,KAAK;QAC5F,UAAU,EAAE,OAAO,OAAO,CAAC,UAAU,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,UAAU,CAAC,CAAC,CAAC,KAAK;QAChF,oBAAoB,EAClB,OAAO,OAAO,CAAC,oBAAoB,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,oBAAoB,CAAC,CAAC,CAAC,IAAI;KAC1F,CAAC,CAAC;IAEH,IAAI,OAAO,CAAC,OAAO,EAAE;QACnB,YAAY,CAAC,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC;KACxC;IAED,OAAO,YAAY,CAAC;AACtB,CAAC;AAnBD,0DAmBC;AAED,SAAgB,SAAS,CAAC,gBAAiD;IACzE,IAAI,gBAAgB,IAAI,IAAI,EAAE;QAC5B,OAAO,KAAK,CAAC;KACd;IAED,IAAI,gBAAgB,CAAC,WAAW,IAAI,gBAAgB,CAAC,WAAW,CAAC,IAAI,KAAK,mBAAU,CAAC,MAAM,EAAE;QAC3F,OAAO,IAAI,CAAC;KACb;IAED,wFAAwF;IACxF,kDAAkD;IAClD,IAAI,gBAAgB,CAAC,WAAW,IAAI,gBAAgB,CAAC,WAAW,YAAY,0CAAmB,EAAE;QAC/F,MAAM,OAAO,GAAwB,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,WAAW,CAAC,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;QAC/F,OAAO,OAAO,CAAC,IAAI,CAAC,CAAC,MAAyB,EAAE,EAAE,CAAC,MAAM,CAAC,IAAI,KAAK,mBAAU,CAAC,MAAM,CAAC,CAAC;KACvF;IAED,OAAO,KAAK,CAAC;AACf,CAAC;AAjBD,8BAiBC"}