Initial commit
* Examples work * setup.py kinda updasted * Fork of txmongo but with new pymongo embedded
This commit is contained in:
95
asyncio_mongo/_pymongo/__init__.py
Normal file
95
asyncio_mongo/_pymongo/__init__.py
Normal file
@@ -0,0 +1,95 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Python driver for MongoDB."""
|
||||
|
||||
|
||||
ASCENDING = 1
"""Ascending sort order."""
DESCENDING = -1
"""Descending sort order."""

GEO2D = "2d"
"""Index specifier for a 2-dimensional `geospatial index`_.

.. versionadded:: 1.5.1

.. note:: Geo-spatial indexing requires server version **>= 1.3.3**.

.. _geospatial index: http://docs.mongodb.org/manual/core/geospatial-indexes/
"""

GEOHAYSTACK = "geoHaystack"
"""Index specifier for a 2-dimensional `haystack index`_.

.. versionadded:: 2.1

.. note:: Geo-spatial indexing requires server version **>= 1.5.6**.

.. _haystack index: http://docs.mongodb.org/manual/core/geospatial-indexes/#haystack-indexes
"""

GEOSPHERE = "2dsphere"
"""Index specifier for a `spherical geospatial index`_.

.. versionadded:: 2.5

.. note:: 2dsphere indexing requires server version **>= 2.4.0**.

.. _spherical geospatial index: http://docs.mongodb.org/manual/release-notes/2.4/#new-geospatial-indexes-with-geojson-and-improved-spherical-geometry
"""

HASHED = "hashed"
"""Index specifier for a `hashed index`_.

.. versionadded:: 2.5

.. note:: hashed indexing requires server version **>= 2.4.0**.

.. _hashed index: http://docs.mongodb.org/manual/release-notes/2.4/#new-hashed-index-and-sharding-with-a-hashed-shard-key
"""

# Database profiling levels, used with set_profiling_level.
OFF = 0
"""No database profiling."""
SLOW_ONLY = 1
"""Only profile slow operations."""
ALL = 2
"""Profile all operations."""

# Version of the embedded pymongo this fork is based on.
version_tuple = (2, 6, 3)
|
||||
|
||||
def get_version_string(version_info=None):
    """Return a dotted version string built from a version tuple.

    :Parameters:
      - `version_info` (optional): a version tuple to format; defaults to
        the module-level ``version_tuple``.  Adding this parameter is
        backward compatible (existing zero-argument calls are unchanged)
        and makes the formatting logic reusable and testable.

    A trailing string component (e.g. ``'rc0'``) is appended without a
    separator, so ``(2, 6, 'rc0')`` becomes ``'2.6rc0'``.
    """
    info = version_tuple if version_info is None else version_info
    if isinstance(info[-1], str):
        # Pre-release suffix: join the numeric parts, append the suffix.
        return '.'.join(map(str, info[:-1])) + info[-1]
    return '.'.join(map(str, info))
|
||||
|
||||
# Computed once at import time from version_tuple.
version = get_version_string()
"""Current version of PyMongo."""
|
||||
|
||||
from asyncio_mongo._pymongo.connection import Connection
|
||||
from asyncio_mongo._pymongo.mongo_client import MongoClient
|
||||
from asyncio_mongo._pymongo.mongo_replica_set_client import MongoReplicaSetClient
|
||||
from asyncio_mongo._pymongo.replica_set_connection import ReplicaSetConnection
|
||||
from asyncio_mongo._pymongo.read_preferences import ReadPreference
|
||||
|
||||
def has_c():
    """Is the C extension installed?

    .. versionadded:: 1.5
    """
    # Probe for the compiled _cmessage module; absence simply means the
    # pure-Python implementations are in use.
    try:
        from asyncio_mongo._pymongo import _cmessage
    except ImportError:
        return False
    return True
|
||||
BIN
asyncio_mongo/_pymongo/_cmessage.so
Executable file
BIN
asyncio_mongo/_pymongo/_cmessage.so
Executable file
Binary file not shown.
215
asyncio_mongo/_pymongo/auth.py
Normal file
215
asyncio_mongo/_pymongo/auth.py
Normal file
@@ -0,0 +1,215 @@
|
||||
# Copyright 2013 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Authentication helpers."""
|
||||
|
||||
try:
|
||||
import hashlib
|
||||
_MD5 = hashlib.md5
|
||||
except ImportError: # for Python < 2.5
|
||||
import md5
|
||||
_MD5 = md5.new
|
||||
|
||||
HAVE_KERBEROS = True
|
||||
try:
|
||||
import kerberos
|
||||
except ImportError:
|
||||
HAVE_KERBEROS = False
|
||||
|
||||
from asyncio_mongo._bson.binary import Binary
|
||||
from asyncio_mongo._bson.son import SON
|
||||
from asyncio_mongo._pymongo.errors import ConfigurationError, OperationFailure
|
||||
|
||||
|
||||
MECHANISMS = ('GSSAPI', 'MONGODB-CR', 'MONGODB-X509', 'PLAIN')
"""The authentication mechanisms supported by PyMongo."""
|
||||
|
||||
|
||||
def _build_credentials_tuple(mech, source, user, passwd, extra):
    """Build and return a mechanism specific credentials tuple.

    The first element is always the mechanism name; the remaining fields
    depend on the mechanism.
    """
    if mech == 'GSSAPI':
        # GSSAPI carries a service name instead of a password and always
        # authenticates against the $external virtual database.
        service_name = extra.get('gssapiservicename', 'mongodb')
        return (mech, '$external', user, service_name)
    if mech == 'MONGODB-X509':
        # X509 also uses $external and has no password.
        return (mech, '$external', user)
    return (mech, source, user, passwd)
|
||||
|
||||
|
||||
def _password_digest(username, password):
    """Return the MongoDB password digest: md5 of "user:mongo:password".
    """
    if not isinstance(password, str):
        raise TypeError("password must be an instance "
                        "of %s" % (str.__name__,))
    if not password:
        raise TypeError("password can't be empty")
    if not isinstance(username, str):
        raise TypeError("username must be an instance "
                        "of %s" % (str.__name__,))

    digest = _MD5()
    digest.update(("%s:mongo:%s" % (username, password)).encode('utf-8'))
    return str(digest.hexdigest())
|
||||
|
||||
|
||||
def _auth_key(nonce, username, password):
    """Return the MONGODB-CR client key: md5 of nonce + user + digest.
    """
    pw_digest = _password_digest(username, password)
    key_hash = _MD5()
    key_hash.update(("%s%s%s" % (nonce, str(username), pw_digest)).encode('utf-8'))
    return str(key_hash.hexdigest())
|
||||
|
||||
|
||||
def _authenticate_gssapi(credentials, sock_info, cmd_func):
    """Authenticate using GSSAPI.

    Runs the SASL GSSAPI (Kerberos) conversation against the server via
    `cmd_func`.  Raises OperationFailure on any kerberos or protocol error.
    """
    try:
        dummy, username, gsn = credentials
        # Starting here and continuing through the loop below - establish
        # the security context. See RFC 4752, Section 3.1, first paragraph.
        result, ctx = kerberos.authGSSClientInit(gsn + '@' + sock_info.host,
                                                 kerberos.GSS_C_MUTUAL_FLAG)
        if result != kerberos.AUTH_GSS_COMPLETE:
            raise OperationFailure('Kerberos context failed to initialize.')

        try:
            # pykerberos uses a weird mix of exceptions and return values
            # to indicate errors.
            # 0 == continue, 1 == complete, -1 == error
            # Only authGSSClientStep can return 0.
            if kerberos.authGSSClientStep(ctx, '') != 0:
                raise OperationFailure('Unknown kerberos '
                                       'failure in step function.')

            # Start a SASL conversation with mongod/s
            # Note: pykerberos deals with base64 encoded byte strings.
            # Since mongo accepts base64 strings as the payload we don't
            # have to use bson.binary.Binary.
            payload = kerberos.authGSSClientResponse(ctx)
            cmd = SON([('saslStart', 1),
                       ('mechanism', 'GSSAPI'),
                       ('payload', payload),
                       ('autoAuthorize', 1)])
            response, _ = cmd_func(sock_info, '$external', cmd)

            # Limit how many times we loop to catch protocol / library issues
            for _ in range(10):
                result = kerberos.authGSSClientStep(ctx,
                                                    str(response['payload']))
                if result == -1:
                    raise OperationFailure('Unknown kerberos '
                                           'failure in step function.')

                payload = kerberos.authGSSClientResponse(ctx) or ''

                cmd = SON([('saslContinue', 1),
                           ('conversationId', response['conversationId']),
                           ('payload', payload)])
                response, _ = cmd_func(sock_info, '$external', cmd)

                if result == kerberos.AUTH_GSS_COMPLETE:
                    break
            else:
                # for/else: loop exhausted without AUTH_GSS_COMPLETE.
                raise OperationFailure('Kerberos '
                                       'authentication failed to complete.')

            # Once the security context is established actually authenticate.
            # See RFC 4752, Section 3.1, last two paragraphs.
            if kerberos.authGSSClientUnwrap(ctx,
                                            str(response['payload'])) != 1:
                raise OperationFailure('Unknown kerberos '
                                       'failure during GSS_Unwrap step.')

            if kerberos.authGSSClientWrap(ctx,
                                          kerberos.authGSSClientResponse(ctx),
                                          username) != 1:
                raise OperationFailure('Unknown kerberos '
                                       'failure during GSS_Wrap step.')

            payload = kerberos.authGSSClientResponse(ctx)
            cmd = SON([('saslContinue', 1),
                       ('conversationId', response['conversationId']),
                       ('payload', payload)])
            response, _ = cmd_func(sock_info, '$external', cmd)

        finally:
            # Always release the kerberos context, even on failure.
            kerberos.authGSSClientClean(ctx)

    except kerberos.KrbError as exc:
        # Surface kerberos library errors as pymongo OperationFailure.
        raise OperationFailure(str(exc))
|
||||
|
||||
|
||||
def _authenticate_plain(credentials, sock_info, cmd_func):
    """Authenticate using SASL PLAIN (RFC 4616)
    """
    source, username, password = credentials
    # Empty authzid: the payload is "\0authcid\0password" per RFC 4616.
    raw = '\x00%s\x00%s' % (username, password)
    command = SON([
        ('saslStart', 1),
        ('mechanism', 'PLAIN'),
        ('payload', Binary(raw.encode('utf-8'))),
        ('autoAuthorize', 1),
    ])
    cmd_func(sock_info, source, command)
|
||||
|
||||
|
||||
def _authenticate_x509(credentials, sock_info, cmd_func):
    """Authenticate using MONGODB-X509.
    """
    dummy, username = credentials
    # X509 always authenticates against the $external virtual database.
    command = SON([
        ('authenticate', 1),
        ('mechanism', 'MONGODB-X509'),
        ('user', username),
    ])
    cmd_func(sock_info, '$external', command)
|
||||
|
||||
|
||||
def _authenticate_mongo_cr(credentials, sock_info, cmd_func):
    """Authenticate using MONGODB-CR.
    """
    source, username, password = credentials
    # Challenge: fetch a server nonce first.
    nonce_response, _ = cmd_func(sock_info, source, {'getnonce': 1})
    server_nonce = nonce_response['nonce']
    # Response: key derived from nonce, username and the password digest.
    client_key = _auth_key(server_nonce, username, password)
    command = SON([
        ('authenticate', 1),
        ('user', username),
        ('nonce', server_nonce),
        ('key', client_key),
    ])
    cmd_func(sock_info, source, command)
|
||||
|
||||
|
||||
# Dispatch table: mechanism name -> handler used by authenticate().
_AUTH_MAP = {
    'GSSAPI': _authenticate_gssapi,
    'MONGODB-CR': _authenticate_mongo_cr,
    'MONGODB-X509': _authenticate_x509,
    'PLAIN': _authenticate_plain,
}
|
||||
|
||||
|
||||
def authenticate(credentials, sock_info, cmd_func):
    """Authenticate sock_info.

    :Parameters:
      - `credentials`: tuple whose first element is the mechanism name,
        followed by the mechanism-specific fields built by
        _build_credentials_tuple.
      - `sock_info`: the connection to authenticate.
      - `cmd_func`: callable used to run database commands.

    Raises ConfigurationError if the mechanism is unsupported or, for
    GSSAPI, when the kerberos module is not installed.
    """
    mechanism = credentials[0]
    if mechanism == 'GSSAPI' and not HAVE_KERBEROS:
        raise ConfigurationError('The "kerberos" module must be '
                                 'installed to use GSSAPI authentication.')
    auth_func = _AUTH_MAP.get(mechanism)
    if auth_func is None:
        # Bug fix: an unknown mechanism previously surfaced as an opaque
        # "TypeError: 'NoneType' object is not callable".
        raise ConfigurationError(
            'Unsupported authentication mechanism: %s' % (mechanism,))
    auth_func(credentials[1:], sock_info, cmd_func)
|
||||
|
||||
1489
asyncio_mongo/_pymongo/collection.py
Normal file
1489
asyncio_mongo/_pymongo/collection.py
Normal file
File diff suppressed because it is too large
Load Diff
646
asyncio_mongo/_pymongo/common.py
Normal file
646
asyncio_mongo/_pymongo/common.py
Normal file
@@ -0,0 +1,646 @@
|
||||
# Copyright 2011-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
# may not use this file except in compliance with the License. You
|
||||
# may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
|
||||
"""Functions and classes common to multiple pymongo modules."""
|
||||
import sys
|
||||
import warnings
|
||||
from asyncio_mongo._pymongo import read_preferences
|
||||
|
||||
from asyncio_mongo._pymongo.auth import MECHANISMS
|
||||
from asyncio_mongo._pymongo.read_preferences import ReadPreference
|
||||
from asyncio_mongo._pymongo.errors import ConfigurationError
|
||||
|
||||
# Whether the ssl module is usable; validate_cert_reqs consults this.
HAS_SSL = True
try:
    import ssl
except ImportError:
    HAS_SSL = False


# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
    HAS_SSL = False
|
||||
|
||||
|
||||
def raise_config_error(key, dummy):
    """Raise ConfigurationError with the given key name.

    Used as the fallback entry when an option has no validator; the
    second argument (the value) is deliberately ignored.
    """
    raise ConfigurationError("Unknown option %s" % (key,))
|
||||
|
||||
|
||||
def validate_boolean(option, value):
    """Validates that 'value' is 'true' or 'false'.

    Accepts real booleans or the URI strings 'true'/'false'; returns a bool.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        if value in ('true', 'false'):
            return value == 'true'
        raise ConfigurationError("The value of %s must be "
                                 "'true' or 'false'" % (option,))
    raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
|
||||
|
||||
|
||||
def validate_integer(option, value):
    """Validates that 'value' is an integer (or basestring representation).

    Returns an int; numeric strings are converted.
    """
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        if value.isdigit():
            return int(value)
        raise ConfigurationError("The value of %s must be "
                                 "an integer" % (option,))
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))
|
||||
|
||||
|
||||
def validate_positive_integer(option, value):
    """Validate that 'value' is a positive integer.

    Note: zero is accepted (the check is for non-negative values).
    """
    result = validate_integer(option, value)
    if result < 0:
        raise ConfigurationError("The value of %s must be "
                                 "a positive integer" % (option,))
    return result
|
||||
|
||||
|
||||
def validate_readable(option, value):
    """Validates that 'value' is file-like and readable.
    """
    # First make sure it's a string: on py3.3 open(True, 'r') succeeds.
    path = validate_basestring(option, value)
    # Probe readability up front; used for ssl cert checking because the
    # ssl module's own error reporting is poor.
    open(path, 'r').close()
    return path
|
||||
|
||||
|
||||
def validate_cert_reqs(option, value):
    """Validate the cert reqs are valid. It must be None or one of the three
    values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
    if value is None:
        return None
    if not HAS_SSL:
        # Can't compare against ssl constants without the ssl module.
        raise ConfigurationError("The value of %s is set but can't be "
                                 "validated. The ssl module is not available"
                                 % (option,))
    if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
        return value
    raise ConfigurationError("The value of %s must be one of: "
                             "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
                             "`ssl.CERT_REQUIRED" % (option,))
|
||||
|
||||
|
||||
def validate_positive_integer_or_none(option, value):
    """Validate that 'value' is a positive integer or None.
    """
    return None if value is None else validate_positive_integer(option, value)
|
||||
|
||||
|
||||
def validate_basestring(option, value):
    """Validates that 'value' is an instance of `basestring`.
    """
    if not isinstance(value, str):
        raise TypeError("Wrong type for %s, value must be an "
                        "instance of %s" % (option, str.__name__))
    return value
|
||||
|
||||
|
||||
def validate_int_or_basestring(option, value):
    """Validates that 'value' is an integer or string.

    Numeric strings are converted to int; other strings pass through
    (used for e.g. w='majority').
    """
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        return int(value) if value.isdigit() else value
    raise TypeError("Wrong type for %s, value must be an "
                    "integer or a string" % (option,))
|
||||
|
||||
|
||||
def validate_positive_float(option, value):
    """Validates that 'value' is a float, or can be converted to one, and is
    positive.

    Fix: the original eagerly constructed a ConfigurationError on every
    call (including the success path); the exception is now built only
    when actually raised.
    """
    message = "%s must be a positive int or float" % (option,)
    try:
        value = float(value)
    except (ValueError, TypeError):
        raise ConfigurationError(message)

    # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
    # one billion - this is a reasonable approximation for infinity
    if not 0 < value < 1e9:
        raise ConfigurationError(message)

    return value
|
||||
|
||||
|
||||
def validate_timeout_or_none(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds.
    """
    if value is None:
        return None
    # Options are given in ms; internal timeouts are float seconds.
    return validate_positive_float(option, value) / 1000.0
|
||||
|
||||
|
||||
def validate_read_preference(dummy, value):
    """Validate read preference for a ReplicaSetConnection.
    """
    if value not in read_preferences.modes:
        # Also allow the string form of the enum (used by uri_parser).
        try:
            return read_preferences.mongos_enum(value)
        except ValueError:
            raise ConfigurationError("Not a valid read preference")
    return value
|
||||
|
||||
|
||||
def validate_tag_sets(dummy, value):
    """Validate tag sets for a ReplicaSetConnection.

    None means "no tag constraints" and is normalized to ``[{}]``.
    """
    if value is None:
        return [{}]

    if not isinstance(value, list):
        raise ConfigurationError((
            "Tag sets %s invalid, must be a list" ) % repr(value))
    if not value:
        raise ConfigurationError((
            "Tag sets %s invalid, must be None or contain at least one set of"
            " tags") % repr(value))

    for tag_set in value:
        if not isinstance(tag_set, dict):
            raise ConfigurationError(
                "Tag set %s invalid, must be a dict" % repr(tag_set))

    return value
|
||||
|
||||
|
||||
def validate_auth_mechanism(option, value):
    """Validate the authMechanism URI option.
    """
    if value in MECHANISMS:
        return value
    raise ConfigurationError("%s must be in "
                             "%s" % (option, MECHANISMS))
|
||||
|
||||
|
||||
# Maps a lowercased option name to its validator function.
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout
VALIDATORS = {
    'replicaset': validate_basestring,
    'slaveok': validate_boolean,
    'slave_okay': validate_boolean,
    'safe': validate_boolean,
    'w': validate_int_or_basestring,
    'wtimeout': validate_integer,
    'wtimeoutms': validate_integer,
    'fsync': validate_boolean,
    'j': validate_boolean,
    'journal': validate_boolean,
    'connecttimeoutms': validate_timeout_or_none,
    'sockettimeoutms': validate_timeout_or_none,
    'waitqueuetimeoutms': validate_timeout_or_none,
    'waitqueuemultiple': validate_positive_integer_or_none,
    'ssl': validate_boolean,
    'ssl_keyfile': validate_readable,
    'ssl_certfile': validate_readable,
    'ssl_cert_reqs': validate_cert_reqs,
    'ssl_ca_certs': validate_readable,
    'readpreference': validate_read_preference,
    'read_preference': validate_read_preference,
    'tag_sets': validate_tag_sets,
    'secondaryacceptablelatencyms': validate_positive_float,
    'secondary_acceptable_latency_ms': validate_positive_float,
    'auto_start_request': validate_boolean,
    'use_greenlets': validate_boolean,
    'authmechanism': validate_auth_mechanism,
    'authsource': validate_basestring,
    'gssapiservicename': validate_basestring,
}
|
||||
|
||||
|
||||
# Option names accepted by validate_auth_option.
_AUTH_OPTIONS = frozenset(['gssapiservicename'])
|
||||
|
||||
|
||||
def validate_auth_option(option, value):
    """Validate optional authentication parameters.
    """
    lower, validated = validate(option, value)
    if lower not in _AUTH_OPTIONS:
        raise ConfigurationError('Unknown '
                                 'authentication option: %s' % (option,))
    return lower, validated
|
||||
|
||||
|
||||
def validate(option, value):
    """Generic validation function.

    Returns the lowercased option name and the validated value; unknown
    options fall through to raise_config_error.
    """
    key = option.lower()
    handler = VALIDATORS.get(key, raise_config_error)
    return key, handler(option, value)
|
||||
|
||||
|
||||
# getlasterror / write-concern option names accepted by WriteConcern.
SAFE_OPTIONS = frozenset([
    'w',
    'wtimeout',
    'wtimeoutms',
    'fsync',
    'j',
    'journal'
])
|
||||
|
||||
|
||||
class WriteConcern(dict):
    """A dict subclass that validates write concern options on assignment.
    """

    def __init__(self, *args, **kwargs):
        """A subclass of dict that overrides __setitem__ to
        validate write concern options.
        """
        super(WriteConcern, self).__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        # Reject anything that isn't a known getlasterror option, then
        # normalize and validate the pair before storing it.
        if key not in SAFE_OPTIONS:
            raise ConfigurationError("%s is not a valid write "
                                     "concern option." % (key,))
        lower, validated = validate(key, value)
        super(WriteConcern, self).__setitem__(lower, validated)
|
||||
|
||||
|
||||
class BaseObject(object):
|
||||
"""A base class that provides attributes and methods common
|
||||
to multiple pymongo classes.
|
||||
|
||||
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO 10GEN
|
||||
"""
|
||||
|
||||
    def __init__(self, **options):
        """Validate ``**options`` and initialize read/write settings.

        Raises ConfigurationError for invalid option values or for
        PRIMARY read preference combined with tag sets.
        """
        # Defaults; __set_options below overwrites from **options.
        self.__slave_okay = False
        self.__read_pref = ReadPreference.PRIMARY
        self.__tag_sets = [{}]
        self.__secondary_acceptable_latency_ms = 15
        self.__safe = None
        self.__write_concern = WriteConcern()
        self.__set_options(options)
        if (self.__read_pref == ReadPreference.PRIMARY
            and self.__tag_sets != [{}]
        ):
            raise ConfigurationError(
                "ReadPreference PRIMARY cannot be combined with tags")

        # If safe hasn't been implicitly set by write concerns then set it.
        if self.__safe is None:
            if options.get("w") == 0:
                self.__safe = False
            else:
                self.__safe = validate_boolean('safe', options.get("safe", True))
        # Note: 'safe' is always passed by Connection and ReplicaSetConnection
        # Always do the most "safe" thing, but warn about conflicts.
        if self.__safe and options.get('w') == 0:
            warnings.warn("Conflicting write concerns. 'w' set to 0 "
                          "but other options have enabled write concern. "
                          "Please set 'w' to a value other than 0.",
                          UserWarning)
|
||||
|
||||
    def __set_safe_option(self, option, value):
        """Validates and sets getlasterror options for this
        object (Connection, Database, Collection, etc.)

        Passing ``value=None`` removes the option instead of setting it.
        """
        if value is None:
            self.__write_concern.pop(option, None)
        else:
            self.__write_concern[option] = value
            # Any acknowledged option implies safe=True; w=0 alone is the
            # explicit "unacknowledged" setting and must not flip it.
            if option != "w" or value != 0:
                self.__safe = True
|
||||
|
||||
    def __set_options(self, options):
        """Validates and sets all options passed to this object.

        Options not matched below are silently ignored here (there is no
        else branch); validation of unknown names happens elsewhere.
        """
        for option, value in options.items():
            if option in ('slave_okay', 'slaveok'):
                self.__slave_okay = validate_boolean(option, value)
            elif option in ('read_preference', "readpreference"):
                self.__read_pref = validate_read_preference(option, value)
            elif option == 'tag_sets':
                self.__tag_sets = validate_tag_sets(option, value)
            elif option in (
                'secondaryacceptablelatencyms',
                'secondary_acceptable_latency_ms'
            ):
                self.__secondary_acceptable_latency_ms = \
                    validate_positive_float(option, value)
            elif option in SAFE_OPTIONS:
                # Normalize aliases: journal -> j, wtimeoutms -> wtimeout.
                if option == 'journal':
                    self.__set_safe_option('j', value)
                elif option == 'wtimeoutms':
                    self.__set_safe_option('wtimeout', value)
                else:
                    self.__set_safe_option(option, value)
|
||||
|
||||
    def __set_write_concern(self, value):
        """Property setter for write_concern."""
        if not isinstance(value, dict):
            raise ConfigurationError("write_concern must be an "
                                     "instance of dict or a subclass.")
        # Make a copy here to avoid users accidentally setting the
        # same dict on multiple instances.
        wc = WriteConcern()
        for k, v in value.items():
            # Make sure we validate each option (WriteConcern.__setitem__).
            wc[k] = v
        self.__write_concern = wc
|
||||
|
||||
    def __get_write_concern(self):
        """The default write concern for this instance.

        Supports dict style access for getting/setting write concern
        options. Valid options include:

        - `w`: (integer or string) If this is a replica set, write operations
          will block until they have been replicated to the specified number
          or tagged set of servers. `w=<int>` always includes the replica set
          primary (e.g. w=3 means write to the primary and wait until
          replicated to **two** secondaries). **Setting w=0 disables write
          acknowledgement and all other write concern options.**
        - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
          in milliseconds to control how long to wait for write propagation
          to complete. If replication does not complete in the given
          timeframe, a timeout exception is raised.
        - `j`: If ``True`` block until write operations have been committed
          to the journal. Ignored if the server is running without journaling.
        - `fsync`: If ``True`` force the database to fsync all files before
          returning. When used with `j` the server awaits the next group
          commit before returning.

        >>> m = pymongo.MongoClient()
        >>> m.write_concern
        {}
        >>> m.write_concern = {'w': 2, 'wtimeout': 1000}
        >>> m.write_concern
        {'wtimeout': 1000, 'w': 2}
        >>> m.write_concern['j'] = True
        >>> m.write_concern
        {'wtimeout': 1000, 'j': True, 'w': 2}
        >>> m.write_concern = {'j': True}
        >>> m.write_concern
        {'j': True}
        >>> # Disable write acknowledgement and write concern
        ...
        >>> m.write_concern['w'] = 0

        .. note:: Accessing :attr:`write_concern` returns its value
           (a subclass of :class:`dict`), not a copy.

        .. warning:: If you are using :class:`~pymongo.connection.Connection`
           or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
           make sure you explicitly set ``w`` to 1 (or a greater value) or
           :attr:`safe` to ``True``. Unlike calling
           :meth:`set_lasterror_options`, setting an option in
           :attr:`write_concern` does not implicitly set :attr:`safe`
           to ``True``.
        """
        # To support dict style access we have to return the actual
        # WriteConcern here, not a copy.
        return self.__write_concern

    write_concern = property(__get_write_concern, __set_write_concern)
|
||||
|
||||
    def __get_slave_okay(self):
        """DEPRECATED. Use :attr:`read_preference` instead.

        .. versionchanged:: 2.1
           Deprecated slave_okay.
        .. versionadded:: 2.0
        """
        return self.__slave_okay

    def __set_slave_okay(self, value):
        """Property setter for slave_okay"""
        # Setting (unlike reading) the deprecated attribute warns.
        warnings.warn("slave_okay is deprecated. Please use "
                      "read_preference instead.", DeprecationWarning,
                      stacklevel=2)
        self.__slave_okay = validate_boolean('slave_okay', value)

    slave_okay = property(__get_slave_okay, __set_slave_okay)
|
||||
|
||||
    def __get_read_pref(self):
        """The read preference mode for this instance.

        See :class:`~pymongo.read_preferences.ReadPreference` for available options.

        .. versionadded:: 2.1
        """
        return self.__read_pref

    def __set_read_pref(self, value):
        """Property setter for read_preference"""
        self.__read_pref = validate_read_preference('read_preference', value)

    read_preference = property(__get_read_pref, __set_read_pref)
|
||||
|
||||
    def __get_acceptable_latency(self):
        """Any replica-set member whose ping time is within
        secondary_acceptable_latency_ms of the nearest member may accept
        reads. Defaults to 15 milliseconds.

        See :class:`~pymongo.read_preferences.ReadPreference`.

        .. versionadded:: 2.3

        .. note:: ``secondary_acceptable_latency_ms`` is ignored when talking to a
           replica set *through* a mongos. The equivalent is the localThreshold_ command
           line option.

        .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
        """
        return self.__secondary_acceptable_latency_ms

    def __set_acceptable_latency(self, value):
        """Property setter for secondary_acceptable_latency_ms"""
        self.__secondary_acceptable_latency_ms = (validate_positive_float(
            'secondary_acceptable_latency_ms', value))

    secondary_acceptable_latency_ms = property(
        __get_acceptable_latency, __set_acceptable_latency)
|
||||
|
||||
    def __get_tag_sets(self):
        """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
        read only from members whose ``dc`` tag has the value ``"ny"``.
        To specify a priority-order for tag sets, provide a list of
        tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
        set, ``{}``, means "read from any member that matches the mode,
        ignoring tags." ReplicaSetConnection tries each set of tags in turn
        until it finds a set of tags with at least one matching member.

        .. seealso:: `Data-Center Awareness
           <http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_

        .. versionadded:: 2.3
        """
        return self.__tag_sets

    def __set_tag_sets(self, value):
        """Property setter for tag_sets"""
        self.__tag_sets = validate_tag_sets('tag_sets', value)

    tag_sets = property(__get_tag_sets, __set_tag_sets)
|
||||
|
||||
    def __get_safe(self):
        """**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.

        Use getlasterror with every write operation?

        .. versionadded:: 2.0
        """
        return self.__safe

    def __set_safe(self, value):
        """Property setter for safe"""
        # Setting (unlike reading) the deprecated attribute warns.
        warnings.warn("safe is deprecated. Please use the"
                      " 'w' write_concern option instead.",
                      DeprecationWarning, stacklevel=2)
        self.__safe = validate_boolean('safe', value)

    safe = property(__get_safe, __set_safe)
|
||||
|
||||
    def get_lasterror_options(self):
        """DEPRECATED: Use :attr:`write_concern` instead.

        Returns a dict of the getlasterror options set on this instance.

        .. versionchanged:: 2.4
           Deprecated get_lasterror_options.
        .. versionadded:: 2.0
        """
        warnings.warn("get_lasterror_options is deprecated. Please use "
                      "write_concern instead.", DeprecationWarning,
                      stacklevel=2)
        # Return a copy so callers can't mutate our write concern.
        return self.__write_concern.copy()
|
||||
|
||||
def set_lasterror_options(self, **kwargs):
    """DEPRECATED: Use :attr:`write_concern` instead.

    Set getlasterror options for this instance.

    Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
    and fsync=<bool>. Implies safe=True.

    :Parameters:
      - `**kwargs`: Options should be passed as keyword
        arguments (e.g. w=2, fsync=True)

    .. versionchanged:: 2.4
       Deprecated set_lasterror_options.
    .. versionadded:: 2.0
    """
    message = ("set_lasterror_options is deprecated. Please use "
               "write_concern instead.")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    for option, setting in kwargs.items():
        self.__set_safe_option(option, setting)
|
||||
|
||||
def unset_lasterror_options(self, *options):
    """DEPRECATED: Use :attr:`write_concern` instead.

    Unset getlasterror options for this instance.

    If no options are passed unsets all getlasterror options.
    This does not set `safe` to False.

    :Parameters:
      - `*options`: The list of options to unset.

    .. versionchanged:: 2.4
       Deprecated unset_lasterror_options.
    .. versionadded:: 2.0
    """
    message = ("unset_lasterror_options is deprecated. Please use "
               "write_concern instead.")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    if options:
        # Remove only the named options, ignoring ones not present.
        for option in options:
            self.__write_concern.pop(option, None)
    else:
        # No names given: reset to a pristine write concern.
        self.__write_concern = WriteConcern()
|
||||
|
||||
def _get_wc_override(self):
    """Get write concern override.

    Used in internal methods that **must** do acknowledged write ops.
    We don't want to override user write concern options if write concern
    is already enabled, so return an empty override in that case and
    force ``w=1`` otherwise.
    """
    already_acknowledged = (self.safe
                            and self.__write_concern.get('w') != 0)
    return {} if already_acknowledged else {'w': 1}
|
||||
|
||||
def _get_write_mode(self, safe=None, **options):
    """Get the current write mode.

    Determines if the current write is safe or not based on the
    passed in or inherited safe value, write_concern values, or
    passed options.

    Returns a two-item tuple: whether to acknowledge the write (send
    getlasterror), and the getlasterror options dict to use for it.

    :Parameters:
      - `safe`: check that the operation succeeded?
      - `**options`: overriding write concern options.

    .. versionadded:: 2.3
    """
    # Don't ever send w=1 to the server.
    # (w=1 is the server's default, so sending it is redundant.)
    def pop1(dct):
        if dct.get('w') == 1:
            dct.pop('w')
        return dct

    if safe is not None:
        warnings.warn("The safe parameter is deprecated. Please use "
                      "write concern options instead.", DeprecationWarning,
                      stacklevel=3)
        validate_boolean('safe', safe)

    # Passed options override collection level defaults.
    if safe is not None or options:
        if safe or options:
            if not options:
                options = self.__write_concern.copy()
                # Backwards compatability edge case. Call getLastError
                # with no options if safe=True was passed but collection
                # level defaults have been disabled with w=0.
                # These should be equivalent:
                # Connection(w=0).foo.bar.insert({}, safe=True)
                # MongoClient(w=0).foo.bar.insert({}, w=1)
                if options.get('w') == 0:
                    return True, {}
            # Passing w=0 overrides passing safe=True.
            return options.get('w') != 0, pop1(options)
        # safe=False was passed explicitly: unacknowledged write.
        return False, {}

    # Fall back to collection level defaults.
    # w=0 takes precedence over self.safe = True
    if self.__write_concern.get('w') == 0:
        return False, {}
    elif self.safe or self.__write_concern.get('w', 0) != 0:
        return True, pop1(self.__write_concern.copy())

    return False, {}
|
||||
# ---- New file: asyncio_mongo/_pymongo/connection.py (231 lines) ----
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
# may not use this file except in compliance with the License. You
|
||||
# may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
"""Tools for connecting to MongoDB.
|
||||
|
||||
.. warning::
|
||||
**DEPRECATED:** Please use :mod:`~pymongo.mongo_client` instead.
|
||||
|
||||
.. seealso:: Module :mod:`~pymongo.master_slave_connection` for
|
||||
connecting to master-slave clusters, and
|
||||
:doc:`/examples/high_availability` for an example of how to connect
|
||||
to a replica set, or specify a list of mongos instances for automatic
|
||||
failover.
|
||||
|
||||
To get a :class:`~pymongo.database.Database` instance from a
|
||||
:class:`Connection` use either dictionary-style or attribute-style
|
||||
access:
|
||||
|
||||
.. doctest::
|
||||
|
||||
>>> from asyncio_mongo._pymongo import Connection
|
||||
>>> c = Connection()
|
||||
>>> c.test_database
|
||||
Database(Connection('localhost', 27017), u'test_database')
|
||||
>>> c['test-database']
|
||||
Database(Connection('localhost', 27017), u'test-database')
|
||||
"""
|
||||
from asyncio_mongo._pymongo.mongo_client import MongoClient
|
||||
from asyncio_mongo._pymongo.errors import ConfigurationError
|
||||
|
||||
|
||||
class Connection(MongoClient):
    """Connection to MongoDB.

    .. warning:: **DEPRECATED:** Please use
       :class:`~pymongo.mongo_client.MongoClient` instead.
    """

    def __init__(self, host=None, port=None, max_pool_size=None,
                 network_timeout=None, document_class=dict,
                 tz_aware=False, _connect=True, **kwargs):
        """Create a new connection to a single MongoDB instance at *host:port*.

        .. warning::
           **DEPRECATED:** :class:`Connection` is deprecated. Please
           use :class:`~pymongo.mongo_client.MongoClient` instead.

        The resultant connection object has connection-pooling built in
        and performs auto-reconnection when necessary. If an operation
        fails because of a connection error,
        :class:`~pymongo.errors.ConnectionFailure` is raised; if
        auto-reconnection will be performed,
        :class:`~pymongo.errors.AutoReconnect` is raised and application
        code should handle it and continue.

        Unlike :class:`~pymongo.mongo_client.MongoClient`, unless the
        caller says otherwise a :class:`Connection` defaults to
        ``safe=False`` (unacknowledged writes) and
        ``auto_start_request=True``.

        Raises :class:`TypeError` if port is not an instance of ``int``.
        Raises :class:`~pymongo.errors.ConnectionFailure` if the
        connection cannot be made.

        :Parameters:
          - `host` (optional): hostname or IP address, a full `mongodb
            URI <http://dochub.mongodb.org/core/connections>`_, or a
            list of hostnames / URIs. Ports given in URIs override the
            `port` parameter. IPv6 literals must be wrapped in '[' and
            ']' per RFC 2732 (e.g. '[::1]' for localhost).
          - `port` (optional): port number on which to connect
          - `max_pool_size` (optional): maximum number of connections the
            pool will open simultaneously (unlimited by default)
          - `network_timeout` (optional): timeout (in seconds) for socket
            operations - default is no timeout
          - `document_class` (optional): default class to use for
            documents returned from queries on this connection
          - `tz_aware` (optional): if ``True``,
            :class:`~datetime.datetime` values returned by this
            connection will be timezone aware (otherwise naive)

        All other keyword arguments (write concern options such as `w`,
        `wtimeout`, `j`, `fsync`; replica-set options such as
        `replicaSet`, `read_preference`, `tag_sets`; SSL options such as
        `ssl`, `ssl_certfile`; and connection options such as
        `socketTimeoutMS`, `connectTimeoutMS`, `use_greenlets`) are
        passed through to :class:`~pymongo.mongo_client.MongoClient` -
        see its documentation for details.

        .. seealso:: :meth:`end_request`

        .. mongodoc:: connections
        """
        if network_timeout is not None:
            if (not isinstance(network_timeout, (int, float)) or
                    network_timeout <= 0):
                raise ConfigurationError("network_timeout must "
                                         "be a positive integer")
            kwargs['socketTimeoutMS'] = network_timeout * 1000

        # Apply legacy Connection defaults unless the caller overrides.
        kwargs.setdefault('auto_start_request', True)
        kwargs.setdefault('safe', False)

        super(Connection, self).__init__(host, port, max_pool_size,
                                         document_class, tz_aware,
                                         _connect, **kwargs)

    def __repr__(self):
        """Show host/port for a single node, else the whole node list."""
        if len(self.nodes) == 1:
            return "Connection(%r, %r)" % (self.host, self.port)
        return "Connection(%r)" % ["%s:%d" % n for n in self.nodes]

    def __next__(self):
        """Connection objects are not iterable."""
        raise TypeError("'Connection' object is not iterable")
|
||||
# ---- New file: asyncio_mongo/_pymongo/cursor.py (963 lines) ----
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Cursor class to iterate over Mongo query results."""
|
||||
import copy
|
||||
from collections import deque
|
||||
|
||||
from asyncio_mongo._bson import RE_TYPE
|
||||
from asyncio_mongo._bson.code import Code
|
||||
from asyncio_mongo._bson.son import SON
|
||||
from asyncio_mongo._pymongo import helpers, message, read_preferences
|
||||
from asyncio_mongo._pymongo.read_preferences import ReadPreference, secondary_ok_commands
|
||||
from asyncio_mongo._pymongo.errors import (InvalidOperation,
|
||||
AutoReconnect)
|
||||
|
||||
# Bit flags, by option name, OR-ed into the query options bitmask
# (see Cursor.__query_options / add_option / remove_option).
_QUERY_OPTIONS = {
    "tailable_cursor": 2,
    "slave_okay": 4,
    "oplog_replay": 8,
    "no_timeout": 16,
    "await_data": 32,
    "exhaust": 64,
    "partial": 128}
|
||||
|
||||
|
||||
# This has to be an old style class due to
# http://bugs.jython.org/issue1057
class _SocketManager:
    """Used with exhaust cursors to ensure the socket is returned.

    The wrapped socket is handed back to its pool exactly once, either
    via an explicit :meth:`close` or when this manager is collected.
    """

    def __init__(self, sock, pool):
        self.sock = sock
        self.pool = pool
        self.__closed = False

    def __del__(self):
        # Safety net: return the socket even if close() was never called.
        self.close()

    def close(self):
        """Return this instance's socket to the connection pool."""
        if self.__closed:
            return
        self.__closed = True
        self.pool.maybe_return_socket(self.sock)
        self.sock, self.pool = None, None
|
||||
|
||||
|
||||
# TODO might be cool to be able to do find().include("foo") or
|
||||
# find().exclude(["bar", "baz"]) or find().slice("a", 1, 2) as an
|
||||
# alternative to the fields specifier.
|
||||
class Cursor(object):
|
||||
"""A cursor / iterator over Mongo query results.
|
||||
"""
|
||||
|
||||
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY, tag_sets=None,
             secondary_acceptable_latency_ms=None, exhaust=False,
             _must_use_master=False, _uuid_subtype=None,
             _first_batch=None, _cursor_id=None,
             **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # FIX: tag_sets previously defaulted to the mutable literal [{}],
    # which is evaluated once and shared by every Cursor instance -
    # mutating one cursor's tag sets would silently affect all cursors.
    # Default to None and build a fresh list per cursor instead;
    # callers that passed tag_sets explicitly are unaffected.
    if tag_sets is None:
        tag_sets = [{}]

    self.__id = _cursor_id
    # A cursor wrapping an existing server-side cursor id (created by a
    # command) can't be cloned or rewound - see
    # __check_not_command_cursor.
    self.__is_command_cursor = _cursor_id is not None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")

    if fields is not None:
        # An empty projection still needs to return _id.
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = 0

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__as_class = as_class
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__tz_aware = collection.database.connection.tz_aware
    self.__must_use_master = _must_use_master
    self.__uuid_subtype = _uuid_subtype or collection.uuid_subtype

    self.__data = deque(_first_batch or [])
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
|
||||
|
||||
@property
|
||||
def collection(self):
|
||||
"""The :class:`~pymongo.collection.Collection` that this
|
||||
:class:`Cursor` is iterating.
|
||||
|
||||
.. versionadded:: 1.1
|
||||
"""
|
||||
return self.__collection
|
||||
|
||||
def __del__(self):
|
||||
if self.__id and not self.__killed:
|
||||
self.__die()
|
||||
|
||||
def rewind(self):
|
||||
"""Rewind this cursor to its unevaluated state.
|
||||
|
||||
Reset this cursor if it has been partially or completely evaluated.
|
||||
Any options that are present on the cursor will remain in effect.
|
||||
Future iterating performed on this cursor will cause new queries to
|
||||
be sent to the server, even if the resultant data has already been
|
||||
retrieved by this cursor.
|
||||
"""
|
||||
self.__check_not_command_cursor('rewind')
|
||||
self.__data = deque()
|
||||
self.__id = None
|
||||
self.__connection_id = None
|
||||
self.__retrieved = 0
|
||||
self.__killed = False
|
||||
|
||||
return self
|
||||
|
||||
def clone(self):
    """Get a clone of this cursor.

    Returns a new Cursor instance with options matching those that have
    been set on the current instance. The clone will be completely
    unevaluated, even if the current instance has been partially or
    completely evaluated.
    """
    return self.__clone(deepcopy=True)
|
||||
|
||||
def __clone(self, deepcopy=True):
    """Build a fresh, unevaluated Cursor carrying over this cursor's
    query options, optionally deep-copying the copied values."""
    self.__check_not_command_cursor('clone')
    clone = Cursor(self.__collection)
    # Only these (name-mangled) attributes are carried over; execution
    # state (data, id, retrieved, killed) is deliberately left fresh.
    values_to_clone = ("spec", "fields", "skip", "limit",
                       "snapshot", "ordering", "explain", "hint",
                       "batch_size", "max_scan", "as_class", "slave_okay",
                       "manipulate", "read_preference", "tag_sets",
                       "secondary_acceptable_latency_ms",
                       "must_use_master", "uuid_subtype", "query_flags",
                       "kwargs")
    data = {k: v for k, v in self.__dict__.items()
            if k.startswith('_Cursor__') and k[9:] in values_to_clone}
    if deepcopy:
        data = self.__deepcopy(data)
    clone.__dict__.update(data)
    return clone
|
||||
|
||||
def __die(self):
    """Closes this cursor.

    A live exhaust cursor's socket is closed outright (the only way to
    stop the server from streaming further batches); otherwise the
    server-side cursor is killed through the connection.
    """
    if self.__id and not self.__killed:
        if self.__exhaust and self.__exhaust_mgr:
            # If this is an exhaust cursor and we haven't completely
            # exhausted the result set we *must* close the socket
            # to stop the server from sending more data.
            self.__exhaust_mgr.sock.close()
        else:
            connection = self.__collection.database.connection
            if self.__connection_id is not None:
                connection.close_cursor(self.__id, self.__connection_id)
            else:
                connection.close_cursor(self.__id)
    if self.__exhaust and self.__exhaust_mgr:
        # Return the exhaust socket's manager resources to the pool.
        self.__exhaust_mgr.close()
    self.__killed = True
|
||||
|
||||
def close(self):
|
||||
"""Explicitly close / kill this cursor. Required for PyPy, Jython and
|
||||
other Python implementations that don't use reference counting
|
||||
garbage collection.
|
||||
"""
|
||||
self.__die()
|
||||
|
||||
def __query_spec(self):
    """Get the spec to use for a query.

    Merges cursor options ($orderby, $explain, $hint, $snapshot,
    $maxScan, $readPreference) into the user's spec, wrapping it in
    $query when needed. Returns the spec unchanged when no operators
    apply.
    """
    operators = {}
    if self.__ordering:
        operators["$orderby"] = self.__ordering
    if self.__explain:
        operators["$explain"] = True
    if self.__hint:
        operators["$hint"] = self.__hint
    if self.__snapshot:
        operators["$snapshot"] = True
    if self.__max_scan:
        operators["$maxScan"] = self.__max_scan
    # Only set $readPreference if it's something other than
    # PRIMARY to avoid problems with mongos versions that
    # don't support read preferences.
    if (self.__collection.database.connection.is_mongos and
        self.__read_preference != ReadPreference.PRIMARY):

        has_tags = self.__tag_sets and self.__tag_sets != [{}]

        # For maximum backwards compatibility, don't set $readPreference
        # for SECONDARY_PREFERRED unless tags are in use. Just rely on
        # the slaveOkay bit (set automatically if read preference is not
        # PRIMARY), which has the same behavior.
        if (self.__read_preference != ReadPreference.SECONDARY_PREFERRED or
            has_tags):

            read_pref = {
                'mode': read_preferences.mongos_mode(self.__read_preference)
            }
            if has_tags:
                read_pref['tags'] = self.__tag_sets

            operators['$readPreference'] = read_pref

    if operators:
        # Make a shallow copy so we can cleanly rewind or clone.
        spec = self.__spec.copy()

        # Only commands that can be run on secondaries should have any
        # operators added to the spec. Command queries can be issued
        # by db.command or calling find_one on $cmd directly
        if self.collection.name == "$cmd":
            # Don't change commands that can't be sent to secondaries
            command_name = spec and list(spec.keys())[0].lower() or ""
            if command_name not in secondary_ok_commands:
                return spec
            elif command_name == 'mapreduce':
                # mapreduce shouldn't be changed if its not inline
                out = spec.get('out')
                if not isinstance(out, dict) or not out.get('inline'):
                    return spec

        # White-listed commands must be wrapped in $query.
        if "$query" not in spec:
            # $query has to come first
            spec = SON([("$query", spec)])

        if not isinstance(spec, SON):
            # Ensure the spec is SON. As order is important this will
            # ensure its set before merging in any extra operators.
            spec = SON(spec)

        spec.update(operators)
        return spec
    # Have to wrap with $query if "query" is the first key.
    # We can't just use $query anytime "query" is a key as
    # that breaks commands like count and find_and_modify.
    # Checking spec.keys()[0] covers the case that the spec
    # was passed as an instance of SON or OrderedDict.
    elif ("query" in self.__spec and
          (len(self.__spec) == 1 or list(self.__spec.keys())[0] == "query")):
        return SON({"$query": self.__spec})

    return self.__spec
|
||||
|
||||
def __query_options(self):
    """Get the query options string to use for this query."""
    options = self.__query_flags
    # Non-PRIMARY read preference implies the slaveOkay bit.
    slave_ok = (self.__slave_okay
                or self.__read_preference != ReadPreference.PRIMARY)
    if slave_ok:
        options |= _QUERY_OPTIONS["slave_okay"]
    return options
|
||||
|
||||
def __check_okay_to_chain(self):
|
||||
"""Check if it is okay to chain more options onto this cursor.
|
||||
"""
|
||||
if self.__retrieved or self.__id is not None:
|
||||
raise InvalidOperation("cannot set options after executing query")
|
||||
|
||||
def __check_not_command_cursor(self, method_name):
|
||||
"""Check if calling a method on this cursor is valid.
|
||||
"""
|
||||
if self.__is_command_cursor:
|
||||
raise InvalidOperation(
|
||||
"cannot call %s on a command cursor" % method_name)
|
||||
|
||||
def add_option(self, mask):
|
||||
"""Set arbitary query flags using a bitmask.
|
||||
|
||||
To set the tailable flag:
|
||||
cursor.add_option(2)
|
||||
"""
|
||||
if not isinstance(mask, int):
|
||||
raise TypeError("mask must be an int")
|
||||
self.__check_okay_to_chain()
|
||||
|
||||
if mask & _QUERY_OPTIONS["slave_okay"]:
|
||||
self.__slave_okay = True
|
||||
if mask & _QUERY_OPTIONS["exhaust"]:
|
||||
if self.__limit:
|
||||
raise InvalidOperation("Can't use limit and exhaust together.")
|
||||
if self.__collection.database.connection.is_mongos:
|
||||
raise InvalidOperation('Exhaust cursors are '
|
||||
'not supported by mongos')
|
||||
self.__exhaust = True
|
||||
|
||||
self.__query_flags |= mask
|
||||
return self
|
||||
|
||||
def remove_option(self, mask):
|
||||
"""Unset arbitrary query flags using a bitmask.
|
||||
|
||||
To unset the tailable flag:
|
||||
cursor.remove_option(2)
|
||||
"""
|
||||
if not isinstance(mask, int):
|
||||
raise TypeError("mask must be an int")
|
||||
self.__check_okay_to_chain()
|
||||
|
||||
if mask & _QUERY_OPTIONS["slave_okay"]:
|
||||
self.__slave_okay = False
|
||||
if mask & _QUERY_OPTIONS["exhaust"]:
|
||||
self.__exhaust = False
|
||||
|
||||
self.__query_flags &= ~mask
|
||||
return self
|
||||
|
||||
def limit(self, limit):
|
||||
"""Limits the number of results to be returned by this cursor.
|
||||
|
||||
Raises TypeError if limit is not an instance of int. Raises
|
||||
InvalidOperation if this cursor has already been used. The
|
||||
last `limit` applied to this cursor takes precedence. A limit
|
||||
of ``0`` is equivalent to no limit.
|
||||
|
||||
:Parameters:
|
||||
- `limit`: the number of results to return
|
||||
|
||||
.. mongodoc:: limit
|
||||
"""
|
||||
if not isinstance(limit, int):
|
||||
raise TypeError("limit must be an int")
|
||||
if self.__exhaust:
|
||||
raise InvalidOperation("Can't use limit and exhaust together.")
|
||||
self.__check_okay_to_chain()
|
||||
|
||||
self.__empty = False
|
||||
self.__limit = limit
|
||||
return self
|
||||
|
||||
def batch_size(self, batch_size):
|
||||
"""Limits the number of documents returned in one batch. Each batch
|
||||
requires a round trip to the server. It can be adjusted to optimize
|
||||
performance and limit data transfer.
|
||||
|
||||
.. note:: batch_size can not override MongoDB's internal limits on the
|
||||
amount of data it will return to the client in a single batch (i.e
|
||||
if you set batch size to 1,000,000,000, MongoDB will currently only
|
||||
return 4-16MB of results per batch).
|
||||
|
||||
Raises :class:`TypeError` if `batch_size` is not an instance
|
||||
of :class:`int`. Raises :class:`ValueError` if `batch_size` is
|
||||
less than ``0``. Raises
|
||||
:class:`~pymongo.errors.InvalidOperation` if this
|
||||
:class:`Cursor` has already been used. The last `batch_size`
|
||||
applied to this cursor takes precedence.
|
||||
|
||||
:Parameters:
|
||||
- `batch_size`: The size of each batch of results requested.
|
||||
|
||||
.. versionadded:: 1.9
|
||||
"""
|
||||
if not isinstance(batch_size, int):
|
||||
raise TypeError("batch_size must be an int")
|
||||
if batch_size < 0:
|
||||
raise ValueError("batch_size must be >= 0")
|
||||
self.__check_okay_to_chain()
|
||||
|
||||
self.__batch_size = batch_size == 1 and 2 or batch_size
|
||||
return self
|
||||
|
||||
def skip(self, skip):
|
||||
"""Skips the first `skip` results of this cursor.
|
||||
|
||||
Raises TypeError if skip is not an instance of int. Raises
|
||||
InvalidOperation if this cursor has already been used. The last `skip`
|
||||
applied to this cursor takes precedence.
|
||||
|
||||
:Parameters:
|
||||
- `skip`: the number of results to skip
|
||||
"""
|
||||
if not isinstance(skip, int):
|
||||
raise TypeError("skip must be an int")
|
||||
self.__check_okay_to_chain()
|
||||
|
||||
self.__skip = skip
|
||||
return self
|
||||
|
||||
def __getitem__(self, index):
|
||||
"""Get a single document or a slice of documents from this cursor.
|
||||
|
||||
Raises :class:`~pymongo.errors.InvalidOperation` if this
|
||||
cursor has already been used.
|
||||
|
||||
To get a single document use an integral index, e.g.::
|
||||
|
||||
>>> db.test.find()[50]
|
||||
|
||||
An :class:`IndexError` will be raised if the index is negative
|
||||
or greater than the amount of documents in this cursor. Any
|
||||
limit previously applied to this cursor will be ignored.
|
||||
|
||||
To get a slice of documents use a slice index, e.g.::
|
||||
|
||||
>>> db.test.find()[20:25]
|
||||
|
||||
This will return this cursor with a limit of ``5`` and skip of
|
||||
``20`` applied. Using a slice index will override any prior
|
||||
limits or skips applied to this cursor (including those
|
||||
applied through previous calls to this method). Raises
|
||||
:class:`IndexError` when the slice has a step, a negative
|
||||
start value, or a stop value less than or equal to the start
|
||||
value.
|
||||
|
||||
:Parameters:
|
||||
- `index`: An integer or slice index to be applied to this cursor
|
||||
"""
|
||||
self.__check_okay_to_chain()
|
||||
self.__empty = False
|
||||
if isinstance(index, slice):
|
||||
if index.step is not None:
|
||||
raise IndexError("Cursor instances do not support slice steps")
|
||||
|
||||
skip = 0
|
||||
if index.start is not None:
|
||||
if index.start < 0:
|
||||
raise IndexError("Cursor instances do not support"
|
||||
"negative indices")
|
||||
skip = index.start
|
||||
|
||||
if index.stop is not None:
|
||||
limit = index.stop - skip
|
||||
if limit < 0:
|
||||
raise IndexError("stop index must be greater than start"
|
||||
"index for slice %r" % index)
|
||||
if limit == 0:
|
||||
self.__empty = True
|
||||
else:
|
||||
limit = 0
|
||||
|
||||
self.__skip = skip
|
||||
self.__limit = limit
|
||||
return self
|
||||
|
||||
if isinstance(index, int):
|
||||
if index < 0:
|
||||
raise IndexError("Cursor instances do not support negative"
|
||||
"indices")
|
||||
clone = self.clone()
|
||||
clone.skip(index + self.__skip)
|
||||
clone.limit(-1) # use a hard limit
|
||||
for doc in clone:
|
||||
return doc
|
||||
raise IndexError("no such item for Cursor instance")
|
||||
raise TypeError("index %r cannot be applied to Cursor "
|
||||
"instances" % index)
|
||||
|
||||
def max_scan(self, max_scan):
|
||||
"""Limit the number of documents to scan when performing the query.
|
||||
|
||||
Raises :class:`~pymongo.errors.InvalidOperation` if this
|
||||
cursor has already been used. Only the last :meth:`max_scan`
|
||||
applied to this cursor has any effect.
|
||||
|
||||
:Parameters:
|
||||
- `max_scan`: the maximum number of documents to scan
|
||||
|
||||
.. note:: Requires server version **>= 1.5.1**
|
||||
|
||||
.. versionadded:: 1.7
|
||||
"""
|
||||
self.__check_okay_to_chain()
|
||||
self.__max_scan = max_scan
|
||||
return self
|
||||
|
||||
def sort(self, key_or_list, direction=None):
|
||||
"""Sorts this cursor's results.
|
||||
|
||||
Takes either a single key and a direction, or a list of (key,
|
||||
direction) pairs. The key(s) must be an instance of ``(str,
|
||||
unicode)``, and the direction(s) must be one of
|
||||
(:data:`~pymongo.ASCENDING`,
|
||||
:data:`~pymongo.DESCENDING`). Raises
|
||||
:class:`~pymongo.errors.InvalidOperation` if this cursor has
|
||||
already been used. Only the last :meth:`sort` applied to this
|
||||
cursor has any effect.
|
||||
|
||||
:Parameters:
|
||||
- `key_or_list`: a single key or a list of (key, direction)
|
||||
pairs specifying the keys to sort on
|
||||
- `direction` (optional): only used if `key_or_list` is a single
|
||||
key, if not given :data:`~pymongo.ASCENDING` is assumed
|
||||
"""
|
||||
self.__check_okay_to_chain()
|
||||
keys = helpers._index_list(key_or_list, direction)
|
||||
self.__ordering = helpers._index_document(keys)
|
||||
return self
|
||||
|
||||
    def count(self, with_limit_and_skip=False):
        """Get the size of the results set for this query.

        Returns the number of documents in the results set for this query. Does
        not take :meth:`limit` and :meth:`skip` into account by default - set
        `with_limit_and_skip` to ``True`` if that is the desired behavior.
        Raises :class:`~pymongo.errors.OperationFailure` on a database error.

        With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
        or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
        if `read_preference` is not
        :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or
        :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or
        (deprecated) `slave_okay` is `True`, the count command will be sent to
        a secondary or slave.

        :Parameters:
          - `with_limit_and_skip` (optional): take any :meth:`limit` or
            :meth:`skip` that has been applied to this cursor into account when
            getting the count

        .. note:: The `with_limit_and_skip` parameter requires server
           version **>= 1.1.4-**

        .. note:: ``count`` ignores ``network_timeout``. For example, the
           timeout is ignored in the following code::

             collection.find({}, network_timeout=1).count()

        .. versionadded:: 1.1.1
           The `with_limit_and_skip` parameter.
           :meth:`~pymongo.cursor.Cursor.__len__` was deprecated in favor of
           calling :meth:`count` with `with_limit_and_skip` set to ``True``.
        """
        self.__check_not_command_cursor('count')
        # Seed the command document with this cursor's filter and projection.
        command = {"query": self.__spec, "fields": self.__fields}

        # Routing hints consumed by database.command(), not sent to the
        # server: they pick which member of a replica set runs the count.
        command['read_preference'] = self.__read_preference
        command['tag_sets'] = self.__tag_sets
        command['secondary_acceptable_latency_ms'] = (
            self.__secondary_acceptable_latency_ms)
        command['slave_okay'] = self.__slave_okay
        # Force the primary only when no secondary-reading option is set.
        use_master = not self.__slave_okay and not self.__read_preference
        command['_use_master'] = use_master

        if with_limit_and_skip:
            # Zero values are omitted: the server treats a missing
            # limit/skip the same as zero.
            if self.__limit:
                command["limit"] = self.__limit
            if self.__skip:
                command["skip"] = self.__skip

        database = self.__collection.database
        r = database.command("count", self.__collection.name,
                             allowable_errors=["ns missing"],
                             uuid_subtype=self.__uuid_subtype,
                             **command)
        # Counting a collection that does not exist is defined as 0,
        # not an error.
        if r.get("errmsg", "") == "ns missing":
            return 0
        return int(r["n"])
|
||||
|
||||
def distinct(self, key):
|
||||
"""Get a list of distinct values for `key` among all documents
|
||||
in the result set of this query.
|
||||
|
||||
Raises :class:`TypeError` if `key` is not an instance of
|
||||
:class:`basestring` (:class:`str` in python 3).
|
||||
|
||||
With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
|
||||
or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
|
||||
if `read_preference` is
|
||||
not :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or
|
||||
(deprecated) `slave_okay` is `True` the distinct command will be sent
|
||||
to a secondary or slave.
|
||||
|
||||
:Parameters:
|
||||
- `key`: name of key for which we want to get the distinct values
|
||||
|
||||
.. note:: Requires server version **>= 1.1.3+**
|
||||
|
||||
.. seealso:: :meth:`pymongo.collection.Collection.distinct`
|
||||
|
||||
.. versionadded:: 1.2
|
||||
"""
|
||||
self.__check_not_command_cursor('distinct')
|
||||
if not isinstance(key, str):
|
||||
raise TypeError("key must be an instance "
|
||||
"of %s" % (str.__name__,))
|
||||
|
||||
options = {"key": key}
|
||||
if self.__spec:
|
||||
options["query"] = self.__spec
|
||||
|
||||
options['read_preference'] = self.__read_preference
|
||||
options['tag_sets'] = self.__tag_sets
|
||||
options['secondary_acceptable_latency_ms'] = (
|
||||
self.__secondary_acceptable_latency_ms)
|
||||
options['slave_okay'] = self.__slave_okay
|
||||
use_master = not self.__slave_okay and not self.__read_preference
|
||||
options['_use_master'] = use_master
|
||||
|
||||
database = self.__collection.database
|
||||
return database.command("distinct",
|
||||
self.__collection.name,
|
||||
uuid_subtype=self.__uuid_subtype,
|
||||
**options)["values"]
|
||||
|
||||
def explain(self):
|
||||
"""Returns an explain plan record for this cursor.
|
||||
|
||||
.. mongodoc:: explain
|
||||
"""
|
||||
self.__check_not_command_cursor('explain')
|
||||
c = self.clone()
|
||||
c.__explain = True
|
||||
|
||||
# always use a hard limit for explains
|
||||
if c.__limit:
|
||||
c.__limit = -abs(c.__limit)
|
||||
return next(c)
|
||||
|
||||
def hint(self, index):
|
||||
"""Adds a 'hint', telling Mongo the proper index to use for the query.
|
||||
|
||||
Judicious use of hints can greatly improve query
|
||||
performance. When doing a query on multiple fields (at least
|
||||
one of which is indexed) pass the indexed field as a hint to
|
||||
the query. Hinting will not do anything if the corresponding
|
||||
index does not exist. Raises
|
||||
:class:`~pymongo.errors.InvalidOperation` if this cursor has
|
||||
already been used.
|
||||
|
||||
`index` should be an index as passed to
|
||||
:meth:`~pymongo.collection.Collection.create_index`
|
||||
(e.g. ``[('field', ASCENDING)]``). If `index`
|
||||
is ``None`` any existing hints for this query are cleared. The
|
||||
last hint applied to this cursor takes precedence over all
|
||||
others.
|
||||
|
||||
:Parameters:
|
||||
- `index`: index to hint on (as an index specifier)
|
||||
"""
|
||||
self.__check_okay_to_chain()
|
||||
if index is None:
|
||||
self.__hint = None
|
||||
return self
|
||||
|
||||
self.__hint = helpers._index_document(index)
|
||||
return self
|
||||
|
||||
def where(self, code):
|
||||
"""Adds a $where clause to this query.
|
||||
|
||||
The `code` argument must be an instance of :class:`basestring`
|
||||
(:class:`str` in python 3) or :class:`~bson.code.Code`
|
||||
containing a JavaScript expression. This expression will be
|
||||
evaluated for each document scanned. Only those documents
|
||||
for which the expression evaluates to *true* will be returned
|
||||
as results. The keyword *this* refers to the object currently
|
||||
being scanned.
|
||||
|
||||
Raises :class:`TypeError` if `code` is not an instance of
|
||||
:class:`basestring` (:class:`str` in python 3). Raises
|
||||
:class:`~pymongo.errors.InvalidOperation` if this
|
||||
:class:`Cursor` has already been used. Only the last call to
|
||||
:meth:`where` applied to a :class:`Cursor` has any effect.
|
||||
|
||||
:Parameters:
|
||||
- `code`: JavaScript expression to use as a filter
|
||||
"""
|
||||
self.__check_okay_to_chain()
|
||||
if not isinstance(code, Code):
|
||||
code = Code(code)
|
||||
|
||||
self.__spec["$where"] = code
|
||||
return self
|
||||
|
||||
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.

        If message is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.
        """
        client = self.__collection.database.connection

        if message:
            # Routing options consumed by the client layer when picking a
            # server/socket for this request.
            kwargs = {"_must_use_master": self.__must_use_master}
            kwargs["read_preference"] = self.__read_preference
            kwargs["tag_sets"] = self.__tag_sets
            kwargs["secondary_acceptable_latency_ms"] = (
                self.__secondary_acceptable_latency_ms)
            kwargs['exhaust'] = self.__exhaust
            if self.__connection_id is not None:
                # Pin follow-up getMores to the socket that ran the query.
                kwargs["_connection_to_use"] = self.__connection_id
            # Caller-supplied options take precedence over the defaults above.
            kwargs.update(self.__kwargs)

            try:
                res = client._send_message_with_response(message, **kwargs)
                self.__connection_id, (response, sock, pool) = res
                if self.__exhaust:
                    # Keep the raw socket checked out: exhaust batches are
                    # read straight off it rather than requested.
                    self.__exhaust_mgr = _SocketManager(sock, pool)
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:  # exhaust cursor - no getMore message
            response = client._exhaust_next(self.__exhaust_mgr.sock)

        try:
            response = helpers._unpack_response(response, self.__id,
                                                self.__as_class,
                                                self.__tz_aware,
                                                self.__uuid_subtype)
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        # Server-assigned cursor id; 0 means the cursor is exhausted.
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not (self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]):
            assert response["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" % (
                    response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        # If the limit has been reached, proactively close the server-side
        # cursor instead of leaving it to time out.
        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
|
||||
|
||||
    def _refresh(self):
        """Refreshes the cursor with more data from Mongo.

        Returns the length of self.__data after refresh. Will exit early if
        self.__data is already non-empty. Raises OperationFailure when the
        cursor cannot be refreshed due to an error on the query.
        """
        # Nothing to do while buffered results remain or the cursor is dead.
        if len(self.__data) or self.__killed:
            return len(self.__data)

        if self.__id is None:  # Query
            # First round trip: size the initial batch from batch_size,
            # capped by any limit.
            ntoreturn = self.__batch_size
            if self.__limit:
                if self.__batch_size:
                    ntoreturn = min(self.__limit, self.__batch_size)
                else:
                    ntoreturn = self.__limit
            self.__send_message(
                message.query(self.__query_options(),
                              self.__collection.full_name,
                              self.__skip, ntoreturn,
                              self.__query_spec(), self.__fields,
                              self.__uuid_subtype))
            # A zero cursor id means the server returned everything at once.
            if not self.__id:
                self.__killed = True
        elif self.__id:  # Get More
            # Ask only for what the limit still allows, within batch_size.
            if self.__limit:
                limit = self.__limit - self.__retrieved
                if self.__batch_size:
                    limit = min(limit, self.__batch_size)
            else:
                limit = self.__batch_size

            # Exhaust cursors don't send getMore messages.
            if self.__exhaust:
                self.__send_message(None)
            else:
                self.__send_message(
                    message.get_more(self.__collection.full_name,
                                     limit, self.__id))

        else:  # Cursor id is zero nothing else to return
            self.__killed = True

        return len(self.__data)
|
||||
|
||||
@property
|
||||
def alive(self):
|
||||
"""Does this cursor have the potential to return more data?
|
||||
|
||||
This is mostly useful with `tailable cursors
|
||||
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_
|
||||
since they will stop iterating even though they *may* return more
|
||||
results in the future.
|
||||
|
||||
.. versionadded:: 1.5
|
||||
"""
|
||||
return bool(len(self.__data) or (not self.__killed))
|
||||
|
||||
@property
|
||||
def cursor_id(self):
|
||||
"""Returns the id of the cursor
|
||||
|
||||
Useful if you need to manage cursor ids and want to handle killing
|
||||
cursors manually using
|
||||
:meth:`~pymongo.mongo_client.MongoClient.kill_cursors`
|
||||
|
||||
.. versionadded:: 2.2
|
||||
"""
|
||||
return self.__id
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
if self.__empty:
|
||||
raise StopIteration
|
||||
db = self.__collection.database
|
||||
if len(self.__data) or self._refresh():
|
||||
if self.__manipulate:
|
||||
return db._fix_outgoing(self.__data.popleft(),
|
||||
self.__collection)
|
||||
else:
|
||||
return self.__data.popleft()
|
||||
else:
|
||||
raise StopIteration
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.__die()
|
||||
|
||||
def __copy__(self):
|
||||
"""Support function for `copy.copy()`.
|
||||
|
||||
.. versionadded:: 2.4
|
||||
"""
|
||||
return self.__clone(deepcopy=False)
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
"""Support function for `copy.deepcopy()`.
|
||||
|
||||
.. versionadded:: 2.4
|
||||
"""
|
||||
return self.__clone(deepcopy=True)
|
||||
|
||||
    def __deepcopy(self, x, memo=None):
        """Deepcopy helper for the data dictionary or list.

        Regular expressions cannot be deep copied but as they are immutable we
        don't have to copy them when cloning.
        """
        # Duck-type on .items(): mappings get a dict copy, everything else
        # (lists) gets a list copy.
        if not hasattr(x, 'items'):
            y, is_list, iterator = [], True, enumerate(x)
        else:
            y, is_list, iterator = {}, False, iter(x.items())

        if memo is None:
            memo = {}
        # Standard deepcopy memoization: register the (still empty) copy
        # before recursing so cyclic references resolve to it.
        val_id = id(x)
        if val_id in memo:
            return memo.get(val_id)
        memo[val_id] = y

        for key, value in iterator:
            # Recurse manually for plain containers (but not SON, which
            # copy.deepcopy handles itself); skip regexes entirely since
            # they are immutable and cannot be deep copied.
            if isinstance(value, (dict, list)) and not isinstance(value, SON):
                value = self.__deepcopy(value, memo)
            elif not isinstance(value, RE_TYPE):
                value = copy.deepcopy(value, memo)

            if is_list:
                y.append(value)
            else:
                if not isinstance(key, RE_TYPE):
                    key = copy.deepcopy(key, memo)
                y[key] = value
        return y
|
||||
93
asyncio_mongo/_pymongo/cursor_manager.py
Normal file
93
asyncio_mongo/_pymongo/cursor_manager.py
Normal file
@@ -0,0 +1,93 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""DEPRECATED - Different managers to handle when cursors are killed after
|
||||
they are closed.
|
||||
|
||||
New cursor managers should be defined as subclasses of CursorManager and can be
|
||||
installed on a connection by calling
|
||||
`pymongo.connection.Connection.set_cursor_manager`.
|
||||
|
||||
.. versionchanged:: 2.1+
|
||||
Deprecated.
|
||||
"""
|
||||
|
||||
import weakref
|
||||
|
||||
|
||||
class CursorManager(object):
    """The default cursor manager.

    Kills each cursor individually, at the moment it is closed.
    """

    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        # Hold only a weak reference so the manager never keeps the
        # connection alive on its own.
        self.__connection = weakref.ref(connection)

    def close(self, cursor_id):
        """Close a cursor by killing it immediately.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        if not isinstance(cursor_id, int):
            raise TypeError("cursor_id must be an instance of (int, long)")

        self.__connection().kill_cursors([cursor_id])
|
||||
|
||||
|
||||
class BatchCursorManager(CursorManager):
    """A cursor manager that kills cursors in batches.
    """

    def __init__(self, connection):
        """Instantiate the manager.

        :Parameters:
          - `connection`: a Mongo Connection
        """
        # Cursor ids collected but not yet sent to the server.
        self.__dying_cursors = []
        # Flush threshold: once more than this many ids accumulate, one
        # killCursors message is sent for the whole batch.
        self.__max_dying_cursors = 20
        # Weak reference so the manager never keeps the connection alive.
        self.__connection = weakref.ref(connection)

        CursorManager.__init__(self, connection)

    def __del__(self):
        """Cleanup - be sure to kill any outstanding cursors.
        """
        # BUG FIX: the weak reference may already be dead (e.g. during
        # interpreter shutdown), in which case self.__connection() returns
        # None and the old code raised. Also skip the call entirely when
        # there is nothing left to kill.
        connection = self.__connection()
        if connection is not None and self.__dying_cursors:
            connection.kill_cursors(self.__dying_cursors)

    def close(self, cursor_id):
        """Close a cursor by killing it in a batch.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
        """
        if not isinstance(cursor_id, int):
            raise TypeError("cursor_id must be an instance of (int, long)")

        self.__dying_cursors.append(cursor_id)

        # Flush the batch once it exceeds the threshold.
        if len(self.__dying_cursors) > self.__max_dying_cursors:
            self.__connection().kill_cursors(self.__dying_cursors)
            self.__dying_cursors = []
|
||||
875
asyncio_mongo/_pymongo/database.py
Normal file
875
asyncio_mongo/_pymongo/database.py
Normal file
@@ -0,0 +1,875 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Database level operations."""
|
||||
|
||||
from asyncio_mongo._bson.binary import OLD_UUID_SUBTYPE
|
||||
from asyncio_mongo._bson.code import Code
|
||||
from asyncio_mongo._bson.dbref import DBRef
|
||||
from asyncio_mongo._bson.son import SON
|
||||
from asyncio_mongo._pymongo import auth, common, helpers
|
||||
from asyncio_mongo._pymongo.collection import Collection
|
||||
from asyncio_mongo._pymongo.errors import (CollectionInvalid,
|
||||
InvalidName,
|
||||
OperationFailure)
|
||||
from asyncio_mongo._pymongo.son_manipulator import ObjectIdInjector
|
||||
from asyncio_mongo._pymongo import read_preferences as rp
|
||||
|
||||
|
||||
def _check_name(name):
    """Check if a database name is valid.

    Raises InvalidName when `name` is empty or contains a character
    the server forbids in database names.
    """
    if not name:
        raise InvalidName("database name cannot be the empty string")

    # Characters the server rejects in database names.
    for invalid_char in (" ", ".", "$", "/", "\\", "\x00"):
        if invalid_char in name:
            raise InvalidName("database names cannot contain the "
                              "character %r" % invalid_char)
|
||||
|
||||
|
||||
class Database(common.BaseObject):
|
||||
"""A Mongo database.
|
||||
"""
|
||||
|
||||
    def __init__(self, connection, name):
        """Get a database by connection and name.

        Raises :class:`TypeError` if `name` is not an instance of
        :class:`basestring` (:class:`str` in python 3). Raises
        :class:`~pymongo.errors.InvalidName` if `name` is not a valid
        database name.

        :Parameters:
          - `connection`: a client instance
          - `name`: database name

        .. mongodoc:: databases
        """
        # Inherit all read/write options from the client so database-level
        # operations default to the client's configuration.
        super(Database,
              self).__init__(slave_okay=connection.slave_okay,
                             read_preference=connection.read_preference,
                             tag_sets=connection.tag_sets,
                             secondary_acceptable_latency_ms=(
                                 connection.secondary_acceptable_latency_ms),
                             safe=connection.safe,
                             **connection.write_concern)

        if not isinstance(name, str):
            raise TypeError("name must be an instance "
                            "of %s" % (str.__name__,))

        # '$external' is exempt from name validation — presumably because
        # it is a server-reserved virtual database (external auth);
        # NOTE(review): confirm against server naming rules.
        if name != '$external':
            _check_name(name)

        self.__name = str(name)
        self.__connection = connection

        # SON manipulator pipelines, split by direction and by whether the
        # manipulator copies the document (see add_son_manipulator).
        self.__incoming_manipulators = []
        self.__incoming_copying_manipulators = []
        self.__outgoing_manipulators = []
        self.__outgoing_copying_manipulators = []
        # Every database injects _id into documents that lack one.
        self.add_son_manipulator(ObjectIdInjector())
|
||||
|
||||
    def add_son_manipulator(self, manipulator):
        """Add a new son manipulator to this database.

        Newly added manipulators will be applied before existing ones.

        :Parameters:
          - `manipulator`: the manipulator to add
        """
        def method_overwritten(instance, method):
            # True when the manipulator's class actually overrides `method`
            # instead of inheriting the base no-op implementation — only
            # overridden hooks are registered, so the pipelines stay short.
            return getattr(instance, method) != \
                getattr(super(instance.__class__, instance), method)

        # Copying manipulators are tracked separately from in-place ones;
        # insert(0, ...) makes newer manipulators run first.
        if manipulator.will_copy():
            if method_overwritten(manipulator, "transform_incoming"):
                self.__incoming_copying_manipulators.insert(0, manipulator)
            if method_overwritten(manipulator, "transform_outgoing"):
                self.__outgoing_copying_manipulators.insert(0, manipulator)
        else:
            if method_overwritten(manipulator, "transform_incoming"):
                self.__incoming_manipulators.insert(0, manipulator)
            if method_overwritten(manipulator, "transform_outgoing"):
                self.__outgoing_manipulators.insert(0, manipulator)
|
||||
|
||||
@property
|
||||
def system_js(self):
|
||||
"""A :class:`SystemJS` helper for this :class:`Database`.
|
||||
|
||||
See the documentation for :class:`SystemJS` for more details.
|
||||
|
||||
.. versionadded:: 1.5
|
||||
"""
|
||||
return SystemJS(self)
|
||||
|
||||
@property
|
||||
def connection(self):
|
||||
"""The client instance for this :class:`Database`.
|
||||
|
||||
.. versionchanged:: 1.3
|
||||
``connection`` is now a property rather than a method.
|
||||
"""
|
||||
return self.__connection
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
"""The name of this :class:`Database`.
|
||||
|
||||
.. versionchanged:: 1.3
|
||||
``name`` is now a property rather than a method.
|
||||
"""
|
||||
return self.__name
|
||||
|
||||
@property
|
||||
def incoming_manipulators(self):
|
||||
"""List all incoming SON manipulators
|
||||
installed on this instance.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
return [manipulator.__class__.__name__
|
||||
for manipulator in self.__incoming_manipulators]
|
||||
|
||||
@property
|
||||
def incoming_copying_manipulators(self):
|
||||
"""List all incoming SON copying manipulators
|
||||
installed on this instance.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
return [manipulator.__class__.__name__
|
||||
for manipulator in self.__incoming_copying_manipulators]
|
||||
|
||||
@property
|
||||
def outgoing_manipulators(self):
|
||||
"""List all outgoing SON manipulators
|
||||
installed on this instance.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
return [manipulator.__class__.__name__
|
||||
for manipulator in self.__outgoing_manipulators]
|
||||
|
||||
@property
|
||||
def outgoing_copying_manipulators(self):
|
||||
"""List all outgoing SON copying manipulators
|
||||
installed on this instance.
|
||||
|
||||
.. versionadded:: 2.0
|
||||
"""
|
||||
return [manipulator.__class__.__name__
|
||||
for manipulator in self.__outgoing_copying_manipulators]
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, Database):
|
||||
us = (self.__connection, self.__name)
|
||||
them = (other.__connection, other.__name)
|
||||
return us == them
|
||||
return NotImplemented
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def __repr__(self):
|
||||
return "Database(%r, %r)" % (self.__connection, self.__name)
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""Get a collection of this database by name.
|
||||
|
||||
Raises InvalidName if an invalid collection name is used.
|
||||
|
||||
:Parameters:
|
||||
- `name`: the name of the collection to get
|
||||
"""
|
||||
return Collection(self, name)
|
||||
|
||||
def __getitem__(self, name):
|
||||
"""Get a collection of this database by name.
|
||||
|
||||
Raises InvalidName if an invalid collection name is used.
|
||||
|
||||
:Parameters:
|
||||
- `name`: the name of the collection to get
|
||||
"""
|
||||
return self.__getattr__(name)
|
||||
|
||||
def create_collection(self, name, **kwargs):
|
||||
"""Create a new :class:`~pymongo.collection.Collection` in this
|
||||
database.
|
||||
|
||||
Normally collection creation is automatic. This method should
|
||||
only be used to specify options on
|
||||
creation. :class:`~pymongo.errors.CollectionInvalid` will be
|
||||
raised if the collection already exists.
|
||||
|
||||
Options should be passed as keyword arguments to this
|
||||
method. Any of the following options are valid:
|
||||
|
||||
- "size": desired initial size for the collection (in
|
||||
bytes). For capped collections this size is the max
|
||||
size of the collection.
|
||||
- "capped": if True, this is a capped collection
|
||||
- "max": maximum number of objects if capped (optional)
|
||||
|
||||
:Parameters:
|
||||
- `name`: the name of the collection to create
|
||||
- `**kwargs` (optional): additional keyword arguments will
|
||||
be passed as options for the create collection command
|
||||
|
||||
.. versionchanged:: 2.2
|
||||
Removed deprecated argument: options
|
||||
|
||||
.. versionchanged:: 1.5
|
||||
deprecating `options` in favor of kwargs
|
||||
"""
|
||||
opts = {"create": True}
|
||||
opts.update(kwargs)
|
||||
|
||||
if name in self.collection_names():
|
||||
raise CollectionInvalid("collection %s already exists" % name)
|
||||
|
||||
return Collection(self, name, **opts)
|
||||
|
||||
def _fix_incoming(self, son, collection):
|
||||
"""Apply manipulators to an incoming SON object before it gets stored.
|
||||
|
||||
:Parameters:
|
||||
- `son`: the son object going into the database
|
||||
- `collection`: the collection the son object is being saved in
|
||||
"""
|
||||
for manipulator in self.__incoming_manipulators:
|
||||
son = manipulator.transform_incoming(son, collection)
|
||||
for manipulator in self.__incoming_copying_manipulators:
|
||||
son = manipulator.transform_incoming(son, collection)
|
||||
return son
|
||||
|
||||
def _fix_outgoing(self, son, collection):
    """Run every outgoing SON manipulator over `son` as it leaves the db.

    :Parameters:
      - `son`: the son object coming out of the database
      - `collection`: the collection the son object was saved in
    """
    # Reversed order so outgoing transforms mirror the incoming pipeline.
    for manip in reversed(self.__outgoing_manipulators):
        son = manip.transform_outgoing(son, collection)
    for manip in reversed(self.__outgoing_copying_manipulators):
        son = manip.transform_outgoing(son, collection)
    return son
|
||||
|
||||
def command(self, command, value=1,
            check=True, allowable_errors=None,
            uuid_subtype=OLD_UUID_SUBTYPE, **kwargs):
    """Issue a MongoDB command.

    If `command` is a string, the command ``{command: value}`` is sent;
    otherwise `command` must be a dict and is sent as is.  Any extra
    keyword arguments are merged into the command document before it is
    sent.

    For example, ``{buildinfo: 1}`` can be sent as::

        db.command("buildinfo")

    ``{collstats: collection_name}`` as::

        db.command("collstats", collection_name)

    and ``{filemd5: object_id, root: file_root}`` as::

        db.command("filemd5", object_id, root=file_root)

    .. note:: the order of keys in the `command` document is
       significant (the "verb" must come first), so commands which
       require multiple keys (e.g. `findandmodify`) should use an
       instance of :class:`~bson.son.SON` or a string and kwargs
       instead of a Python `dict`.

    :Parameters:
      - `command`: command document, or the name of the command (for
        simple commands only)
      - `value` (optional): value for the command verb when `command`
        is passed as a string
      - `check` (optional): check the response for errors, raising
        :class:`~pymongo.errors.OperationFailure` if there are any
      - `allowable_errors` (optional): if `check` is ``True``, error
        messages in this list will be ignored by error-checking
      - `uuid_subtype` (optional): BSON binary subtype to use for a
        UUID used in this command
      - `read_preference` / `tag_sets` /
        `secondary_acceptable_latency_ms` (optional): read routing
        options; default to this database's settings
      - `**kwargs` (optional): added to the command document before it
        is sent

    .. note:: ``command`` ignores the ``network_timeout`` parameter.

    .. mongodoc:: commands
    """
    # Use a None sentinel instead of a mutable default argument; the
    # observable behavior (empty list) is unchanged.
    if allowable_errors is None:
        allowable_errors = []

    if isinstance(command, str):
        command = SON([(command, value)])

    command_name = list(command.keys())[0].lower()
    must_use_master = kwargs.pop('_use_master', False)
    # Commands not known to be secondary-safe always go to the primary.
    if command_name not in rp.secondary_ok_commands:
        must_use_master = True

    # Special-case: mapreduce can go to secondaries only if inline
    if command_name == 'mapreduce':
        out = command.get('out') or kwargs.get('out')
        if not isinstance(out, dict) or not out.get('inline'):
            must_use_master = True

    extra_opts = {
        'as_class': kwargs.pop('as_class', None),
        'slave_okay': kwargs.pop('slave_okay', self.slave_okay),
        '_must_use_master': must_use_master,
        '_uuid_subtype': uuid_subtype
    }

    extra_opts['read_preference'] = kwargs.pop(
        'read_preference',
        self.read_preference)
    extra_opts['tag_sets'] = kwargs.pop(
        'tag_sets',
        self.tag_sets)
    extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop(
        'secondary_acceptable_latency_ms',
        self.secondary_acceptable_latency_ms)

    # A list of field names must be converted to the {name: 1} form.
    fields = kwargs.get('fields')
    if fields is not None and not isinstance(fields, dict):
        kwargs['fields'] = helpers._fields_list_to_dict(fields)

    command.update(kwargs)

    # A command is executed as a query on the virtual $cmd collection.
    result = self["$cmd"].find_one(command, **extra_opts)

    if check:
        # Escape any % in the repr so it can't break the format string.
        msg = "command %s failed: %%s" % repr(command).replace("%", "%%")
        helpers._check_command_response(result, self.connection.disconnect,
                                        msg, allowable_errors)

    return result
|
||||
|
||||
def collection_names(self, include_system_collections=True):
    """Get a list of all the collection names in this database.

    :Parameters:
      - `include_system_collections` (optional): if ``False`` the list
        will not include system collections (e.g ``system.indexes``)
    """
    prefix = self.__name + "."
    cursor = self["system.namespaces"].find(_must_use_master=True)
    # Keep namespaces for this database only, strip the "<db>." prefix,
    # and drop index namespaces (they contain "$").
    names = [ns["name"][len(prefix):] for ns in cursor
             if ns["name"].startswith(prefix) and "$" not in ns["name"]]
    if not include_system_collections:
        names = [n for n in names if not n.startswith("system.")]
    return names
|
||||
|
||||
def drop_collection(self, name_or_collection):
    """Drop a collection.

    :Parameters:
      - `name_or_collection`: the name of a collection to drop or the
        collection object itself
    """
    name = name_or_collection
    if isinstance(name, Collection):
        name = name.name

    if not isinstance(name, str):
        raise TypeError("name_or_collection must be an instance of "
                        "%s or Collection" % (str.__name__,))

    # Forget any cached index information for the dropped collection.
    self.__connection._purge_index(self.__name, name)

    # Dropping a collection that does not exist is not an error.
    self.command("drop", str(name), allowable_errors=["ns not found"])
|
||||
|
||||
def validate_collection(self, name_or_collection,
                        scandata=False, full=False):
    """Validate a collection.

    Returns a dict of validation info. Raises CollectionInvalid if
    validation fails.

    With MongoDB < 1.9 the result dict includes a `result` key holding
    a string describing the validation; with MongoDB >= 1.9 the results
    are split into individual fields of the result dict.

    :Parameters:
      - `name_or_collection`: A Collection object or the name of a
        collection to validate.
      - `scandata`: Do extra checks beyond checking the overall
        structure of the collection.
      - `full`: Have the server do a more thorough scan of the
        collection. Use with `scandata` for a thorough scan. Ignored
        in MongoDB versions before 1.9.

    .. versionchanged:: 1.11
       validate_collection previously returned a string.
    .. versionadded:: 1.11
       Added `scandata` and `full` options.
    """
    name = name_or_collection
    if isinstance(name, Collection):
        name = name.name

    if not isinstance(name, str):
        raise TypeError("name_or_collection must be an instance of "
                        "%s or Collection" % (str.__name__,))

    result = self.command("validate", str(name),
                          scandata=scandata, full=full)

    valid = True
    if "result" in result:
        # Pre 1.9: one string summarizing the validation.
        info = result["result"]
        if "exception" in info or "corrupt" in info:
            raise CollectionInvalid("%s invalid: %s" % (name, info))
    elif "raw" in result:
        # Sharded: one sub-result per shard; fail on the first bad one.
        for res in result["raw"].values():
            if "result" in res:
                info = res["result"]
                if "exception" in info or "corrupt" in info:
                    raise CollectionInvalid("%s invalid: "
                                            "%s" % (name, info))
            elif not res.get("valid", False):
                valid = False
                break
    elif not result.get("valid", False):
        # Post 1.9 non-sharded results.
        valid = False

    if not valid:
        raise CollectionInvalid("%s invalid: %r" % (name, result))

    return result
|
||||
|
||||
def current_op(self, include_all=False):
    """Get information on operations currently running.

    :Parameters:
      - `include_all` (optional): if ``True`` also list currently
        idle operations in the result
    """
    # $cmd.sys.inprog is a virtual collection exposing in-progress ops.
    inprog = self['$cmd.sys.inprog']
    if include_all:
        return inprog.find_one({"$all": True})
    return inprog.find_one()
|
||||
|
||||
def profiling_level(self):
    """Get the database's current profiling level.

    Returns one of (:data:`~pymongo.OFF`,
    :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).

    .. mongodoc:: profiling
    """
    # "profile" with -1 queries the level without changing it.
    was = self.command("profile", -1)["was"]
    assert 0 <= was <= 2
    return was
|
||||
|
||||
def set_profiling_level(self, level, slow_ms=None):
    """Set the database's profiling level.

    :Parameters:
      - `level`: one of :data:`~pymongo.OFF` (no profiling),
        :data:`~pymongo.SLOW_ONLY` (slow operations only) or
        :data:`~pymongo.ALL` (all operations); anything else raises
        :class:`ValueError`
      - `slow_ms`: optionally set the threshold for the profiler to
        consider an operation slow. Even with the profiler off,
        operations slower than `slow_ms` are written to the logs.

    .. mongodoc:: profiling
    """
    if not isinstance(level, int) or not 0 <= level <= 2:
        raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")

    if slow_ms is not None and not isinstance(slow_ms, int):
        raise TypeError("slow_ms must be an integer")

    # Only send slowms when the caller supplied it.
    options = {} if slow_ms is None else {"slowms": slow_ms}
    self.command("profile", level, **options)
|
||||
|
||||
def profiling_info(self):
    """Return a list of the current profiling documents
    (the contents of the ``system.profile`` collection).

    .. mongodoc:: profiling
    """
    return [entry for entry in self["system.profile"].find()]
|
||||
|
||||
def error(self):
    """Get a database error if one occurred on the last operation.

    Return None if the last operation was error-free. Otherwise return
    the error that occurred.
    """
    status = self.command("getlasterror")
    message = status.get("err", "")
    if message is None:
        # err of None means the last operation succeeded.
        return None
    if message.startswith("not master"):
        # The primary changed; drop sockets so we reconnect.
        self.__connection.disconnect()
    return status
|
||||
|
||||
def last_status(self):
    """Get status information from the last operation.

    Returns a SON object with status information.
    """
    # getlasterror returns the outcome of the most recent write on
    # this connection.
    return self.command("getlasterror")
|
||||
|
||||
def previous_error(self):
    """Get the most recent error to have occurred on this database.

    Only returns errors that have occurred since the last call to
    `Database.reset_error_history`. Returns None if no such errors
    have occurred.
    """
    status = self.command("getpreverror")
    # err of None means no error has been recorded.
    return None if status.get("err", 0) is None else status
|
||||
|
||||
def reset_error_history(self):
    """Reset the error history of this database.

    Calls to `Database.previous_error` will only return errors that
    have occurred since the most recent call to this method.
    """
    self.command("reseterror")
|
||||
|
||||
def __iter__(self):
    # Returning self lets __next__ raise a descriptive TypeError when
    # someone mistakenly tries to iterate a Database.
    return self
|
||||
|
||||
def __next__(self):
    # Databases are not iterable; fail with a clear message rather
    # than a confusing attribute lookup (e.g. db["0"], db["1"], ...).
    raise TypeError("'Database' object is not iterable")
|
||||
|
||||
def add_user(self, name, password=None, read_only=None, **kwargs):
    """Create user `name` with password `password`.

    Add a new user with permissions for this :class:`Database`.

    .. note:: Will change the password if user `name` already exists.

    :Parameters:
      - `name`: the name of the user to create
      - `password` (optional): the password of the user to create. Can
        not be used with the ``userSource`` argument.
      - `read_only` (optional): if ``True`` the user will be read only
      - `**kwargs` (optional): optional fields for the user document
        (e.g. ``userSource``, ``otherDBRoles``, or ``roles``); these
        require MongoDB >= 2.4.0

    .. versionchanged:: 2.5
       Added kwargs support for optional fields introduced in MongoDB 2.4

    .. versionchanged:: 2.2
       Added support for read only users

    .. versionadded:: 1.4
    """
    # Update the existing user document if there is one.
    user = self.system.users.find_one({"user": name}) or {"user": name}
    if password is not None:
        user["pwd"] = auth._password_digest(name, password)
    if read_only is not None:
        user["readOnly"] = common.validate_boolean('read_only', read_only)
    user.update(kwargs)

    try:
        self.system.users.save(user, **self._get_wc_override())
    except OperationFailure as exc:
        # First admin user add fails gle in MongoDB >= 2.1.2
        # See SERVER-4225 for more information.
        if 'login' not in str(exc):
            raise
|
||||
|
||||
def remove_user(self, name):
    """Remove user `name` from this :class:`Database`.

    User `name` will no longer have permissions to access this
    :class:`Database`.

    :Parameters:
      - `name`: the name of the user to remove

    .. versionadded:: 1.4
    """
    spec = {"user": name}
    self.system.users.remove(spec, **self._get_wc_override())
|
||||
|
||||
def authenticate(self, name, password=None,
                 source=None, mechanism='MONGODB-CR', **kwargs):
    """Authenticate to use this database.

    Authentication lasts for the life of the underlying client
    instance, or until :meth:`logout` is called.  Raises
    :class:`TypeError` if `name`, `password` or `source` is not a
    string.

    .. note::
      - This method authenticates the current connection and causes
        new sockets on the underlying client to be authenticated
        automatically.
      - Authenticating more than once on the same database with
        different credentials is not supported; call :meth:`logout`
        first.
      - Threads sharing a client instance share the authentication.
      - To apply authentication immediately to all existing sockets,
        reset them with
        :meth:`~pymongo.mongo_client.MongoClient.disconnect`.

    :Parameters:
      - `name`: the name of the user to authenticate.
      - `password` (optional): not used with GSSAPI or MONGODB-X509
        authentication.
      - `source` (optional): the database to authenticate on; defaults
        to the current database.
      - `mechanism` (optional): see :data:`~pymongo.auth.MECHANISMS`;
        defaults to MONGODB-CR (MongoDB Challenge Response protocol).
      - `gssapiServiceName` (optional): service name portion of the
        service principal name for GSSAPI; defaults to 'mongodb'.

    .. mongodoc:: authenticate
    """
    if not isinstance(name, str):
        raise TypeError("name must be an instance "
                        "of %s" % (str.__name__,))
    if password is not None and not isinstance(password, str):
        raise TypeError("password must be an instance "
                        "of %s" % (str.__name__,))
    if source is not None and not isinstance(source, str):
        raise TypeError("source must be an instance "
                        "of %s" % (str.__name__,))
    common.validate_auth_mechanism('mechanism', mechanism)

    # validate_auth_option returns (normalized_name, value) pairs.
    validated_options = dict(
        common.validate_auth_option(option, value)
        for option, value in kwargs.items())

    credentials = auth._build_credentials_tuple(
        mechanism,
        source or self.name,
        str(name),
        str(password) if password else None,
        validated_options)
    # Cached credentials are replayed on every new socket the client opens.
    self.connection._cache_credentials(self.name, credentials)
    return True
|
||||
|
||||
def logout(self):
    """Deauthorize use of this database for this client instance.

    .. note:: Other databases may still be authenticated, and other
       existing :class:`~socket.socket` connections may remain
       authenticated for this database unless you reset all sockets
       with :meth:`~pymongo.mongo_client.MongoClient.disconnect`.
    """
    # Sockets will be deauthenticated as they are used.
    self.connection._purge_credentials(self.name)
|
||||
|
||||
def dereference(self, dbref):
    """Dereference a :class:`~bson.dbref.DBRef`, getting the document
    it points to.

    Returns the referenced document, or ``None`` if the reference does
    not point to a valid document.  Raises :class:`TypeError` if
    `dbref` is not a :class:`~bson.dbref.DBRef` and
    :class:`ValueError` if `dbref` specifies a database other than the
    current one.

    :Parameters:
      - `dbref`: the reference
    """
    if not isinstance(dbref, DBRef):
        raise TypeError("cannot dereference a %s" % type(dbref))
    target_db = dbref.database
    if target_db is not None and target_db != self.__name:
        raise ValueError("trying to dereference a DBRef that points to "
                         "another database (%r not %r)" % (target_db,
                                                           self.__name))
    # A DBRef resolves to the document with matching _id.
    return self[dbref.collection].find_one({"_id": dbref.id})
|
||||
|
||||
def eval(self, code, *args):
    """Evaluate a JavaScript expression in MongoDB.

    Useful for touching a lot of data lightly, where network transfer
    of the data could be a bottleneck. `code` must be a JavaScript
    function; additional positional arguments are passed to it on the
    server.

    Raises :class:`TypeError` if `code` is not a string or `Code`.
    Raises :class:`~pymongo.errors.OperationFailure` if the eval
    fails. Returns the result of the evaluation.

    :Parameters:
      - `code`: string representation of JavaScript code to be
        evaluated
      - `args` (optional): additional positional arguments passed to
        the `code` being evaluated
    """
    if not isinstance(code, Code):
        code = Code(code)

    response = self.command("$eval", code, args=args)
    return response.get("retval", None)
|
||||
|
||||
def __call__(self, *args, **kwargs):
    """This is only here so that some API misusages are easier to debug.
    """
    # A common mistake: db.method_name(...) where method_name does not
    # exist returns a Database via __getattr__, then gets called.
    message = ("'Database' object is not callable. If you meant to "
               "call the '%s' method on a '%s' object it is "
               "failing because no such method exists." % (
               self.__name, self.__connection.__class__.__name__))
    raise TypeError(message)
|
||||
|
||||
|
||||
class SystemJS(object):
    """Helper class for dealing with stored JavaScript.
    """

    def __init__(self, database):
        """Get a system js helper for the database `database`.

        An instance of :class:`SystemJS` can be created with an instance
        of :class:`Database` through :attr:`Database.system_js`,
        manual instantiation of this class should not be necessary.

        :class:`SystemJS` instances allow for easy manipulation and
        access to server-side JavaScript:

        .. doctest::

          >>> db.system_js.add1 = "function (x) { return x + 1; }"
          >>> db.system.js.find({"_id": "add1"}).count()
          1
          >>> db.system_js.add1(5)
          6.0
          >>> del db.system_js.add1
          >>> db.system.js.find({"_id": "add1"}).count()
          0

        .. note:: Requires server version **>= 1.1.1**

        .. versionadded:: 1.5
        """
        # can't just assign it since we've overridden __setattr__
        object.__setattr__(self, "_db", database)

    def __setattr__(self, name, code):
        # Attribute assignment stores a server-side function with
        # _id == name in the system.js collection.
        self._db.system.js.save({"_id": name, "value": Code(code)},
                                **self._db._get_wc_override())

    def __setitem__(self, name, code):
        # Item assignment is an alias for attribute assignment.
        self.__setattr__(name, code)

    def __delattr__(self, name):
        # Attribute deletion removes the stored function of that name.
        self._db.system.js.remove({"_id": name}, **self._db._get_wc_override())

    def __delitem__(self, name):
        # Item deletion is an alias for attribute deletion.
        self.__delattr__(name)

    def __getattr__(self, name):
        # Attribute access returns a callable that runs the stored
        # function on the server (via db.eval), forwarding any args.
        # Only called when `name` isn't a real attribute, so `_db` set
        # in __init__ never recurses here.
        return lambda *args: self._db.eval(Code("function() { "
                                                "return this[name].apply("
                                                "this, arguments); }",
                                                scope={'name': name}), *args)

    def __getitem__(self, name):
        # Item access is an alias for attribute access.
        return self.__getattr__(name)

    def list(self):
        """Get a list of the names of the functions stored in this database.

        .. versionadded:: 1.9
        """
        return [x["_id"] for x in self._db.system.js.find(fields=["_id"])]
|
||||
121
asyncio_mongo/_pymongo/errors.py
Normal file
121
asyncio_mongo/_pymongo/errors.py
Normal file
@@ -0,0 +1,121 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Exceptions raised by PyMongo."""
|
||||
|
||||
from asyncio_mongo._bson.errors import *
|
||||
|
||||
try:
|
||||
from ssl import CertificateError
|
||||
except ImportError:
|
||||
from asyncio_mongo._pymongo.ssl_match_hostname import CertificateError
|
||||
|
||||
|
||||
class PyMongoError(Exception):
    """Base class for all PyMongo exceptions.

    .. versionadded:: 1.4
    """
    # NOTE(review): ExceededMaxWaiters (below) derives from Exception,
    # not from this base, so catching PyMongoError does not catch it.
|
||||
|
||||
|
||||
class ConnectionFailure(PyMongoError):
    """Raised when a connection to the database cannot be made or is lost.
    """
|
||||
|
||||
|
||||
class AutoReconnect(ConnectionFailure):
    """Raised when a connection to the database is lost and an attempt
    to auto-reconnect will be made.

    To auto-reconnect you must handle this exception, recognizing that
    the operation which caused it has not necessarily succeeded. Future
    operations will attempt to open a new connection to the database
    (and will continue to raise this exception until the first
    successful connection is made).
    """
    def __init__(self, message='', errors=None):
        # Per-host error details, defaulting to a fresh empty list.
        self.errors = errors if errors else []
        super().__init__(message)
|
||||
|
||||
|
||||
class ConfigurationError(PyMongoError):
    """Raised when something is incorrectly configured.
    """
|
||||
|
||||
|
||||
class OperationFailure(PyMongoError):
    """Raised when a database operation fails.

    .. versionadded:: 1.8
       The :attr:`code` attribute.
    """

    def __init__(self, error, code=None):
        # Numeric server error code, when the server supplied one.
        self.code = code
        super().__init__(error)
|
||||
|
||||
|
||||
class TimeoutError(OperationFailure):
    """Raised when a database operation times out.

    .. note:: Shadows the builtin :class:`TimeoutError` within this
       module; import it explicitly where the distinction matters.

    .. versionadded:: 1.8
    """
|
||||
|
||||
|
||||
class DuplicateKeyError(OperationFailure):
    """Raised when a safe insert or update fails due to a duplicate key error.

    .. note:: Requires server version **>= 1.3.0**

    .. versionadded:: 1.4
    """
|
||||
|
||||
|
||||
class InvalidOperation(PyMongoError):
    """Raised when a client attempts to perform an invalid operation.
    """
|
||||
|
||||
|
||||
class InvalidName(PyMongoError):
    """Raised when an invalid name (e.g. database or collection name)
    is used.
    """
|
||||
|
||||
|
||||
class CollectionInvalid(PyMongoError):
    """Raised when collection validation fails.
    """
|
||||
|
||||
|
||||
class InvalidURI(ConfigurationError):
    """Raised when trying to parse an invalid mongodb URI.

    .. versionadded:: 1.5
    """
|
||||
|
||||
|
||||
class UnsupportedOption(ConfigurationError):
    """Exception for unsupported options.

    .. versionadded:: 2.0
    """
|
||||
|
||||
|
||||
class ExceededMaxWaiters(Exception):
    """Raised when a thread tries to get a connection from a pool and
    ``max_pool_size * waitQueueMultiple`` threads are already waiting.

    .. versionadded:: 2.6
    """
    # NOTE(review): derives from Exception rather than PyMongoError, so
    # callers catching PyMongoError will not catch it — confirm intended.
    pass
|
||||
|
||||
174
asyncio_mongo/_pymongo/helpers.py
Normal file
174
asyncio_mongo/_pymongo/helpers.py
Normal file
@@ -0,0 +1,174 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Bits and pieces used by the driver that don't really fit elsewhere."""
|
||||
|
||||
import random
|
||||
import struct
|
||||
|
||||
import asyncio_mongo._bson as bson
|
||||
import asyncio_mongo._pymongo
|
||||
|
||||
from asyncio_mongo._bson.binary import OLD_UUID_SUBTYPE
|
||||
from asyncio_mongo._bson.son import SON
|
||||
from asyncio_mongo._pymongo.errors import (AutoReconnect,
|
||||
DuplicateKeyError,
|
||||
OperationFailure,
|
||||
TimeoutError)
|
||||
|
||||
|
||||
def _index_list(key_or_list, direction=None):
    """Helper to generate a list of (key, direction) pairs.

    Takes such a list, or a single key, or a single key and direction.

    :Parameters:
      - `key_or_list`: a list of (key, direction) pairs, or a single
        key name
      - `direction` (optional): sort/index direction to pair with a
        single key

    Raises :class:`TypeError` when no direction is given and
    `key_or_list` is neither a string nor a list.
    """
    if direction is not None:
        return [(key_or_list, direction)]
    if isinstance(key_or_list, str):
        # Bug fix: this module only does ``import asyncio_mongo._pymongo``,
        # so the bare name ``pymongo`` used here previously raised
        # NameError; qualify the constant through the actual import.
        return [(key_or_list, asyncio_mongo._pymongo.ASCENDING)]
    if not isinstance(key_or_list, list):
        raise TypeError("if no direction is specified, "
                        "key_or_list must be an instance of list")
    return key_or_list
|
||||
|
||||
|
||||
def _index_document(index_list):
    """Helper to generate an index specifying document.

    Takes a list of (key, direction) pairs and returns an ordered
    :class:`~bson.son.SON` mapping — ordering matters to the server.
    """
    # Guard against the common mistake of passing a plain dict, whose
    # key order is not what the caller wrote (pre-3.7 semantics here).
    if isinstance(index_list, dict):
        raise TypeError("passing a dict to sort/create_index/hint is not "
                        "allowed - use a list of tuples instead. did you "
                        "mean %r?" % list(index_list.items()))
    elif not isinstance(index_list, list):
        raise TypeError("must use a list of (key, direction) pairs, "
                        "not: " + repr(index_list))
    if not index_list:
        raise ValueError("key_or_list must not be the empty list")

    document = SON()
    for (name, direction) in index_list:
        if not isinstance(name, str):
            raise TypeError("first item in each key pair must be a string")
        if not isinstance(direction, (str, int)):
            raise TypeError("second item in each key pair must be 1, -1, "
                            "'2d', 'geoHaystack', or another valid MongoDB "
                            "index specifier.")
        document[name] = direction
    return document
|
||||
|
||||
|
||||
def _unpack_response(response, cursor_id=None, as_class=dict,
                     tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE):
    """Unpack a response from the database.

    Check the response for errors and unpack, returning a dictionary
    containing the response data.

    :Parameters:
      - `response`: byte string as returned from the database
      - `cursor_id` (optional): cursor_id we sent to get this response -
        used for raising an informative exception when we get cursor id not
        valid at server response
      - `as_class` (optional): class to use for resulting documents
      - `tz_aware` (optional): passed through to BSON decoding
      - `uuid_subtype` (optional): BSON binary subtype used for UUIDs
    """
    # The first 4 bytes are the reply's response flags (little-endian
    # int32); bit 0 signals an invalid cursor id, bit 1 an error reply.
    response_flag = struct.unpack("<i", response[:4])[0]
    if response_flag & 1:
        # Shouldn't get this response if we aren't doing a getMore
        assert cursor_id is not None

        raise OperationFailure("cursor id '%s' not valid at server" %
                               cursor_id)
    elif response_flag & 2:
        # Error reply: the payload is a single document with a "$err"
        # message describing the failure.
        error_object = bson.BSON(response[20:]).decode()
        if error_object["$err"].startswith("not master"):
            raise AutoReconnect(error_object["$err"])
        raise OperationFailure("database error: %s" %
                               error_object["$err"])

    # Remaining header: cursor id (int64 at bytes 4-12), starting
    # offset (int32 at 12-16), document count (int32 at 16-20), then
    # the concatenated BSON documents.
    result = {}
    result["cursor_id"] = struct.unpack("<q", response[4:12])[0]
    result["starting_from"] = struct.unpack("<i", response[12:16])[0]
    result["number_returned"] = struct.unpack("<i", response[16:20])[0]
    result["data"] = bson.decode_all(response[20:],
                                     as_class, tz_aware, uuid_subtype)
    assert len(result["data"]) == result["number_returned"]
    return result
|
||||
|
||||
|
||||
def _check_command_response(response, reset, msg="%s", allowable_errors=[]):
|
||||
"""Check the response to a command for errors.
|
||||
"""
|
||||
if not response["ok"]:
|
||||
if "wtimeout" in response and response["wtimeout"]:
|
||||
raise TimeoutError(msg % response["errmsg"])
|
||||
|
||||
details = response
|
||||
# Mongos returns the error details in a 'raw' object
|
||||
# for some errors.
|
||||
if "raw" in response:
|
||||
for shard in response["raw"].values():
|
||||
if not shard.get("ok"):
|
||||
# Just grab the first error...
|
||||
details = shard
|
||||
break
|
||||
|
||||
errmsg = details["errmsg"]
|
||||
if not errmsg in allowable_errors:
|
||||
if (errmsg.startswith("not master")
|
||||
or errmsg.startswith("node is recovering")):
|
||||
if reset is not None:
|
||||
reset()
|
||||
raise AutoReconnect(errmsg)
|
||||
if errmsg == "db assertion failure":
|
||||
ex_msg = ("db assertion failure, assertion: '%s'" %
|
||||
details.get("assertion", ""))
|
||||
if "assertionCode" in details:
|
||||
ex_msg += (", assertionCode: %d" %
|
||||
(details["assertionCode"],))
|
||||
raise OperationFailure(ex_msg, details.get("assertionCode"))
|
||||
code = details.get("code")
|
||||
# findAndModify with upsert can raise duplicate key error
|
||||
if code in (11000, 11001, 12582):
|
||||
raise DuplicateKeyError(errmsg, code)
|
||||
raise OperationFailure(msg % errmsg, code)
|
||||
|
||||
|
||||
def _fields_list_to_dict(fields):
|
||||
"""Takes a list of field names and returns a matching dictionary.
|
||||
|
||||
["a", "b"] becomes {"a": 1, "b": 1}
|
||||
|
||||
and
|
||||
|
||||
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
|
||||
"""
|
||||
as_dict = {}
|
||||
for field in fields:
|
||||
if not isinstance(field, str):
|
||||
raise TypeError("fields must be a list of key names, "
|
||||
"each an instance of %s" % (str.__name__,))
|
||||
as_dict[field] = 1
|
||||
return as_dict
|
||||
|
||||
|
||||
def shuffled(sequence):
    """Returns a copy of the sequence (as a :class:`list`) which has been
    shuffled by :func:`random.shuffle`.
    """
    items = list(sequence)
    # random.sample of the full length is a uniform shuffle of a fresh list.
    return random.sample(items, len(items))
|
||||
336
asyncio_mongo/_pymongo/master_slave_connection.py
Normal file
336
asyncio_mongo/_pymongo/master_slave_connection.py
Normal file
@@ -0,0 +1,336 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Master-Slave connection to Mongo.
|
||||
|
||||
Performs all writes to Master instance and distributes reads among all
|
||||
slaves. Reads are tried on each slave in turn until the read succeeds
|
||||
or all slaves failed.
|
||||
"""
|
||||
|
||||
from asyncio_mongo._pymongo import helpers, thread_util
|
||||
from asyncio_mongo._pymongo import ReadPreference
|
||||
from asyncio_mongo._pymongo.common import BaseObject
|
||||
from asyncio_mongo._pymongo.mongo_client import MongoClient
|
||||
from asyncio_mongo._pymongo.database import Database
|
||||
from asyncio_mongo._pymongo.errors import AutoReconnect
|
||||
|
||||
|
||||
class MasterSlaveConnection(BaseObject):
    """A master-slave connection to Mongo.
    """

    def __init__(self, master, slaves=None, document_class=dict,
                 tz_aware=False):
        """Create a new Master-Slave connection.

        The resultant connection should be interacted with using the same
        mechanisms as a regular `MongoClient`. The `MongoClient` instances used
        to create this `MasterSlaveConnection` can themselves make use of
        connection pooling, etc. `MongoClient` instances used as slaves should
        be created with the read_preference option set to
        :attr:`~pymongo.read_preferences.ReadPreference.SECONDARY`. Write
        concerns are inherited from `master` and can be changed in this
        instance.

        Raises TypeError if `master` is not an instance of `MongoClient` or
        slaves is not a list of at least one `MongoClient` instances.

        :Parameters:
          - `master`: `MongoClient` instance for the writable Master
          - `slaves`: list of `MongoClient` instances for the
            read-only slaves
          - `document_class` (optional): default class to use for
            documents returned from queries on this connection
          - `tz_aware` (optional): if ``True``,
            :class:`~datetime.datetime` instances returned as values
            in a document by this :class:`MasterSlaveConnection` will be
            timezone aware (otherwise they will be naive)
        """
        # Use a None sentinel rather than a mutable default argument.
        # Omitting `slaves` still raises TypeError below, exactly as the
        # old `slaves=[]` default did.
        if slaves is None:
            slaves = []
        if not isinstance(master, MongoClient):
            raise TypeError("master must be a MongoClient instance")
        if not isinstance(slaves, list) or len(slaves) == 0:
            raise TypeError("slaves must be a list of length >= 1")

        for slave in slaves:
            if not isinstance(slave, MongoClient):
                raise TypeError("slave %r is not an instance of MongoClient" %
                                slave)

        super(MasterSlaveConnection,
              self).__init__(read_preference=ReadPreference.SECONDARY,
                             safe=master.safe,
                             **master.write_concern)

        self.__master = master
        self.__slaves = slaves
        self.__document_class = document_class
        self.__tz_aware = tz_aware
        self.__request_counter = thread_util.Counter(master.use_greenlets)

    @property
    def master(self):
        return self.__master

    @property
    def slaves(self):
        return self.__slaves

    @property
    def is_mongos(self):
        """If this MasterSlaveConnection is connected to mongos (always False)

        .. versionadded:: 2.3
        """
        return False

    @property
    def use_greenlets(self):
        """Whether calling :meth:`start_request` assigns greenlet-local,
        rather than thread-local, sockets.

        .. versionadded:: 2.4.2
        """
        return self.master.use_greenlets

    def get_document_class(self):
        return self.__document_class

    def set_document_class(self, klass):
        self.__document_class = klass

    document_class = property(get_document_class, set_document_class,
                              doc="""Default class to use for documents
                              returned on this connection.""")

    @property
    def tz_aware(self):
        return self.__tz_aware

    @property
    def max_bson_size(self):
        """Return the maximum size BSON object the connected master
        accepts in bytes. Defaults to 4MB in server < 1.7.4.

        .. versionadded:: 2.6
        """
        return self.master.max_bson_size

    @property
    def max_message_size(self):
        """Return the maximum message size the connected master
        accepts in bytes.

        .. versionadded:: 2.6
        """
        return self.master.max_message_size

    def disconnect(self):
        """Disconnect from MongoDB.

        Disconnecting will call disconnect on all master and slave
        connections.

        .. seealso:: Module :mod:`~pymongo.mongo_client`
        .. versionadded:: 1.10.1
        """
        self.__master.disconnect()
        for slave in self.__slaves:
            slave.disconnect()

    def set_cursor_manager(self, manager_class):
        """Set the cursor manager for this connection.

        Helper to set cursor manager for each individual `MongoClient` instance
        that make up this `MasterSlaveConnection`.
        """
        self.__master.set_cursor_manager(manager_class)
        for slave in self.__slaves:
            slave.set_cursor_manager(manager_class)

    def _ensure_connected(self, sync):
        """Ensure the master is connected to a mongod/s.
        """
        self.__master._ensure_connected(sync)

    # _connection_to_use is a hack that we need to include to make sure
    # that killcursor operations can be sent to the same instance on which
    # the cursor actually resides...
    def _send_message(self, message,
                      with_last_error=False, _connection_to_use=None):
        """Say something to Mongo.

        Sends a message on the Master connection. This is used for inserts,
        updates, and deletes.

        Raises ConnectionFailure if the message cannot be sent. Returns the
        request id of the sent message.

        :Parameters:
          - `operation`: opcode of the message
          - `data`: data to send
          - `safe`: perform a getLastError after sending the message
        """
        if _connection_to_use is None or _connection_to_use == -1:
            return self.__master._send_message(message, with_last_error)
        return self.__slaves[_connection_to_use]._send_message(
            message, with_last_error, check_primary=False)

    # _connection_to_use is a hack that we need to include to make sure
    # that getmore operations can be sent to the same instance on which
    # the cursor actually resides...
    def _send_message_with_response(self, message, _connection_to_use=None,
                                    _must_use_master=False, **kwargs):
        """Receive a message from Mongo.

        Sends the given message and returns a (connection_id, response) pair.

        :Parameters:
          - `operation`: opcode of the message to send
          - `data`: data to send
        """
        if _connection_to_use is not None:
            if _connection_to_use == -1:
                member = self.__master
                conn = -1
            else:
                member = self.__slaves[_connection_to_use]
                conn = _connection_to_use
            return (conn,
                    member._send_message_with_response(message, **kwargs)[1])

        # _must_use_master is set for commands, which must be sent to the
        # master instance. any queries in a request must be sent to the
        # master since that is where writes go.
        if _must_use_master or self.in_request():
            return (-1, self.__master._send_message_with_response(message,
                                                                  **kwargs)[1])

        # Iterate through the slaves randomly until we have success. Raise
        # reconnect if they all fail.
        for connection_id in helpers.shuffled(range(len(self.__slaves))):
            try:
                slave = self.__slaves[connection_id]
                return (connection_id,
                        slave._send_message_with_response(message,
                                                          **kwargs)[1])
            except AutoReconnect:
                pass

        raise AutoReconnect("failed to connect to slaves")

    def start_request(self):
        """Start a "request".

        Start a sequence of operations in which order matters. Note
        that all operations performed within a request will be sent
        using the Master connection.
        """
        self.__request_counter.inc()
        self.master.start_request()

    def in_request(self):
        return bool(self.__request_counter.get())

    def end_request(self):
        """End the current "request".

        See documentation for `MongoClient.end_request`.
        """
        self.__request_counter.dec()
        self.master.end_request()

    def __eq__(self, other):
        if isinstance(other, MasterSlaveConnection):
            # Compare the same private attributes on both sides (the
            # original mixed the `slaves` property with `other.__slaves`).
            us = (self.__master, self.__slaves)
            them = (other.__master, other.__slaves)
            return us == them
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "MasterSlaveConnection(%r, %r)" % (self.__master, self.__slaves)

    def __getattr__(self, name):
        """Get a database by name.

        Raises InvalidName if an invalid database name is used.

        :Parameters:
          - `name`: the name of the database to get
        """
        return Database(self, name)

    def __getitem__(self, name):
        """Get a database by name.

        Raises InvalidName if an invalid database name is used.

        :Parameters:
          - `name`: the name of the database to get
        """
        return self.__getattr__(name)

    def close_cursor(self, cursor_id, connection_id):
        """Close a single database cursor.

        Raises TypeError if cursor_id is not an instance of (int, long). What
        closing the cursor actually means depends on this connection's cursor
        manager.

        :Parameters:
          - `cursor_id`: cursor id to close
          - `connection_id`: id of the `MongoClient` instance where the cursor
            was opened
        """
        if connection_id == -1:
            return self.__master.close_cursor(cursor_id)
        return self.__slaves[connection_id].close_cursor(cursor_id)

    def database_names(self):
        """Get a list of all database names.
        """
        return self.__master.database_names()

    def drop_database(self, name_or_database):
        """Drop a database.

        :Parameters:
          - `name_or_database`: the name of a database to drop or the object
            itself
        """
        return self.__master.drop_database(name_or_database)

    def __iter__(self):
        return self

    def __next__(self):
        raise TypeError("'MasterSlaveConnection' object is not iterable")

    def _cached(self, database_name, collection_name, index_name):
        return self.__master._cached(database_name,
                                     collection_name, index_name)

    def _cache_index(self, database_name, collection_name,
                     index_name, cache_for):
        return self.__master._cache_index(database_name, collection_name,
                                          index_name, cache_for)

    def _purge_index(self, database_name,
                     collection_name=None, index_name=None):
        return self.__master._purge_index(database_name,
                                          collection_name,
                                          index_name)
|
||||
254
asyncio_mongo/_pymongo/message.py
Normal file
254
asyncio_mongo/_pymongo/message.py
Normal file
@@ -0,0 +1,254 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Tools for creating `messages
|
||||
<http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol>`_ to be sent to
|
||||
MongoDB.
|
||||
|
||||
.. note:: This module is for internal use and is generally not needed by
|
||||
application developers.
|
||||
|
||||
.. versionadded:: 1.1.2
|
||||
"""
|
||||
|
||||
import random
|
||||
import struct
|
||||
|
||||
import asyncio_mongo._bson as bson
|
||||
from asyncio_mongo._bson.binary import OLD_UUID_SUBTYPE
|
||||
from asyncio_mongo._bson.py3compat import b
|
||||
from asyncio_mongo._bson.son import SON
|
||||
try:
|
||||
from asyncio_mongo._pymongo import _cmessage
|
||||
_use_c = True
|
||||
except ImportError:
|
||||
_use_c = False
|
||||
from asyncio_mongo._pymongo.errors import InvalidDocument, InvalidOperation, OperationFailure
|
||||
|
||||
|
||||
__ZERO = b("\x00\x00\x00\x00")
|
||||
|
||||
EMPTY = b("")
|
||||
|
||||
MAX_INT32 = 2147483647
|
||||
MIN_INT32 = -2147483648
|
||||
|
||||
|
||||
def __last_error(namespace, args):
    """Data to send to do a lastError.
    """
    command = SON([("getlasterror", 1)])
    command.update(args)
    # A lastError command goes to the $cmd collection of the namespace's
    # database (the part before the first dot).
    database = namespace.split('.', 1)[0]
    return query(0, database + '.$cmd', 0, -1, command)
|
||||
|
||||
|
||||
def __pack_message(operation, data):
    """Takes message data and adds a message header based on the operation.

    Returns the resultant message string.
    """
    request_id = random.randint(MIN_INT32, MAX_INT32)
    # Standard 16-byte header: messageLength, requestID, responseTo, opCode.
    header = b"".join([
        struct.pack("<i", 16 + len(data)),
        struct.pack("<i", request_id),
        __ZERO,
        struct.pack("<i", operation),
    ])
    return (request_id, header + data)
|
||||
|
||||
|
||||
def insert(collection_name, docs, check_keys,
           safe, last_error_args, continue_on_error, uuid_subtype):
    """Get an **insert** message.

    Returns a (request_id, message, max_doc_size) tuple.

    .. note:: As of PyMongo 2.6, this function is no longer used. It
       is being kept (with tests) for backwards compatibility with 3rd
       party libraries that may currently be using it, but will likely
       be removed in a future release.

    """
    options = 0
    if continue_on_error:
        # Flag bit 0 of the insert message: keep inserting after an error.
        options += 1
    data = struct.pack("<i", options)
    data += bson._make_c_string(collection_name)
    encoded = [bson.BSON.encode(doc, check_keys, uuid_subtype) for doc in docs]
    if not encoded:
        raise InvalidOperation("cannot do an empty bulk insert")
    # Size of the largest encoded document; max() consumes the map
    # iterator directly (no need for the intermediate list).
    max_bson_size = max(map(len, encoded))
    data += EMPTY.join(encoded)
    if safe:
        (_, insert_message) = __pack_message(2002, data)
        (request_id, error_message, _) = __last_error(collection_name,
                                                      last_error_args)
        return (request_id, insert_message + error_message, max_bson_size)
    else:
        (request_id, insert_message) = __pack_message(2002, data)
        return (request_id, insert_message, max_bson_size)
if _use_c:
    insert = _cmessage._insert_message
|
||||
|
||||
|
||||
def update(collection_name, upsert, multi,
           spec, doc, safe, last_error_args, check_keys, uuid_subtype):
    """Get an **update** message.

    Returns a (request_id, message, encoded_doc_size) tuple. When `safe`
    is true the message has a getLastError query appended.
    """
    # Update flags: bit 0 = upsert, bit 1 = multi-update.
    options = 0
    if upsert:
        options += 1
    if multi:
        options += 2

    data = __ZERO
    data += bson._make_c_string(collection_name)
    data += struct.pack("<i", options)
    data += bson.BSON.encode(spec, False, uuid_subtype)
    encoded = bson.BSON.encode(doc, check_keys, uuid_subtype)
    data += encoded
    if safe:
        # Append a getLastError query so the server acknowledges the write;
        # the returned request_id is the one for the getLastError.
        (_, update_message) = __pack_message(2001, data)
        (request_id, error_message, _) = __last_error(collection_name,
                                                      last_error_args)
        return (request_id, update_message + error_message, len(encoded))
    else:
        (request_id, update_message) = __pack_message(2001, data)
        return (request_id, update_message, len(encoded))
if _use_c:
    # Use the C extension's implementation when available.
    update = _cmessage._update_message
|
||||
|
||||
|
||||
def query(options, collection_name, num_to_skip,
          num_to_return, query, field_selector=None,
          uuid_subtype=OLD_UUID_SUBTYPE):
    """Get a **query** message.

    Returns a (request_id, message, max_doc_size) tuple, where
    max_doc_size is the size of the largest encoded document (the query
    spec or, if given, the field selector).

    NOTE: the `query` parameter shadows this function's own name inside
    the body; the recursive-looking call in __last_error resolves to this
    function at module level.
    """
    data = struct.pack("<I", options)
    data += bson._make_c_string(collection_name)
    data += struct.pack("<i", num_to_skip)
    data += struct.pack("<i", num_to_return)
    encoded = bson.BSON.encode(query, False, uuid_subtype)
    data += encoded
    max_bson_size = len(encoded)
    if field_selector is not None:
        encoded = bson.BSON.encode(field_selector, False, uuid_subtype)
        data += encoded
        max_bson_size = max(len(encoded), max_bson_size)
    (request_id, query_message) = __pack_message(2004, data)
    return (request_id, query_message, max_bson_size)
if _use_c:
    # Use the C extension's implementation when available.
    query = _cmessage._query_message
|
||||
|
||||
|
||||
def get_more(collection_name, num_to_return, cursor_id):
    """Get a **getMore** message.
    """
    # Payload: reserved zero bytes, the full collection name, the batch
    # size (int32) and the cursor id (int64).
    payload = __ZERO + bson._make_c_string(collection_name)
    payload += struct.pack("<iq", num_to_return, cursor_id)
    return __pack_message(2005, payload)
if _use_c:
    get_more = _cmessage._get_more_message
|
||||
|
||||
|
||||
def delete(collection_name, spec, safe, last_error_args, uuid_subtype):
    """Get a **delete** message.

    Returns a (request_id, message, encoded_spec_size) tuple. When `safe`
    is true the message has a getLastError query appended.
    """
    data = __ZERO
    data += bson._make_c_string(collection_name)
    data += __ZERO
    encoded = bson.BSON.encode(spec, False, uuid_subtype)
    data += encoded
    if safe:
        # Append a getLastError query so the server acknowledges the write;
        # the returned request_id is the one for the getLastError.
        (_, remove_message) = __pack_message(2006, data)
        (request_id, error_message, _) = __last_error(collection_name,
                                                      last_error_args)
        return (request_id, remove_message + error_message, len(encoded))
    else:
        (request_id, remove_message) = __pack_message(2006, data)
        return (request_id, remove_message, len(encoded))
|
||||
|
||||
|
||||
def kill_cursors(cursor_ids):
    """Get a **killCursors** message.
    """
    # Payload: reserved zero bytes, the cursor count, then each cursor id
    # as a little-endian int64.
    payload = __ZERO + struct.pack("<i", len(cursor_ids))
    payload += b"".join(struct.pack("<q", cid) for cid in cursor_ids)
    return __pack_message(2007, payload)
|
||||
|
||||
def _do_batched_insert(collection_name, docs, check_keys,
                       safe, last_error_args, continue_on_error,
                       uuid_subtype, client):
    """Insert `docs` using multiple batches.

    Encodes documents one at a time and flushes a batch whenever adding
    the next document would reach the client's max message size. Each
    document must individually fit in client.max_bson_size.
    """
    def _insert_message(insert_message, send_safe):
        """Build the insert message with header and GLE.
        """
        request_id, final_message = __pack_message(2002, insert_message)
        if send_safe:
            request_id, error_message, _ = __last_error(collection_name,
                                                        last_error_args)
            final_message += error_message
        return request_id, final_message

    if not docs:
        raise InvalidOperation("cannot do an empty bulk insert")

    last_error = None
    # Every batch starts with the flags int32 and the collection name.
    begin = struct.pack("<i", int(continue_on_error))
    begin += bson._make_c_string(collection_name)
    message_length = len(begin)
    data = [begin]
    for doc in docs:
        encoded = bson.BSON.encode(doc, check_keys, uuid_subtype)
        encoded_length = len(encoded)
        if encoded_length > client.max_bson_size:
            raise InvalidDocument("BSON document too large (%d bytes)"
                                  " - the connected server supports"
                                  " BSON document sizes up to %d"
                                  " bytes." %
                                  (encoded_length, client.max_bson_size))
        message_length += encoded_length
        if message_length < client.max_message_size:
            data.append(encoded)
            continue

        # We have enough data, send this message.
        # Intermediate batches are sent acknowledged unless the caller
        # asked for continue_on_error with unacknowledged writes.
        send_safe = safe or not continue_on_error
        try:
            client._send_message(_insert_message(EMPTY.join(data),
                                                 send_safe), send_safe)
        # Exception type could be OperationFailure or a subtype
        # (e.g. DuplicateKeyError)
        except OperationFailure as exc:
            # Like it says, continue on error...
            if continue_on_error:
                # Store exception details to re-raise after the final batch.
                last_error = exc
            # With unacknowledged writes just return at the first error.
            elif not safe:
                return
            # With acknowledged writes raise immediately.
            else:
                raise
        # Start the next batch with the header plus the document that
        # overflowed the previous one.
        message_length = len(begin) + encoded_length
        data = [begin, encoded]

    client._send_message(_insert_message(EMPTY.join(data), safe), safe)

    # Re-raise any exception stored due to continue_on_error
    if last_error is not None:
        raise last_error
if _use_c:
    # Use the C extension's implementation when available.
    _do_batched_insert = _cmessage._do_batched_insert
|
||||
1338
asyncio_mongo/_pymongo/mongo_client.py
Normal file
1338
asyncio_mongo/_pymongo/mongo_client.py
Normal file
File diff suppressed because it is too large
Load Diff
1855
asyncio_mongo/_pymongo/mongo_replica_set_client.py
Normal file
1855
asyncio_mongo/_pymongo/mongo_replica_set_client.py
Normal file
File diff suppressed because it is too large
Load Diff
555
asyncio_mongo/_pymongo/pool.py
Normal file
555
asyncio_mongo/_pymongo/pool.py
Normal file
@@ -0,0 +1,555 @@
|
||||
# Copyright 2011-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
# may not use this file except in compliance with the License. You
|
||||
# may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
from asyncio_mongo._pymongo import thread_util
|
||||
from asyncio_mongo._pymongo.common import HAS_SSL
|
||||
from asyncio_mongo._pymongo.errors import ConnectionFailure, ConfigurationError
|
||||
|
||||
try:
|
||||
from ssl import match_hostname
|
||||
except ImportError:
|
||||
from asyncio_mongo._pymongo.ssl_match_hostname import match_hostname
|
||||
|
||||
if HAS_SSL:
|
||||
import ssl
|
||||
|
||||
if sys.platform.startswith('java'):
|
||||
from select import cpython_compatible_select as select
|
||||
else:
|
||||
from select import select
|
||||
|
||||
|
||||
NO_REQUEST = None
|
||||
NO_SOCKET_YET = -1
|
||||
|
||||
|
||||
def _closed(sock):
|
||||
"""Return True if we know socket has been closed, False otherwise.
|
||||
"""
|
||||
try:
|
||||
rd, _, _ = select([sock], [], [], 0)
|
||||
# Any exception here is equally bad (select.error, ValueError, etc.).
|
||||
except:
|
||||
return True
|
||||
return len(rd) > 0
|
||||
|
||||
|
||||
class SocketInfo(object):
    """Store a socket with some metadata
    """
    def __init__(self, sock, pool_id, host=None):
        self.sock = sock
        self.host = host
        self.closed = False
        self.forced = False
        self.authset = set()
        self.last_checkout = time.time()

        # The pool's pool_id changes with each reset() so we can close sockets
        # created before the last reset.
        self.pool_id = pool_id

    def close(self):
        self.closed = True
        try:
            self.sock.close()
        # Avoid exceptions on interpreter shutdown.
        except:
            pass

    def __eq__(self, other):
        # NO_REQUEST and NO_SOCKET_YET have no `sock` attribute, so they
        # never compare equal; otherwise compare the underlying sockets.
        missing = object()
        return self.sock == getattr(other, 'sock', missing)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.sock)

    def __repr__(self):
        suffix = " CLOSED" if self.closed else ""
        return "SocketInfo(%s)%s at %s" % (repr(self.sock), suffix, id(self))
|
||||
|
||||
|
||||
# Do *not* explicitly inherit from object or Jython won't call __del__
|
||||
# http://bugs.jython.org/issue1057
|
||||
class Pool:
|
||||
def __init__(self, pair, max_size, net_timeout, conn_timeout, use_ssl,
|
||||
use_greenlets, ssl_keyfile=None, ssl_certfile=None,
|
||||
ssl_cert_reqs=None, ssl_ca_certs=None,
|
||||
wait_queue_timeout=None, wait_queue_multiple=None):
|
||||
"""
|
||||
:Parameters:
|
||||
- `pair`: a (hostname, port) tuple
|
||||
- `max_size`: The maximum number of open sockets. Calls to
|
||||
`get_socket` will block if this is set, this pool has opened
|
||||
`max_size` sockets, and there are none idle. Set to `None` to
|
||||
disable.
|
||||
- `net_timeout`: timeout in seconds for operations on open connection
|
||||
- `conn_timeout`: timeout in seconds for establishing connection
|
||||
- `use_ssl`: bool, if True use an encrypted connection
|
||||
- `use_greenlets`: bool, if True then start_request() assigns a
|
||||
socket to the current greenlet - otherwise it is assigned to the
|
||||
current thread
|
||||
- `ssl_keyfile`: The private keyfile used to identify the local
|
||||
connection against mongod. If included with the ``certfile` then
|
||||
only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
|
||||
- `ssl_certfile`: The certificate file used to identify the local
|
||||
connection against mongod. Implies ``ssl=True``.
|
||||
- `ssl_cert_reqs`: Specifies whether a certificate is required from
|
||||
the other side of the connection, and whether it will be validated
|
||||
if provided. It must be one of the three values ``ssl.CERT_NONE``
|
||||
(certificates ignored), ``ssl.CERT_OPTIONAL``
|
||||
(not required, but validated if provided), or ``ssl.CERT_REQUIRED``
|
||||
(required and validated). If the value of this parameter is not
|
||||
``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point
|
||||
to a file of CA certificates. Implies ``ssl=True``.
|
||||
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
|
||||
"certification authority" certificates, which are used to validate
|
||||
certificates passed from the other end of the connection.
|
||||
Implies ``ssl=True``.
|
||||
- `wait_queue_timeout`: (integer) How long (in seconds) a
|
||||
thread will wait for a socket from the pool if the pool has no
|
||||
free sockets.
|
||||
- `wait_queue_multiple`: (integer) Multiplied by max_pool_size to give
|
||||
the number of threads allowed to wait for a socket at one time.
|
||||
"""
|
||||
# Only check a socket's health with _closed() every once in a while.
|
||||
# Can override for testing: 0 to always check, None to never check.
|
||||
self._check_interval_seconds = 1
|
||||
|
||||
self.sockets = set()
|
||||
self.lock = threading.Lock()
|
||||
|
||||
# Keep track of resets, so we notice sockets created before the most
|
||||
# recent reset and close them.
|
||||
self.pool_id = 0
|
||||
self.pid = os.getpid()
|
||||
self.pair = pair
|
||||
self.max_size = max_size
|
||||
self.net_timeout = net_timeout
|
||||
self.conn_timeout = conn_timeout
|
||||
self.wait_queue_timeout = wait_queue_timeout
|
||||
self.wait_queue_multiple = wait_queue_multiple
|
||||
self.use_ssl = use_ssl
|
||||
self.ssl_keyfile = ssl_keyfile
|
||||
self.ssl_certfile = ssl_certfile
|
||||
self.ssl_cert_reqs = ssl_cert_reqs
|
||||
self.ssl_ca_certs = ssl_ca_certs
|
||||
|
||||
if HAS_SSL and use_ssl and not ssl_cert_reqs:
|
||||
self.ssl_cert_reqs = ssl.CERT_NONE
|
||||
|
||||
# Map self._ident.get() -> request socket
|
||||
self._tid_to_sock = {}
|
||||
|
||||
if use_greenlets and not thread_util.have_gevent:
|
||||
raise ConfigurationError(
|
||||
"The Gevent module is not available. "
|
||||
"Install the gevent package from PyPI."
|
||||
)
|
||||
|
||||
self._ident = thread_util.create_ident(use_greenlets)
|
||||
|
||||
# Count the number of calls to start_request() per thread or greenlet
|
||||
self._request_counter = thread_util.Counter(use_greenlets)
|
||||
|
||||
if self.wait_queue_multiple is None or self.max_size is None:
|
||||
max_waiters = None
|
||||
else:
|
||||
max_waiters = self.max_size * self.wait_queue_multiple
|
||||
|
||||
self._socket_semaphore = thread_util.create_semaphore(
|
||||
self.max_size, max_waiters, use_greenlets)
|
||||
|
||||
def reset(self):
|
||||
# Ignore this race condition -- if many threads are resetting at once,
|
||||
# the pool_id will definitely change, which is all we care about.
|
||||
self.pool_id += 1
|
||||
self.pid = os.getpid()
|
||||
|
||||
sockets = None
|
||||
try:
|
||||
# Swapping variables is not atomic. We need to ensure no other
|
||||
# thread is modifying self.sockets, or replacing it, in this
|
||||
# critical section.
|
||||
self.lock.acquire()
|
||||
sockets, self.sockets = self.sockets, set()
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
for sock_info in sockets:
|
||||
sock_info.close()
|
||||
|
||||
def create_connection(self, pair):
    """Connect to *pair* and return the socket object.

    This is a modified version of create_connection from
    CPython >=2.6.
    """
    # Fall back to the pool's default address when no explicit pair
    # is given.
    host, port = pair or self.pair

    # Check if dealing with a unix domain socket
    if host.endswith('.sock'):
        if not hasattr(socket, "AF_UNIX"):
            raise ConnectionFailure("UNIX-sockets are not supported "
                                    "on this system")
        sock = socket.socket(socket.AF_UNIX)
        try:
            sock.connect(host)
            return sock
        except socket.error as e:
            if sock is not None:
                sock.close()
            raise e

    # Don't try IPv6 if we don't support it. Also skip it if host
    # is 'localhost' (::1 is fine). Avoids slow connect issues
    # like PYTHON-356.
    family = socket.AF_INET
    if socket.has_ipv6 and host != 'localhost':
        family = socket.AF_UNSPEC

    # Try each address getaddrinfo returns until one connects,
    # remembering the last error so it can be re-raised if all fail.
    err = None
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, dummy, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            # Disable Nagle's algorithm; the driver batches its own
            # messages.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(self.conn_timeout or 20.0)
            sock.connect(sa)
            return sock
        except socket.error as e:
            err = e
            if sock is not None:
                sock.close()

    if err is not None:
        raise err
    else:
        # This likely means we tried to connect to an IPv6 only
        # host with an OS/kernel or Python interpreter that doesn't
        # support IPv6. The test case is Jython2.5.1 which doesn't
        # support IPv6 at all.
        raise socket.error('getaddrinfo failed')
|
||||
|
||||
def connect(self, pair):
    """Connect to Mongo and return a new (connected) socket. Note that the
    pool does not keep a reference to the socket -- you must call
    return_socket() when you're done with it.
    """
    sock = self.create_connection(pair)
    hostname = (pair or self.pair)[0]

    if self.use_ssl:
        try:
            sock = ssl.wrap_socket(sock,
                                   certfile=self.ssl_certfile,
                                   keyfile=self.ssl_keyfile,
                                   ca_certs=self.ssl_ca_certs,
                                   cert_reqs=self.ssl_cert_reqs)
            # Only verify the server's hostname when certificate
            # validation was requested.
            if self.ssl_cert_reqs:
                match_hostname(sock.getpeercert(), hostname)

        except ssl.SSLError:
            sock.close()
            raise ConnectionFailure("SSL handshake failed. MongoDB may "
                                    "not be configured with SSL support.")

    # The connect timeout is no longer needed; use the normal operation
    # timeout for the rest of the socket's life.
    sock.settimeout(self.net_timeout)
    return SocketInfo(sock, self.pool_id, hostname)
|
||||
|
||||
def get_socket(self, pair=None, force=False):
    """Get a socket from the pool.

    Returns a :class:`SocketInfo` object wrapping a connected
    :class:`socket.socket`.

    :Parameters:
      - `pair`: optional (hostname, port) tuple
      - `force`: optional boolean, forces a connection to be returned
          without blocking, even if `max_size` has been reached.
    """
    # We use the pid here to avoid issues with fork / multiprocessing.
    # See test.test_client:TestClient.test_fork for an example of
    # what could go wrong otherwise
    if self.pid != os.getpid():
        self.reset()

    # Have we opened a socket for this request?
    req_state = self._get_request_state()
    if req_state not in (NO_SOCKET_YET, NO_REQUEST):
        # There's a socket for this request, check it and return it
        checked_sock = self._check(req_state, pair)
        if checked_sock != req_state:
            # _check() replaced a dead request socket with a fresh one;
            # remember it for subsequent calls within this request.
            self._set_request_state(checked_sock)

        checked_sock.last_checkout = time.time()
        return checked_sock

    forced = False
    # We're not in a request, just get any free socket or create one
    if force:
        # If we're doing an internal operation, attempt to play nicely with
        # max_size, but if there is no open "slot" force the connection
        # and mark it as forced so we don't release the semaphore without
        # having acquired it for this socket.
        if not self._socket_semaphore.acquire(False):
            forced = True
    elif not self._socket_semaphore.acquire(True, self.wait_queue_timeout):
        self._raise_wait_queue_timeout()

    # We've now acquired the semaphore and must release it on error.
    try:
        sock_info, from_pool = None, None
        try:
            try:
                # set.pop() isn't atomic in Jython less than 2.7, see
                # http://bugs.jython.org/issue1854
                self.lock.acquire()
                sock_info, from_pool = self.sockets.pop(), True
            finally:
                self.lock.release()
        except KeyError:
            # The pool was empty; open a brand-new connection instead.
            sock_info, from_pool = self.connect(pair), False

        if from_pool:
            # A pooled socket may be stale; _check() reconnects if needed.
            sock_info = self._check(sock_info, pair)

        sock_info.forced = forced

        if req_state == NO_SOCKET_YET:
            # start_request has been called but we haven't assigned a
            # socket to the request yet. Let's use this socket for this
            # request until end_request.
            self._set_request_state(sock_info)
    except:
        if not forced:
            self._socket_semaphore.release()
        raise

    sock_info.last_checkout = time.time()
    return sock_info
|
||||
|
||||
def start_request(self):
    """Begin (or nest one level deeper into) a request for the calling
    thread / greenlet, pinning a single socket to it until a matching
    number of end_request() calls is made.
    """
    current = self._get_request_state()
    if current == NO_REQUEST:
        # Mark "in a request, no socket assigned yet"; get_socket()
        # fills in the actual socket lazily on first use.
        self._set_request_state(NO_SOCKET_YET)

    self._request_counter.inc()
|
||||
|
||||
def in_request(self):
    """True if the calling thread / greenlet is inside a request."""
    nesting_depth = self._request_counter.get()
    return bool(nesting_depth)
|
||||
|
||||
def end_request(self):
    """Undo one start_request(); release the request socket when the
    outermost request ends.
    """
    # Check if start_request has ever been called in this thread /
    # greenlet; if not, this call is a no-op.
    depth = self._request_counter.get()
    if not depth:
        return

    self._request_counter.dec()
    if depth == 1:
        # Outermost end_request: detach the request socket (if one was
        # ever assigned) and return it to the pool.
        sock_info = self._get_request_state()
        self._set_request_state(NO_REQUEST)
        if sock_info not in (NO_REQUEST, NO_SOCKET_YET):
            self._return_socket(sock_info)
|
||||
|
||||
def discard_socket(self, sock_info):
    """Close and discard the active socket.
    """
    # Sentinels are not real sockets; nothing to close or discard.
    if sock_info in (NO_REQUEST, NO_SOCKET_YET):
        return

    sock_info.close()

    if sock_info == self._get_request_state():
        # Discarding the request socket; arrange for a fresh request
        # socket to be created by the next get_socket() call.
        self._set_request_state(NO_SOCKET_YET)
|
||||
|
||||
def maybe_return_socket(self, sock_info):
    """Return the socket to the pool unless it's the request socket.

    A request socket stays pinned to its thread until end_request();
    anything else goes back through _return_socket().
    """
    # These sentinel values should only be used internally.
    assert sock_info not in (NO_REQUEST, NO_SOCKET_YET)

    if self.pid != os.getpid():
        # We've been forked: this socket belongs to the parent process's
        # pool. Balance the semaphore and start this process fresh.
        if not sock_info.forced:
            self._socket_semaphore.release()
        self.reset()
    else:
        if sock_info.closed:
            # A dead socket never re-enters the pool; just balance the
            # forced flag / semaphore bookkeeping and bail out.
            if sock_info.forced:
                sock_info.forced = False
            elif sock_info != self._get_request_state():
                self._socket_semaphore.release()
            return

        if sock_info != self._get_request_state():
            self._return_socket(sock_info)
|
||||
|
||||
def _return_socket(self, sock_info):
|
||||
"""Return socket to the pool. If pool is full the socket is discarded.
|
||||
"""
|
||||
try:
|
||||
self.lock.acquire()
|
||||
too_many_sockets = (self.max_size is not None
|
||||
and len(self.sockets) >= self.max_size)
|
||||
|
||||
if not too_many_sockets and sock_info.pool_id == self.pool_id:
|
||||
self.sockets.add(sock_info)
|
||||
else:
|
||||
sock_info.close()
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
if sock_info.forced:
|
||||
sock_info.forced = False
|
||||
else:
|
||||
self._socket_semaphore.release()
|
||||
|
||||
def _check(self, sock_info, pair):
    """This side-effecty function checks if this pool has been reset since
    the last time this socket was used, or if the socket has been closed by
    some external network error, and if so, attempts to create a new socket.
    If this connection attempt fails we reset the pool and reraise the
    error.

    Checking sockets lets us avoid seeing *some*
    :class:`~pymongo.errors.AutoReconnect` exceptions on server
    hiccups, etc. We only do this if it's been > 1 second since
    the last socket checkout, to keep performance reasonable - we
    can't avoid AutoReconnects completely anyway.
    """
    error = False

    # How long since socket was last checked out.
    age = time.time() - sock_info.last_checkout

    if sock_info.closed:
        error = True

    elif self.pool_id != sock_info.pool_id:
        # The pool was reset after this socket was checked out; it
        # belongs to an old generation and must be replaced.
        sock_info.close()
        error = True

    elif (self._check_interval_seconds is not None
            and (
                0 == self._check_interval_seconds
                or age > self._check_interval_seconds)):
        # An interval of 0 means "probe every time"; otherwise only
        # probe sockets idle longer than the interval.
        if _closed(sock_info.sock):
            sock_info.close()
            error = True

    if not error:
        return sock_info
    else:
        try:
            return self.connect(pair)
        except socket.error:
            # Can't reach the server at all: invalidate the whole pool
            # before propagating the failure.
            self.reset()
            raise
|
||||
|
||||
def _set_request_state(self, sock_info):
    """Assign (or, with NO_REQUEST, clear) the request socket for the
    current thread / greenlet, installing a death-watch so the socket
    is reclaimed if the thread dies without calling end_request().
    """
    ident = self._ident
    tid = ident.get()

    if sock_info == NO_REQUEST:
        # Ending a request
        ident.unwatch(tid)
        self._tid_to_sock.pop(tid, None)
    else:
        self._tid_to_sock[tid] = sock_info

        if not ident.watching():
            # Closure over tid, poolref, and ident. Don't refer directly to
            # self, otherwise there's a cycle.

            # Do not access threadlocals in this function, or any
            # function it calls! In the case of the Pool subclass and
            # mod_wsgi 2.x, on_thread_died() is triggered when mod_wsgi
            # calls PyThreadState_Clear(), which deferences the
            # ThreadVigil and triggers the weakref callback. Accessing
            # thread locals in this function, while PyThreadState_Clear()
            # is in progress can cause leaks, see PYTHON-353.
            poolref = weakref.ref(self)

            def on_thread_died(ref):
                try:
                    ident.unwatch(tid)
                    pool = poolref()
                    if pool:
                        # End the request
                        request_sock = pool._tid_to_sock.pop(tid, None)

                        # Was thread ever assigned a socket before it died?
                        if request_sock not in (NO_REQUEST, NO_SOCKET_YET):
                            pool._return_socket(request_sock)
                except:
                    # Random exceptions on interpreter shutdown.
                    pass

            ident.watch(on_thread_died)
|
||||
|
||||
def _get_request_state(self):
    """Return the socket assigned to this thread's request, or the
    NO_REQUEST sentinel if the thread isn't in a request."""
    return self._tid_to_sock.get(self._ident.get(), NO_REQUEST)
|
||||
|
||||
def _raise_wait_queue_timeout(self):
    """Raise ConnectionFailure after get_socket() waited too long for a
    free slot in the pool."""
    message = (
        'Timed out waiting for socket from pool with max_size %r and'
        ' wait_queue_timeout %r' % (self.max_size, self.wait_queue_timeout))
    raise ConnectionFailure(message)
|
||||
|
||||
def __del__(self):
    # Avoid ResourceWarnings in Python 3: close everything the pool
    # still references when it is garbage collected.
    for idle_sock in self.sockets:
        idle_sock.close()

    # Also close sockets still pinned to thread requests.
    for pinned_sock in list(self._tid_to_sock.values()):
        if pinned_sock not in (NO_REQUEST, NO_SOCKET_YET):
            pinned_sock.close()
|
||||
|
||||
|
||||
class Request(object):
    """
    A context manager returned by :meth:`start_request`, so you can do
    `with client.start_request(): do_something()` in Python 2.5+.
    """

    def __init__(self, connection):
        self.connection = connection

    def end(self):
        """Explicitly end the request on the wrapped connection."""
        self.connection.end_request()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end()
        # Returning False propagates any exception raised inside the
        # with-block instead of suppressing it.
        return False
|
||||
211
asyncio_mongo/_pymongo/read_preferences.py
Normal file
211
asyncio_mongo/_pymongo/read_preferences.py
Normal file
@@ -0,0 +1,211 @@
|
||||
# Copyright 2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License",
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for choosing which member of a replica set to read from."""
|
||||
|
||||
import random
|
||||
|
||||
from asyncio_mongo._pymongo.errors import ConfigurationError
|
||||
|
||||
|
||||
class ReadPreference:
    """An enum that defines the read preference modes supported by PyMongo.

    * `PRIMARY`: Queries go to the replica set primary (or a standalone
      server). Cannot be combined with tag sets.
    * `PRIMARY_PREFERRED`: Queries go to the primary if available,
      otherwise to a secondary.
    * `SECONDARY`: Queries are distributed among secondaries; an error
      is raised if no secondary is available.
    * `SECONDARY_PREFERRED`: Queries are distributed among secondaries,
      or sent to the primary if no secondary is available.
    * `NEAREST`: Queries are distributed among all members.

    With a mongos and a sharded cluster of replica sets, the same
    semantics apply per shard. `SECONDARY_ONLY` is an alias for
    `SECONDARY`.
    """

    PRIMARY = 0
    PRIMARY_PREFERRED = 1
    SECONDARY = 2
    SECONDARY_ONLY = 2  # alias for SECONDARY
    SECONDARY_PREFERRED = 3
    NEAREST = 4
|
||||
|
||||
# For formatting error messages
# Maps ReadPreference integer modes to their symbolic names. Note that
# SECONDARY_ONLY is an alias of SECONDARY (both == 2), so it shares the
# 'SECONDARY' entry.
modes = {
    ReadPreference.PRIMARY: 'PRIMARY',
    ReadPreference.PRIMARY_PREFERRED: 'PRIMARY_PREFERRED',
    ReadPreference.SECONDARY: 'SECONDARY',
    ReadPreference.SECONDARY_PREFERRED: 'SECONDARY_PREFERRED',
    ReadPreference.NEAREST: 'NEAREST',
}
|
||||
|
||||
_mongos_modes = [
|
||||
'primary',
|
||||
'primaryPreferred',
|
||||
'secondary',
|
||||
'secondaryPreferred',
|
||||
'nearest',
|
||||
]
|
||||
|
||||
def mongos_mode(mode):
|
||||
return _mongos_modes[mode]
|
||||
|
||||
def mongos_enum(enum):
|
||||
return _mongos_modes.index(enum)
|
||||
|
||||
def select_primary(members):
    """Return the primary member if it is up, else None.

    A primary that is known but currently down yields None immediately
    rather than falling through to other members.
    """
    for member in members:
        if member.is_primary:
            return member if member.up else None

    return None
|
||||
|
||||
|
||||
def select_member_with_tags(members, tags, secondary_only, latency):
    """Return a random member matching *tags* whose average ping time is
    within *latency* milliseconds of the fastest match, or None.

    :Parameters:
      - `members`: candidate members
      - `tags`: a tag document each candidate must match
      - `secondary_only`: if True, never return a primary
      - `latency`: acceptable ping-time window, in milliseconds
    """
    # Keep only members that are up, in a readable state (primary or
    # secondary), permitted by secondary_only, and matching the tags.
    candidates = [
        member for member in members
        if member.up
        and not (secondary_only and member.is_primary)
        and (member.is_primary or member.is_secondary)
        and member.matches_tags(tags)
    ]

    if not candidates:
        return None

    # ping_time is in seconds; latency is in milliseconds.
    fastest = min(member.get_avg_ping_time() for member in candidates)
    near_candidates = [
        member for member in candidates
        if member.get_avg_ping_time() - fastest < latency / 1000.]

    return random.choice(near_candidates)
|
||||
|
||||
|
||||
def select_member(
        members,
        mode=ReadPreference.PRIMARY,
        tag_sets=None,
        latency=15
):
    """Return a Member or None.

    Pick a member of *members* according to the read preference *mode*,
    the tag sets (tried in priority order), and the acceptable latency
    window in milliseconds.
    """
    if tag_sets is None:
        tag_sets = [{}]

    if mode == ReadPreference.PRIMARY:
        if tag_sets != [{}]:
            raise ConfigurationError("PRIMARY cannot be combined with tags")
        return select_primary(members)

    if mode == ReadPreference.PRIMARY_PREFERRED:
        # Try the primary first, then fall back to a SECONDARY search.
        return (
            select_member(members, ReadPreference.PRIMARY, [{}], latency)
            or select_member(
                members, ReadPreference.SECONDARY, tag_sets, latency))

    if mode == ReadPreference.SECONDARY:
        # Try each tag set in priority order until one matches.
        for tags in tag_sets:
            chosen = select_member_with_tags(members, tags, True, latency)
            if chosen:
                return chosen

        return None

    if mode == ReadPreference.SECONDARY_PREFERRED:
        # Try secondaries first, then fall back to the primary.
        return (
            select_member(
                members, ReadPreference.SECONDARY, tag_sets, latency)
            or select_member(members, ReadPreference.PRIMARY, [{}], latency))

    if mode == ReadPreference.NEAREST:
        # Any readable member (primary or secondary) matching the tags.
        for tags in tag_sets:
            chosen = select_member_with_tags(members, tags, False, latency)
            if chosen:
                return chosen

        # Ran out of tags.
        return None

    raise ConfigurationError("Invalid mode %s" % repr(mode))
|
||||
|
||||
|
||||
"""Commands that may be sent to replica-set secondaries, depending on
|
||||
ReadPreference and tags. All other commands are always run on the primary.
|
||||
"""
|
||||
secondary_ok_commands = frozenset([
|
||||
"group", "aggregate", "collstats", "dbstats", "count", "distinct",
|
||||
"geonear", "geosearch", "geowalk", "mapreduce", "getnonce", "authenticate",
|
||||
"text",
|
||||
])
|
||||
|
||||
|
||||
class MovingAverage(object):
    """Immutable structure to track a 5-sample moving average."""

    def __init__(self, samples):
        # Retain at most the five most recent samples.
        self.samples = samples[-5:]
        assert self.samples
        self.average = sum(self.samples) / float(len(self.samples))

    def clone_with(self, sample):
        """Get a copy of this instance plus a new sample"""
        return MovingAverage(self.samples + [sample])

    def get(self):
        """Return the current average."""
        return self.average
|
||||
222
asyncio_mongo/_pymongo/replica_set_connection.py
Normal file
222
asyncio_mongo/_pymongo/replica_set_connection.py
Normal file
@@ -0,0 +1,222 @@
|
||||
# Copyright 2011-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
# may not use this file except in compliance with the License. You
|
||||
# may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
"""Tools for connecting to a MongoDB replica set.
|
||||
|
||||
.. warning::
|
||||
**DEPRECATED:** Please use :mod:`~pymongo.mongo_replica_set_client` instead.
|
||||
|
||||
.. seealso:: :doc:`/examples/high_availability` for more examples of
|
||||
how to connect to a replica set.
|
||||
|
||||
To get a :class:`~pymongo.database.Database` instance from a
|
||||
:class:`ReplicaSetConnection` use either dictionary-style or
|
||||
attribute-style access:
|
||||
|
||||
.. doctest::
|
||||
|
||||
>>> from asyncio_mongo._pymongo import ReplicaSetConnection
|
||||
>>> c = ReplicaSetConnection('localhost:27017', replicaSet='repl0')
|
||||
>>> c.test_database
|
||||
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
|
||||
>>> c['test_database']
|
||||
Database(ReplicaSetConnection([u'...', u'...']), u'test_database')
|
||||
"""
|
||||
from asyncio_mongo._pymongo.mongo_replica_set_client import MongoReplicaSetClient
|
||||
from asyncio_mongo._pymongo.errors import ConfigurationError
|
||||
|
||||
|
||||
class ReplicaSetConnection(MongoReplicaSetClient):
    """Connection to a MongoDB replica set.

    .. warning::
       **DEPRECATED:** Please use
       :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
       instead.
    """

    def __init__(self, hosts_or_uri=None, max_pool_size=None,
                 document_class=dict, tz_aware=False, **kwargs):
        """Create a new connection to a MongoDB replica set.

        .. warning::
           **DEPRECATED:** :class:`ReplicaSetConnection` is deprecated.
           Please use
           :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
           instead.

        This class differs from
        :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
        only in its legacy defaults and one legacy keyword:

        - writes are **unacknowledged** unless ``safe=True`` is passed;
        - ``auto_start_request`` defaults to ``True``;
        - a ``network_timeout`` keyword (positive number of seconds) is
          accepted for compatibility with connection.Connection and is
          translated to ``socketTimeoutMS``; it takes precedence over a
          ``socketTimeoutMS`` keyword.

        Raises :class:`~pymongo.errors.ConnectionFailure` if the
        connection cannot be made.

        :Parameters:
          - `hosts_or_uri` (optional): a MongoDB URI or string of
            `host:port` pairs; defaults to 'localhost:27017'. IPv6
            literals must be enclosed in '[' and ']' (RFC2732).
          - `max_pool_size` (optional): maximum number of sockets each
            pool opens simultaneously; unlimited by default.
          - `document_class` (optional): default class for documents
            returned from queries on this connection.
          - `tz_aware` (optional): if ``True``, return timezone-aware
            :class:`~datetime.datetime` instances.
          - `replicaSet`: (required) name of the replica set to connect
            to, as a keyword argument or MongoDB URI option.

        All other keyword arguments (write concern, read preference,
        SSL configuration, `use_greenlets`, etc.) are passed through to
        :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`;
        see its documentation for the full list.

        .. note:: Instances start a background task to monitor the
           replica set's state. Call :meth:`~close` before discarding an
           instance so the monitor shuts down cleanly.

        .. versionchanged:: 2.5
           Added additional ssl options
        .. versionchanged:: 2.3
           Added `tag_sets` and `secondary_acceptable_latency_ms` options.
        .. versionchanged:: 2.2
           Added `auto_start_request` and `use_greenlets` options.
           Added support for `host`, `port`, and `network_timeout` keyword
           arguments for compatibility with connection.Connection.
        .. versionadded:: 2.1
        """
        # Translate the legacy network_timeout (in seconds) into the
        # socketTimeoutMS (in milliseconds) the client understands.
        network_timeout = kwargs.pop('network_timeout', None)
        if network_timeout is not None:
            if (not isinstance(network_timeout, (int, float)) or
                    network_timeout <= 0):
                raise ConfigurationError("network_timeout must "
                                         "be a positive integer")
            kwargs['socketTimeoutMS'] = network_timeout * 1000

        # Legacy defaults: requests start automatically, and writes are
        # unacknowledged unless the caller opts in with safe=True.
        kwargs.setdefault('auto_start_request', True)
        kwargs.setdefault('safe', False)

        super(ReplicaSetConnection, self).__init__(
            hosts_or_uri, max_pool_size, document_class, tz_aware, **kwargs)

    def __repr__(self):
        formatted_nodes = ["%s:%d" % node for node in self.hosts]
        return "ReplicaSetConnection(%r)" % (formatted_nodes,)
|
||||
177
asyncio_mongo/_pymongo/son_manipulator.py
Normal file
177
asyncio_mongo/_pymongo/son_manipulator.py
Normal file
@@ -0,0 +1,177 @@
|
||||
# Copyright 2009-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Manipulators that can edit SON objects as they enter and exit a database.
|
||||
|
||||
New manipulators should be defined as subclasses of SONManipulator and can be
|
||||
installed on a database by calling
|
||||
`pymongo.database.Database.add_son_manipulator`."""
|
||||
|
||||
from asyncio_mongo._bson.dbref import DBRef
|
||||
from asyncio_mongo._bson.objectid import ObjectId
|
||||
from asyncio_mongo._bson.son import SON
|
||||
|
||||
|
||||
class SONManipulator(object):
    """Base class for SON manipulators.

    The base implementation passes documents through unchanged in both
    directions; subclasses override the hooks they need.
    """

    def will_copy(self):
        """Return True if this manipulator makes a copy of incoming documents.

        Non-copying manipulators are applied first (so the caller's document
        is updated in place), followed by copying manipulators. Derived
        classes that copy should override this to return True.
        """
        return False

    def transform_incoming(self, son, collection):
        """Manipulate a document on its way into the database.

        :Parameters:
          - `son`: the SON object to be inserted into the database
          - `collection`: the collection the object is being inserted into
        """
        return SON(son) if self.will_copy() else son

    def transform_outgoing(self, son, collection):
        """Manipulate a document on its way out of the database.

        :Parameters:
          - `son`: the SON object being retrieved from the database
          - `collection`: the collection this object was stored in
        """
        return SON(son) if self.will_copy() else son
|
||||
|
||||
|
||||
class ObjectIdInjector(SONManipulator):
    """A son manipulator that adds the _id field if it is missing."""

    def transform_incoming(self, son, collection):
        """Add an ``_id`` field (a fresh ObjectId) if the document lacks one.

        :Parameters:
          - `son`: the document being inserted
          - `collection`: the target collection (unused here)
        """
        # `x not in y` is the idiomatic membership test (was `not "_id" in son`).
        if "_id" not in son:
            son["_id"] = ObjectId()
        return son
|
||||
|
||||
|
||||
# This is now handled during BSON encoding (for performance reasons),
|
||||
# but I'm keeping this here as a reference for those implementing new
|
||||
# SONManipulators.
|
||||
class ObjectIdShuffler(SONManipulator):
    """Move ``_id`` to the first position in the document.

    Kept for reference for new SONManipulator implementations; the actual
    reordering is now done during BSON encoding for performance reasons.
    """

    def will_copy(self):
        """Copy, so we are guaranteed to work with a SON rather than a dict."""
        return True

    def transform_incoming(self, son, collection):
        """Return a copy of *son* with ``_id`` first, or *son* untouched."""
        if "_id" not in son:
            return son
        reordered = SON({"_id": son["_id"]})
        reordered.update(son)
        return reordered
|
||||
|
||||
|
||||
class NamespaceInjector(SONManipulator):
    """Stamp each incoming document with the name of its collection."""

    def transform_incoming(self, son, collection):
        """Set ``_ns`` on *son* to *collection*'s name and return *son*."""
        son["_ns"] = collection.name
        return son
|
||||
|
||||
|
||||
class AutoReference(SONManipulator):
    """Transparently reference and de-reference already saved embedded objects.

    This manipulator should probably only be used when the NamespaceInjector is
    also being used, otherwise it doesn't make too much sense - documents can
    only be auto-referenced if they have an *_ns* field.

    NOTE: this will behave poorly if you have a circular reference.

    TODO: this only works for documents that are in the same database. To fix
    this we'll need to add a DatabaseInjector that adds *_db* and then make
    use of the optional *database* support for DBRefs.
    """

    def __init__(self, db):
        # Database handle used by transform_outgoing to dereference DBRefs.
        self.database = db

    def will_copy(self):
        """We need to copy so the user's document doesn't get transformed refs.
        """
        return True

    def transform_incoming(self, son, collection):
        """Replace embedded documents with DBRefs.
        """

        def transform_value(value):
            # A dict carrying both _id and _ns is a previously saved document:
            # collapse it to a DBRef. Other dicts and lists are recursed into.
            if isinstance(value, dict):
                if "_id" in value and "_ns" in value:
                    return DBRef(value["_ns"], transform_value(value["_id"]))
                else:
                    return transform_dict(SON(value))
            elif isinstance(value, list):
                return [transform_value(v) for v in value]
            return value

        def transform_dict(object):
            # list() snapshot because we mutate the mapping while iterating.
            for (key, value) in list(object.items()):
                object[key] = transform_value(value)
            return object

        return transform_dict(SON(son))

    def transform_outgoing(self, son, collection):
        """Replace DBRefs with embedded documents.
        """

        def transform_value(value):
            # Fetch the referenced document; recurse into lists and dicts so
            # nested references are expanded too.
            if isinstance(value, DBRef):
                return self.database.dereference(value)
            elif isinstance(value, list):
                return [transform_value(v) for v in value]
            elif isinstance(value, dict):
                return transform_dict(SON(value))
            return value

        def transform_dict(object):
            # list() snapshot because we mutate the mapping while iterating.
            for (key, value) in list(object.items()):
                object[key] = transform_value(value)
            return object

        return transform_dict(SON(son))
|
||||
|
||||
# TODO make a generic translator for custom types. Take encode, decode,
|
||||
# should_encode and should_decode functions and just encode and decode where
|
||||
# necessary. See examples/custom_type.py for where this would be useful.
|
||||
# Alternatively it could take a should_encode, to_binary, from_binary and
|
||||
# binary subtype.
|
||||
69
asyncio_mongo/_pymongo/ssl_match_hostname.py
Normal file
69
asyncio_mongo/_pymongo/ssl_match_hostname.py
Normal file
@@ -0,0 +1,69 @@
|
||||
# Backport of the match_hostname logic introduced in python 3.2
|
||||
# http://svn.python.org/projects/python/branches/release32-maint/Lib/ssl.py
|
||||
|
||||
import re
|
||||
|
||||
|
||||
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
|
||||
|
||||
|
||||
def _dnsname_to_pat(dn, max_wildcards=1):
|
||||
pats = []
|
||||
for frag in dn.split(r'.'):
|
||||
if frag.count('*') > max_wildcards:
|
||||
# Issue #17980: avoid denials of service by refusing more
|
||||
# than one wildcard per fragment. A survery of established
|
||||
# policy among SSL implementations showed it to be a
|
||||
# reasonable choice.
|
||||
raise CertificateError(
|
||||
"too many wildcards in certificate DNS name: " + repr(dn))
|
||||
if frag == '*':
|
||||
# When '*' is a fragment by itself, it matches a non-empty dotless
|
||||
# fragment.
|
||||
pats.append('[^.]+')
|
||||
else:
|
||||
# Otherwise, '*' matches any dotless fragment.
|
||||
frag = re.escape(frag)
|
||||
pats.append(frag.replace(r'\*', '[^.]*'))
|
||||
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
|
||||
|
||||
|
||||
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
    are mostly followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Collect every name we tried but failed to match, for the error message.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_to_pat(value).match(hostname):
                return
            dnsnames.append(value)
    if not san:
        # The subject is only checked when subjectAltName is empty
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_to_pat(value).match(hostname):
                        return
                    dnsnames.append(value)
    # No candidate matched: report what was found (or that nothing was).
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
|
||||
303
asyncio_mongo/_pymongo/thread_util.py
Normal file
303
asyncio_mongo/_pymongo/thread_util.py
Normal file
@@ -0,0 +1,303 @@
|
||||
# Copyright 2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities to abstract the differences between threads and greenlets."""
|
||||
|
||||
import threading
|
||||
import sys
|
||||
import weakref
|
||||
try:
|
||||
from time import monotonic as _time
|
||||
except ImportError:
|
||||
from time import time as _time
|
||||
|
||||
have_gevent = True
|
||||
try:
|
||||
import greenlet
|
||||
|
||||
try:
|
||||
# gevent-1.0rc2 and later.
|
||||
from gevent.lock import BoundedSemaphore as GeventBoundedSemaphore
|
||||
except ImportError:
|
||||
from gevent.coros import BoundedSemaphore as GeventBoundedSemaphore
|
||||
|
||||
from gevent.greenlet import SpawnedLink
|
||||
|
||||
except ImportError:
|
||||
have_gevent = False
|
||||
|
||||
from asyncio_mongo._pymongo.errors import ExceededMaxWaiters
|
||||
|
||||
|
||||
# Do we have to work around http://bugs.python.org/issue1868?
|
||||
issue1868 = (sys.version_info[:3] <= (2, 7, 0))
|
||||
|
||||
|
||||
class Ident(object):
    """Identify the current thread or greenlet and watch it for death.

    Subclasses provide get() and watch(); ``_refs`` maps watched ids to
    whatever bookkeeping object keeps the watch alive.
    """

    def __init__(self):
        self._refs = {}

    def watching(self):
        """Is the current thread or greenlet being watched for death?"""
        return self.get() in self._refs

    def unwatch(self, tid):
        """Stop watching the thread or greenlet identified by *tid*."""
        self._refs.pop(tid, None)

    def get(self):
        """An id for this thread or greenlet"""
        raise NotImplementedError

    def watch(self, callback):
        """Run callback when this thread or greenlet dies. callback takes
        one meaningless argument.
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class ThreadIdent(Ident):
    """Ident implementation for real threads."""

    class _DummyLock(object):
        # No-op stand-in used when no locking is actually required.
        def acquire(self):
            pass

        def release(self):
            pass

    def __init__(self):
        super(ThreadIdent, self).__init__()
        self._local = threading.local()
        # Threadlocals on Python <= 2.7.0 (see issue1868) need a real lock;
        # elsewhere a no-op lock avoids the overhead.
        self._lock = threading.Lock() if issue1868 else ThreadIdent._DummyLock()

    # We watch for thread-death using a weakref callback to a thread local.
    # Weakrefs are permitted on subclasses of object but not object() itself.
    class ThreadVigil(object):
        pass

    def _make_vigil(self):
        # Threadlocals in Python <= 2.7.0 have race conditions when setting
        # attributes and possibly when getting them, too, leading to weakref
        # callbacks not getting called later.
        self._lock.acquire()
        try:
            vigil = getattr(self._local, 'vigil', None)
            if not vigil:
                self._local.vigil = vigil = ThreadIdent.ThreadVigil()
        finally:
            self._lock.release()
        return vigil

    def get(self):
        """An id for the current thread (stable for the thread's lifetime)."""
        return id(self._make_vigil())

    def watch(self, callback):
        """Run *callback* when the current thread dies."""
        vigil = self._make_vigil()
        self._refs[id(vigil)] = weakref.ref(vigil, callback)
|
||||
|
||||
|
||||
class GreenletIdent(Ident):
    """Ident implementation for greenlets (gevent or plain)."""

    def get(self):
        """An id for the current greenlet."""
        return id(greenlet.getcurrent())

    def watch(self, callback):
        """Run *callback* when the current greenlet dies."""
        current = greenlet.getcurrent()
        tid = self.get()

        if hasattr(current, 'link'):
            # This is a Gevent Greenlet (capital G), which inherits from
            # greenlet and provides a 'link' method to detect when the
            # Greenlet exits.
            link = SpawnedLink(callback)
            current.rawlink(link)
            self._refs[tid] = link
        else:
            # This is a non-Gevent greenlet (small g), or it's the main
            # greenlet.
            self._refs[tid] = weakref.ref(current, callback)

    def unwatch(self, tid):
        """ call unlink if link before """
        link = self._refs.pop(tid, None)
        current = greenlet.getcurrent()
        if hasattr(current, 'unlink'):
            # This is a Gevent enhanced Greenlet. Remove the SpawnedLink we
            # linked to it.
            current.unlink(link)
|
||||
|
||||
|
||||
def create_ident(use_greenlets):
    """Return the Ident implementation matching the concurrency model."""
    return GreenletIdent() if use_greenlets else ThreadIdent()
|
||||
|
||||
|
||||
class Counter(object):
    """A per-thread (or per-greenlet) counter."""

    def __init__(self, use_greenlets):
        self.ident = create_ident(use_greenlets)
        self._counters = {}

    def inc(self):
        """Increment the current thread's count and return the new value."""
        # Local aliases so on_thread_died needn't close over self.
        ident = self.ident
        counters = self._counters

        tid = ident.get()
        counters[tid] = counters.get(tid, 0) + 1

        if not ident.watching():
            # Before the tid is possibly reused, remove it from the counters.
            def on_thread_died(ref):
                ident.unwatch(tid)
                counters.pop(tid, None)

            ident.watch(on_thread_died)

        return counters[tid]

    def dec(self):
        """Decrement the current thread's count, never below zero."""
        tid = self.ident.get()
        current = self._counters.get(tid, 0)
        if current > 0:
            self._counters[tid] = current - 1
            return self._counters[tid]
        return 0

    def get(self):
        """Return the current thread's count (0 if never incremented)."""
        return self._counters.get(self.ident.get(), 0)
|
||||
|
||||
|
||||
### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire
|
||||
class Semaphore:
    """Semaphore supporting ``acquire(timeout=...)``, backported from
    CPython 3.2.
    """

    # After Tim Peters' semaphore class, but not quite the same (no maximum)

    def __init__(self, value=1):
        # value: initial number of available slots.
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        self._cond = threading.Condition(threading.Lock())
        self._value = value

    def acquire(self, blocking=True, timeout=None):
        """Acquire one slot.

        Returns True on success; False when non-blocking (or the timeout
        expires) and no slot is available.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        self._cond.acquire()
        while self._value == 0:
            if not blocking:
                break
            if timeout is not None:
                if endtime is None:
                    # First wait: fix the absolute deadline.
                    endtime = _time() + timeout
                else:
                    # Woken (possibly spuriously): recompute remaining time.
                    timeout = endtime - _time()
                    if timeout <= 0:
                        break
            self._cond.wait(timeout)
        else:
            # while/else: reached only when the loop exits without break,
            # i.e. a slot is free and the acquire succeeds.
            self._value = self._value - 1
            rc = True
        self._cond.release()
        return rc

    __enter__ = acquire

    def release(self):
        """Return one slot and wake a single waiter."""
        self._cond.acquire()
        self._value = self._value + 1
        self._cond.notify()
        self._cond.release()

    def __exit__(self, t, v, tb):
        self.release()

    @property
    def counter(self):
        # Number of currently available slots.
        return self._value
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""

    def __init__(self, value=1):
        super(BoundedSemaphore, self).__init__(value)
        # Remember the ceiling so release() can detect over-release.
        self._initial_value = value

    def release(self):
        """Release one slot, refusing to exceed the initial value."""
        if self._value >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        return super(BoundedSemaphore, self).release()
|
||||
### End backport from CPython 3.2
|
||||
|
||||
|
||||
class DummySemaphore(object):
    """No-op semaphore used when the pool size is unbounded."""

    def __init__(self, value=None):
        # *value* is accepted only for interface compatibility; it is ignored.
        pass

    def acquire(self, blocking=True, timeout=None):
        """Always succeed immediately."""
        return True

    def release(self):
        """Nothing to release."""
        pass
|
||||
|
||||
|
||||
class MaxWaitersBoundedSemaphore(object):
    """Bounded semaphore that also limits how many callers may wait.

    A second semaphore of size ``max_waiters`` guards entry: when it is
    exhausted, ExceededMaxWaiters is raised instead of queueing another
    waiter. All other attribute access is delegated to the wrapped
    semaphore.
    """

    def __init__(self, semaphore_class, value=1, max_waiters=1):
        self.waiter_semaphore = semaphore_class(max_waiters)
        self.semaphore = semaphore_class(value)

    def acquire(self, blocking=True, timeout=None):
        # Non-blocking probe: failure means max_waiters callers are queued.
        got_waiter_slot = self.waiter_semaphore.acquire(False)
        if not got_waiter_slot:
            raise ExceededMaxWaiters()
        try:
            return self.semaphore.acquire(blocking, timeout)
        finally:
            self.waiter_semaphore.release()

    def __getattr__(self, name):
        return getattr(self.semaphore, name)
|
||||
|
||||
|
||||
class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore):
    """MaxWaitersBoundedSemaphore backed by this module's BoundedSemaphore."""

    def __init__(self, value=1, max_waiters=1):
        MaxWaitersBoundedSemaphore.__init__(
            self, BoundedSemaphore, value, max_waiters)
|
||||
|
||||
|
||||
if have_gevent:
    # Only defined when gevent imported successfully at module load time.
    class MaxWaitersBoundedSemaphoreGevent(MaxWaitersBoundedSemaphore):
        """MaxWaitersBoundedSemaphore backed by gevent's BoundedSemaphore."""
        def __init__(self, value=1, max_waiters=1):
            MaxWaitersBoundedSemaphore.__init__(
                self, GeventBoundedSemaphore, value, max_waiters)
|
||||
|
||||
|
||||
def create_semaphore(max_size, max_waiters, use_greenlets):
    """Build the semaphore guarding a connection pool.

    ``max_size`` of None means unbounded (DummySemaphore); otherwise a
    gevent- or thread-backed bounded semaphore is chosen, wrapped with a
    waiter limit when ``max_waiters`` is given.
    """
    if max_size is None:
        return DummySemaphore()
    if use_greenlets:
        if max_waiters is None:
            return GeventBoundedSemaphore(max_size)
        return MaxWaitersBoundedSemaphoreGevent(max_size, max_waiters)
    if max_waiters is None:
        return BoundedSemaphore(max_size)
    return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters)
|
||||
301
asyncio_mongo/_pymongo/uri_parser.py
Normal file
301
asyncio_mongo/_pymongo/uri_parser.py
Normal file
@@ -0,0 +1,301 @@
|
||||
# Copyright 2011-2012 10gen, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
# may not use this file except in compliance with the License. You
|
||||
# may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
|
||||
"""Tools to parse and validate a MongoDB URI."""
|
||||
|
||||
from urllib.parse import unquote_plus
|
||||
|
||||
from asyncio_mongo._pymongo.common import validate
|
||||
from asyncio_mongo._pymongo.errors import (ConfigurationError,
|
||||
InvalidURI,
|
||||
UnsupportedOption)
|
||||
|
||||
SCHEME = 'mongodb://'
|
||||
SCHEME_LEN = len(SCHEME)
|
||||
DEFAULT_PORT = 27017
|
||||
|
||||
|
||||
def _partition(entity, sep):
|
||||
"""Python2.4 doesn't have a partition method so we provide
|
||||
our own that mimics str.partition from later releases.
|
||||
|
||||
Split the string at the first occurrence of sep, and return a
|
||||
3-tuple containing the part before the separator, the separator
|
||||
itself, and the part after the separator. If the separator is not
|
||||
found, return a 3-tuple containing the string itself, followed
|
||||
by two empty strings.
|
||||
"""
|
||||
parts = entity.split(sep, 1)
|
||||
if len(parts) == 2:
|
||||
return parts[0], sep, parts[1]
|
||||
else:
|
||||
return entity, '', ''
|
||||
|
||||
|
||||
def _rpartition(entity, sep):
|
||||
"""Python2.4 doesn't have an rpartition method so we provide
|
||||
our own that mimics str.rpartition from later releases.
|
||||
|
||||
Split the string at the last occurrence of sep, and return a
|
||||
3-tuple containing the part before the separator, the separator
|
||||
itself, and the part after the separator. If the separator is not
|
||||
found, return a 3-tuple containing two empty strings, followed
|
||||
by the string itself.
|
||||
"""
|
||||
idx = entity.rfind(sep)
|
||||
if idx == -1:
|
||||
return '', '', entity
|
||||
return entity[:idx], sep, entity[idx + 1:]
|
||||
|
||||
|
||||
def parse_userinfo(userinfo):
    """Validates the format of user information in a MongoDB URI.
    Reserved characters like ':', '/', '+' and '@' must be escaped
    following RFC 2396.

    Returns a 2-tuple containing the unescaped username followed
    by the unescaped password.

    :Parameters:
      - `userinfo`: A string of the form <username>:<password>

    .. versionchanged:: 2.2
       Now uses `urllib.unquote_plus` so `+` characters must be escaped.
    """
    if '@' in userinfo or userinfo.count(':') > 1:
        raise InvalidURI("':' or '@' characters in a username or password "
                         "must be escaped according to RFC 2396.")
    # str.partition replaces the local Python-2.4 _partition helper with
    # identical semantics for a single-character separator.
    user, _, passwd = userinfo.partition(":")
    # No password is expected with GSSAPI authentication.
    if not user:
        raise InvalidURI("The empty string is not valid username.")
    return unquote_plus(user), unquote_plus(passwd)
|
||||
|
||||
|
||||
def parse_ipv6_literal_host(entity, default_port):
    """Validates an IPv6 literal host:port string.

    Returns a 2-tuple of IPv6 literal followed by port, where port is
    *default_port* if it wasn't specified in *entity*. Note that a port
    parsed out of the string is returned as a string; parse_host later
    converts it to an int.

    :Parameters:
        - `entity`: A string that represents an IPv6 literal enclosed
          in braces (e.g. '[::1]' or '[::1]:27017').
        - `default_port`: The port number to use when one wasn't
          specified in entity.
    """
    if ']' not in entity:
        raise ConfigurationError("an IPv6 address literal must be "
                                 "enclosed in '[' and ']' according "
                                 "to RFC 2732.")
    sep = entity.find(']:')
    if sep == -1:
        # No port given: strip the surrounding brackets only.
        return entity[1:-1], default_port
    return entity[1:sep], entity[sep + 2:]
|
||||
|
||||
|
||||
def parse_host(entity, default_port=DEFAULT_PORT):
    """Validates a host string.

    Returns a 2-tuple of host followed by port, where port is
    *default_port* if it wasn't specified in the string.

    :Parameters:
        - `entity`: A host or host:port string where host could be a
          hostname or IP address.
        - `default_port`: The port number to use when one wasn't
          specified in entity.
    """
    host, port = entity, default_port
    if entity[0] == '[':
        # IPv6 literal, e.g. '[::1]:27017'.
        host, port = parse_ipv6_literal_host(entity, default_port)
    elif ':' in entity:
        if entity.count(':') > 1:
            raise ConfigurationError("Reserved characters such as ':' must be "
                                     "escaped according RFC 2396. An IPv6 "
                                     "address literal must be enclosed in '[' "
                                     "and ']' according to RFC 2732.")
        host, port = host.split(':', 1)
    if isinstance(port, str):
        # Ports parsed out of the string arrive as text; validate, convert.
        if not port.isdigit():
            raise ConfigurationError("Port number must be an integer.")
        port = int(port)
    return host, port
|
||||
|
||||
|
||||
def validate_options(opts):
    """Validates and normalizes options passed in a MongoDB URI.

    Returns a new dictionary of validated and normalized options.

    :Parameters:
        - `opts`: A dict of MongoDB URI options.
    """
    # str(key) ensures a unicode URI yields plain 'str' option names so the
    # result is suitable to pass as **kwargs in all Python versions.
    return dict(
        (str(key), value)
        for key, value in (validate(k, v) for k, v in opts.items())
    )
|
||||
|
||||
|
||||
def split_options(opts):
    """Takes the options portion of a MongoDB URI, validates each option
    and returns the options in a dictionary. The option names will be
    returned lowercase even if camelCase options are used.

    :Parameters:
        - `opt`: A string representing MongoDB URI options.
    """
    has_amp = "&" in opts
    has_semi = ";" in opts
    try:
        if has_amp and has_semi:
            raise InvalidURI("Can not mix '&' and ';' for option separators.")
        if has_amp or has_semi:
            delim = "&" if has_amp else ";"
            # A pair that doesn't split into exactly key=value makes the
            # dict() constructor raise ValueError, caught below.
            options = dict(kv.split("=") for kv in opts.split(delim))
        elif "=" in opts:
            options = dict([opts.split("=")])
        else:
            raise ValueError
    except ValueError:
        raise InvalidURI("MongoDB URI options are key=value pairs.")

    return validate_options(options)
|
||||
|
||||
|
||||
def split_hosts(hosts, default_port=DEFAULT_PORT):
    """Takes a string of the form host1[:port],host2[:port]... and
    splits it into (host, port) tuples. If [:port] isn't present the
    default_port is used.

    Returns a list of 2-tuples containing the host name (or IP) followed
    by the port number.

    :Parameters:
        - `hosts`: A string of the form host1[:port],host2[:port],...
        - `default_port`: The port number to use when one wasn't specified
          for a host.
    """
    nodes = []
    for entity in hosts.split(','):
        if not entity:
            raise ConfigurationError("Empty host "
                                     "(or extra comma in host list).")
        # Unix domain socket paths carry no port component.
        port = None if entity.endswith('.sock') else default_port
        nodes.append(parse_host(entity, port))
    return nodes
|
||||
|
||||
|
||||
def parse_uri(uri, default_port=DEFAULT_PORT):
    """Parse and validate a MongoDB URI.

    Returns a dict of the form::

        {
            'nodelist': <list of (host, port) tuples>,
            'username': <username> or None,
            'password': <password> or None,
            'database': <database name> or None,
            'collection': <collection name> or None,
            'options': <dict of MongoDB URI options>
        }

    :Parameters:
        - `uri`: The MongoDB URI to parse.
        - `default_port`: The port number to use when one wasn't specified
          for a host in the URI.
    """
    if not uri.startswith(SCHEME):
        raise InvalidURI("Invalid URI scheme: URI "
                         "must begin with '%s'" % (SCHEME,))

    scheme_free = uri[SCHEME_LEN:]

    if not scheme_free:
        raise InvalidURI("Must provide at least one hostname or IP.")

    nodes = None
    user = None
    passwd = None
    dbase = None
    collection = None
    options = {}

    # Check for unix domain sockets in the uri
    if '.sock' in scheme_free:
        host_part, _, path_part = _rpartition(scheme_free, '/')
        try:
            # Recurse to see whether everything up to the last '/' parses
            # as a host list on its own; if not, the whole remainder is the
            # (socket-path) host part and there is no database/options part.
            parse_uri('%s%s' % (SCHEME, host_part))
        except (ConfigurationError, InvalidURI):
            host_part = scheme_free
            path_part = ""
    else:
        host_part, _, path_part = _partition(scheme_free, '/')

    if not path_part and '?' in host_part:
        raise InvalidURI("A '/' is required between "
                         "the host list and any options.")

    if '@' in host_part:
        # The last '@' separates credentials from the host list (any '@'
        # inside a username/password must be escaped).
        userinfo, _, hosts = _rpartition(host_part, '@')
        user, passwd = parse_userinfo(userinfo)
    else:
        hosts = host_part

    nodes = split_hosts(hosts, default_port=default_port)

    if path_part:

        # path_part is '<database>[.<collection>][?<options>]' or '?<options>'.
        if path_part[0] == '?':
            opts = path_part[1:]
        else:
            dbase, _, opts = _partition(path_part, '?')
            if '.' in dbase:
                dbase, collection = dbase.split('.', 1)

        if opts:
            options = split_options(opts)

    return {
        'nodelist': nodes,
        'username': user,
        'password': passwd,
        'database': dbase,
        'collection': collection,
        'options': options
    }
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Ad-hoc CLI check: parse the URI given as the first argument and
    # pretty-print the result (or the validation error).
    import pprint
    import sys
    try:
        pprint.pprint(parse_uri(sys.argv[1]))
    except (InvalidURI, UnsupportedOption) as exc:
        print(exc)
    sys.exit(0)
|
||||
|
||||
Reference in New Issue
Block a user