desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def sort(self, key_or_list, direction=None):
    """Sorts this cursor's results.

    Pass a field name and a direction (``ASCENDING``/``DESCENDING``),
    or a list of ``(key, direction)`` pairs. Only the last :meth:`sort`
    applied has any effect; raises ``InvalidOperation`` if the cursor
    has already been used.
    """
    self.__check_okay_to_chain()
    index_keys = helpers._index_list(key_or_list, direction)
    self.__ordering = helpers._index_document(index_keys)
    return self
def count(self, with_limit_and_skip=False):
    """Return the number of documents matching this query.

    Ignores :meth:`limit` and :meth:`skip` unless `with_limit_and_skip`
    is ``True``. Any :meth:`hint` applied to the query is forwarded to
    the count command.
    """
    validate_boolean('with_limit_and_skip', with_limit_and_skip)
    # 'count' must be the first key of the command document.
    cmd = SON([('count', self.__collection.name), ('query', self.__spec)])
    if self.__max_time_ms is not None:
        cmd['maxTimeMS'] = self.__max_time_ms
    if self.__comment:
        cmd['$comment'] = self.__comment
    if self.__hint is not None:
        cmd['hint'] = self.__hint
    if with_limit_and_skip:
        if self.__limit:
            cmd['limit'] = self.__limit
        if self.__skip:
            cmd['skip'] = self.__skip
    return self.__collection._count(cmd, self.__collation)
def distinct(self, key):
    """Return the list of distinct values for `key` over this query's
    result set.

    Raises ``TypeError`` if `key` is not a string.
    """
    opts = {}
    if self.__spec:
        opts['query'] = self.__spec
    if self.__max_time_ms is not None:
        opts['maxTimeMS'] = self.__max_time_ms
    if self.__comment:
        opts['$comment'] = self.__comment
    if self.__collation is not None:
        opts['collation'] = self.__collation
    return self.__collection.distinct(key, **opts)
def explain(self):
    """Return the server's explain plan record for this cursor."""
    plan_cursor = self.clone()
    plan_cursor.__explain = True
    # A positive limit would cap the explain itself; negate it so the
    # server treats it as a hard limit on the original query instead.
    if plan_cursor.__limit:
        plan_cursor.__limit = -abs(plan_cursor.__limit)
    return next(plan_cursor)
def hint(self, index):
    """Tell the server which index to use for this query.

    `index` is an index specifier (as passed to ``create_index``) or an
    index name; ``None`` clears any existing hint. The last hint applied
    wins. Raises ``InvalidOperation`` if the cursor was already used.
    """
    self.__check_okay_to_chain()
    self.__set_hint(index)
    return self
def comment(self, comment):
    """Attach a $comment (string or document) to this query."""
    self.__check_okay_to_chain()
    self.__comment = comment
    return self
def where(self, code):
    """Add a ``$where`` JavaScript filter clause to this query.

    `code` is a string or :class:`~bson.code.Code`; only the last
    :meth:`where` applied has any effect.
    """
    self.__check_okay_to_chain()
    if not isinstance(code, Code):
        code = Code(code)
    self.__spec['$where'] = code
    return self
def collation(self, collation):
    """Attach a :class:`~pymongo.collation.Collation` to this query
    (MongoDB 3.4+); only the last collation applied has any effect.
    """
    self.__check_okay_to_chain()
    self.__collation = validate_collation_or_none(collation)
    return self
def __send_message(self, operation):
    """Send a query or getmore operation and handles the response.

    If operation is ``None`` this is an exhaust cursor, which reads the
    next result batch off the exhaust socket instead of sending getMore
    messages to the server. Can raise ConnectionFailure.
    """
    client = self.__collection.database.client
    listeners = client._event_listeners
    publish = listeners.enabled_for_commands
    from_command = False
    if operation:
        # Normal path: route the message through the client.
        kwargs = {'read_preference': self.__read_preference, 'exhaust': self.__exhaust}
        if (self.__address is not None):
            # Pin subsequent messages to the server that answered first.
            kwargs['address'] = self.__address
        try:
            response = client._send_message_with_response(operation, **kwargs)
            self.__address = response.address
            if self.__exhaust:
                # Keep the socket checked out for the life of the cursor.
                self.__exhaust_mgr = _SocketManager(response.socket_info, response.pool)
            cmd_name = operation.name
            data = response.data
            cmd_duration = response.duration
            rqst_id = response.request_id
            from_command = response.from_command
        except AutoReconnect:
            # Don't try to send kill cursors on another socket.
            self.__killed = True
            raise
    else:
        # Exhaust cursor: read the next batch straight off the socket.
        rqst_id = 0
        cmd_name = 'getMore'
        if publish:
            # Synthesize a getMore command document for monitoring.
            cmd = SON([('getMore', self.__id), ('collection', self.__collection.name)])
            if self.__batch_size:
                cmd['batchSize'] = self.__batch_size
            if self.__max_time_ms:
                cmd['maxTimeMS'] = self.__max_time_ms
            listeners.publish_command_start(cmd, self.__collection.database.name, 0, self.__address)
            start = datetime.datetime.now()
        try:
            data = self.__exhaust_mgr.sock.receive_message(1, None)
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start)
                listeners.publish_command_failure(duration, _convert_exception(exc), cmd_name, rqst_id, self.__address)
            if isinstance(exc, ConnectionFailure):
                self.__die()
            raise
        if publish:
            cmd_duration = (datetime.datetime.now() - start)
    if publish:
        # Time the decode phase separately; total duration is decode + network.
        start = datetime.datetime.now()
    try:
        doc = helpers._unpack_response(response=data, cursor_id=self.__id, codec_options=self.__codec_options)
        if from_command:
            helpers._check_command_response(doc['data'][0])
    except OperationFailure as exc:
        self.__killed = True
        # Make sure exhaust socket is returned immediately, if necessary.
        self.__die()
        if publish:
            duration = ((datetime.datetime.now() - start) + cmd_duration)
            listeners.publish_command_failure(duration, exc.details, cmd_name, rqst_id, self.__address)
        # A tailable cursor on a capped collection may fail benignly
        # (e.g. the collection was dropped); swallow instead of raising.
        if (self.__query_flags & _QUERY_OPTIONS['tailable_cursor']):
            return
        raise
    except NotMasterError as exc:
        self.__killed = True
        self.__die()
        if publish:
            duration = ((datetime.datetime.now() - start) + cmd_duration)
            listeners.publish_command_failure(duration, exc.details, cmd_name, rqst_id, self.__address)
        # The server changed roles: reset its topology entry and re-check.
        client._reset_server_and_request_check(self.__address)
        raise
    except Exception as exc:
        if publish:
            duration = ((datetime.datetime.now() - start) + cmd_duration)
            listeners.publish_command_failure(duration, _convert_exception(exc), cmd_name, rqst_id, self.__address)
        raise
    if publish:
        duration = ((datetime.datetime.now() - start) + cmd_duration)
        # Build a command-monitoring "success" document in the same shape
        # a real find/getMore command reply would have.
        if from_command:
            res = doc['data'][0]
        elif (cmd_name == 'explain'):
            res = (doc['data'][0] if doc['number_returned'] else {})
        else:
            res = {'cursor': {'id': doc['cursor_id'], 'ns': self.__collection.full_name}, 'ok': 1}
            if (cmd_name == 'find'):
                res['cursor']['firstBatch'] = doc['data']
            else:
                res['cursor']['nextBatch'] = doc['data']
        listeners.publish_command_success(duration, res, cmd_name, rqst_id, self.__address)
    if (from_command and (cmd_name != 'explain')):
        # Command-style reply: batch lives under the 'cursor' subdocument.
        cursor = doc['data'][0]['cursor']
        self.__id = cursor['id']
        if (cmd_name == 'find'):
            documents = cursor['firstBatch']
        else:
            documents = cursor['nextBatch']
        self.__data = deque(documents)
        self.__retrieved += len(documents)
    else:
        # Legacy OP_REPLY shape.
        self.__id = doc['cursor_id']
        self.__data = deque(doc['data'])
        self.__retrieved += doc['number_returned']
    if (self.__id == 0):
        # Cursor id 0 means the server has exhausted the result set.
        self.__killed = True
    if (self.__limit and self.__id and (self.__limit <= self.__retrieved)):
        self.__die()
    if (self.__exhaust and (self.__id == 0)):
        self.__exhaust_mgr.close()
def _refresh(self):
    """Refreshes the cursor with more data from Mongo.

    Returns the length of self.__data after refresh. Will exit early if
    self.__data is already non-empty. Raises OperationFailure when the
    cursor cannot be refreshed due to an error on the query.
    """
    # Buffered documents remain, or the cursor is dead: nothing to fetch.
    if (len(self.__data) or self.__killed):
        return len(self.__data)
    if (self.__id is None):
        # First round trip: send the initial query.
        self.__send_message(_Query(self.__query_flags, self.__collection.database.name, self.__collection.name, self.__skip, self.__query_spec(), self.__projection, self.__codec_options, self.__read_preference, self.__limit, self.__batch_size, self.__read_concern, self.__collation))
        if (not self.__id):
            # Server returned cursor id 0: everything came in one batch.
            self.__killed = True
    elif self.__id:
        # Cursor is open on the server: issue a getMore.
        if self.__limit:
            # Never request more than the documents remaining under limit.
            limit = (self.__limit - self.__retrieved)
            if self.__batch_size:
                limit = min(limit, self.__batch_size)
        else:
            limit = self.__batch_size
        if self.__exhaust:
            # Exhaust cursors read off the socket; no getMore message.
            self.__send_message(None)
        else:
            self.__send_message(_GetMore(self.__collection.database.name, self.__collection.name, limit, self.__id, self.__codec_options, self.__max_await_time_ms))
    else:
        # Cursor id is zero: nothing else to return.
        self.__killed = True
    return len(self.__data)
@property
def alive(self):
    """Whether this cursor could still yield documents.

    Mostly useful for tailable cursors; for regular cursors just iterate.
    Note ``alive`` may be True even when :meth:`next` will raise
    ``StopIteration``.
    """
    if len(self.__data):
        return True
    return not self.__killed
@property
def cursor_id(self):
    """The server-side id of this cursor (for manual kill_cursors)."""
    return self.__id
@property
def address(self):
    """The ``(host, port)`` of the server used, or ``None``."""
    return self.__address
def next(self):
    """Advance the cursor and return the next document."""
    if self.__empty:
        raise StopIteration
    if not (len(self.__data) or self._refresh()):
        raise StopIteration
    doc = self.__data.popleft()
    if self.__manipulate:
        # Let the database apply outgoing SON manipulators.
        return self.__collection.database._fix_outgoing(doc, self.__collection)
    return doc
def __copy__(self):
    """Support ``copy.copy()`` via a shallow clone."""
    return self._clone(deepcopy=False)
def __deepcopy__(self, memo):
    """Support ``copy.deepcopy()`` via a deep clone."""
    return self._clone(deepcopy=True)
def _deepcopy(self, x, memo=None):
    """Deepcopy helper for dicts and lists.

    Regular expressions cannot be deep copied, but as they are immutable
    we simply share them when cloning.
    """
    if hasattr(x, 'items'):
        copied, is_list, iterator = {}, False, iteritems(x)
    else:
        copied, is_list, iterator = [], True, enumerate(x)
    if memo is None:
        memo = {}
    obj_id = id(x)
    # Preserve shared/cyclic references, like copy.deepcopy does.
    if obj_id in memo:
        return memo.get(obj_id)
    memo[obj_id] = copied
    for key, value in iterator:
        if isinstance(value, (dict, list)) and not isinstance(value, SON):
            value = self._deepcopy(value, memo)
        elif not isinstance(value, RE_TYPE):
            value = copy.deepcopy(value, memo)
        if is_list:
            copied.append(value)
        else:
            if not isinstance(key, RE_TYPE):
                key = copy.deepcopy(key, memo)
            copied[key] = value
    return copied
def __init__(self):
    """Create a client and grab essential information from the server."""
    # Assume nothing until a connection succeeds.
    self.connected = False
    self.ismaster = {}
    self.w = None
    self.nodes = set()
    self.replica_set_name = None
    self.cmd_line = None
    self.version = Version((-1))
    self.auth_enabled = False
    self.test_commands_enabled = False
    self.is_mongos = False
    self.is_rs = False
    self.has_ipv6 = False
    self.ssl = False
    self.ssl_cert_none = False
    self.ssl_certfile = False
    self.server_is_resolvable = is_server_resolvable()
    self.ssl_client_options = {}
    # Try a plain connection first, then retry over SSL.
    self.client = _connect(host, port)
    if (HAVE_SSL and (not self.client)):
        self.client = _connect(host, port, **_SSL_OPTIONS)
        if self.client:
            self.ssl = True
            self.ssl_client_options = _SSL_OPTIONS
            self.ssl_certfile = True
            if (_SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE):
                self.ssl_cert_none = True
    if self.client:
        self.connected = True
        ismaster = self.client.admin.command('ismaster')
        if ('setName' in ismaster):
            # Replica set: reconnect with replicaSet= and discover members.
            self.replica_set_name = ismaster['setName']
            self.is_rs = True
            self.client = pymongo.MongoClient(host, port, replicaSet=self.replica_set_name, **self.ssl_client_options)
            self.ismaster = self.client.admin.command('ismaster')
            nodes = [partition_node(node.lower()) for node in self.ismaster.get('hosts', [])]
            nodes.extend([partition_node(node.lower()) for node in self.ismaster.get('passives', [])])
            nodes.extend([partition_node(node.lower()) for node in self.ismaster.get('arbiters', [])])
            self.nodes = set(nodes)
        else:
            self.ismaster = ismaster
            self.nodes = set([(host, port)])
        # Write concern w = number of data-bearing hosts (standalone: 1).
        self.w = (len(self.ismaster.get('hosts', [])) or 1)
        self.version = Version.from_client(self.client)
        try:
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        except pymongo.errors.OperationFailure as e:
            msg = e.details.get('errmsg', '')
            # An "unauthorized" failure here means auth is on.
            if ((e.code == 13) or ('unauthorized' in msg) or ('login' in msg)):
                self.auth_enabled = True
            else:
                raise
        else:
            self.auth_enabled = self._server_started_with_auth()
        if self.auth_enabled:
            # See if the test user already exists; create it if not.
            self.user_provided = self._check_user_provided()
            if (not self.user_provided):
                roles = {}
                if self.version.at_least(2, 5, 3, (-1)):
                    roles = {'roles': ['root']}
                self.client.admin.add_user(db_user, db_pwd, **roles)
            # Reconnect as the (possibly just-created) admin user.
            self.client = _connect(host, port, username=db_user, password=db_pwd, replicaSet=self.replica_set_name, **self.ssl_client_options)
            # May not have this if OperationFailure was raised earlier.
            self.cmd_line = self.client.admin.command('getCmdLineOpts')
        # Detect enableTestCommands across the argv / parsed-config shapes
        # used by different server versions.
        if ('enableTestCommands=1' in self.cmd_line['argv']):
            self.test_commands_enabled = True
        elif ('parsed' in self.cmd_line):
            params = self.cmd_line['parsed'].get('setParameter', [])
            if ('enableTestCommands=1' in params):
                self.test_commands_enabled = True
            else:
                params = self.cmd_line['parsed'].get('setParameter', {})
                if (params.get('enableTestCommands') == '1'):
                    self.test_commands_enabled = True
        self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
        self.has_ipv6 = self._server_started_with_ipv6()
def _check_user_provided(self):
    """Return True if db_user/db_password is already an admin user."""
    probe = pymongo.MongoClient(host, port, username=db_user, password=db_pwd, serverSelectionTimeoutMS=100, **self.ssl_client_options)
    try:
        return db_user in _all_users(probe.admin)
    except pymongo.errors.OperationFailure as e:
        msg = e.details.get('errmsg', '')
        # Auth failure just means the user doesn't exist yet.
        if (e.code == 18) or ('auth fails' in msg):
            return False
        raise
def require_connection(self, func):
    """Run a test only if we can connect to MongoDB."""
    reason = 'Cannot connect to MongoDB on %s' % (self.pair,)
    return self._require(self.connected, reason, func=func)
def require_version_min(self, *ver):
    """Run a test only if the server version is at least ``ver``."""
    minimum = Version(*ver)
    return self._require((self.version >= minimum), ('Server version must be at least %s' % str(minimum)))
def require_version_max(self, *ver):
    """Run a test only if the server version is at most ``ver``."""
    maximum = Version(*ver)
    return self._require((self.version <= maximum), ('Server version must be at most %s' % str(maximum)))
def require_auth(self, func):
    """Run a test only if the server is running with auth enabled."""
    wrapped = self._require(self.auth_enabled, 'Authentication is not enabled on the server', func=func)
    # Also guard against the mongos<2.0 + auth combination.
    return self.check_auth_with_sharding(wrapped)
def require_no_auth(self, func):
    """Run a test only if the server is running without auth enabled."""
    auth_off = not self.auth_enabled
    return self._require(auth_off, 'Authentication must not be enabled on the server', func=func)
def require_replica_set(self, func):
    """Run a test only if the client is connected to a replica set."""
    return self._require(self.is_rs, 'Not connected to a replica set', func=func)
def require_secondaries_count(self, count):
    """Run a test only if the replica set has at least `count`
    secondaries.
    """
    if not self.client:
        sec_count = 0
    else:
        sec_count = len(self.client.secondaries)
    return self._require((sec_count >= count), ('Need %d secondaries, %d available' % (count, sec_count)))
def require_no_replica_set(self, func):
    """Run a test only if the client is *not* connected to a replica set."""
    standalone = not self.is_rs
    return self._require(standalone, 'Connected to a replica set, not a standalone mongod', func=func)
def require_ipv6(self, func):
    """Run a test only if the server is reachable over IPv6."""
    return self._require(self.has_ipv6, 'No IPv6', func=func)
def require_no_mongos(self, func):
    """Run a test only if the client is not connected to a mongos."""
    not_sharded = not self.is_mongos
    return self._require(not_sharded, 'Must be connected to a mongod, not a mongos', func=func)
def require_mongos(self, func):
    """Run a test only if the client is connected to a mongos."""
    return self._require(self.is_mongos, 'Must be connected to a mongos', func=func)
def check_auth_with_sharding(self, func):
    """Skip a test when connected to mongos < 2.0 and running with auth."""
    unsupported = (self.auth_enabled and self.is_mongos and (self.version < (2,)))
    return self._require((not unsupported), 'Auth with sharding requires MongoDB >= 2.0.0', func=func)
def require_test_commands(self, func):
    """Run a test only if the server has test commands enabled."""
    return self._require(self.test_commands_enabled, 'Test commands must be enabled', func=func)
def require_ssl(self, func):
    """Run a test only if the client can connect over SSL."""
    return self._require(self.ssl, 'Must be able to connect via SSL', func=func)
def require_no_ssl(self, func):
    """Run a test only if the client can connect without SSL."""
    plain = not self.ssl
    return self._require(plain, 'Must be able to connect without SSL', func=func)
def require_ssl_cert_none(self, func):
    """Run a test only if the client can connect with ssl.CERT_NONE."""
    return self._require(self.ssl_cert_none, 'Must be able to connect with ssl.CERT_NONE', func=func)
def require_ssl_certfile(self, func):
    """Run a test only if the client can connect with ssl_certfile."""
    return self._require(self.ssl_certfile, 'Must be able to connect with ssl_certfile', func=func)
def require_server_resolvable(self, func):
    """Run a test only if the hostname 'server' is resolvable."""
    reason = "No hosts entry for 'server'. Cannot validate hostname in the certificate"
    return self._require(self.server_is_resolvable, reason, func=func)
def __init__(self, standalones, members, mongoses, ismaster_hosts=None, *args, **kwargs):
    """A MongoClient connected to the default server, with a mock topology.

    standalones, members, mongoses determine the configuration of the
    topology. They are formatted like ['a:1', 'b:2']. ismaster_hosts
    provides an alternative host list for the server's mocked ismaster
    response; see test_connect_with_internal_ips.
    """
    # Copy the host lists so later mutation by the caller has no effect.
    self.mock_standalones = standalones[:]
    self.mock_members = members[:]
    if self.mock_members:
        self.mock_primary = self.mock_members[0]
    else:
        self.mock_primary = None
    if (ismaster_hosts is not None):
        self.mock_ismaster_hosts = ismaster_hosts
    else:
        self.mock_ismaster_hosts = members[:]
    self.mock_mongoses = mongoses[:]
    # Hosts that should fail to respond, plus per-host mock settings.
    self.mock_down_hosts = []
    self.mock_wire_versions = {}
    self.mock_max_write_batch_sizes = {}
    self.mock_rtts = {}
    # Inject mock pool/monitor classes that route back to this instance.
    kwargs['_pool_class'] = partial(MockPool, self)
    kwargs['_monitor_class'] = partial(MockMonitor, self)
    client_options = client_context.ssl_client_options.copy()
    client_options.update(kwargs)
    super(MockClient, self).__init__(*args, **client_options)
def kill_host(self, host):
    """Mark `host` (formatted like 'a:1') as down in the mock topology."""
    self.mock_down_hosts.append(host)
def revive_host(self, host):
    """Bring `host` (formatted like 'a:1') back up in the mock topology."""
    self.mock_down_hosts.remove(host)
def mock_is_master(self, host):
    """Return mock ismaster response (a dict) and round trip time."""
    # Per-host overrides fall back to the protocol-wide defaults.
    (min_wire_version, max_wire_version) = self.mock_wire_versions.get(host, (common.MIN_WIRE_VERSION, common.MAX_WIRE_VERSION))
    max_write_batch_size = self.mock_max_write_batch_sizes.get(host, common.MAX_WRITE_BATCH_SIZE)
    rtt = self.mock_rtts.get(host, 0)
    if (host in self.mock_down_hosts):
        raise NetworkTimeout('mock timeout')
    elif (host in self.mock_standalones):
        response = {'ok': 1, 'ismaster': True, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'maxWriteBatchSize': max_write_batch_size}
    elif (host in self.mock_members):
        # Replica-set member: primary iff it matches mock_primary.
        ismaster = (host == self.mock_primary)
        response = {'ok': 1, 'ismaster': ismaster, 'secondary': (not ismaster), 'setName': 'rs', 'hosts': self.mock_ismaster_hosts, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'maxWriteBatchSize': max_write_batch_size}
        if self.mock_primary:
            response['primary'] = self.mock_primary
    elif (host in self.mock_mongoses):
        # 'isdbgrid' marks the responder as a mongos.
        response = {'ok': 1, 'ismaster': True, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'msg': 'isdbgrid', 'maxWriteBatchSize': max_write_batch_size}
    else:
        # In this case, we raise AutoReconnect (like a real connection
        # attempt to an unknown host would).
        raise AutoReconnect(('Unknown host: %s' % host))
    return (response, rtt)
def test_constants(self):
    """This test uses MongoClient explicitly to make sure that host and
    port are not overloaded.

    Fix: the patched class-level ``MongoClient.HOST``/``PORT`` were not
    restored when an assertion failed, poisoning every later test in the
    run; restore them in a ``finally`` block.
    """
    (host, port) = (client_context.host, client_context.port)
    # Patch the class-level defaults with unreachable values.
    MongoClient.HOST = 'somedomainthatdoesntexist.org'
    MongoClient.PORT = 123456789
    try:
        with self.assertRaises(AutoReconnect):
            connected(MongoClient(serverSelectionTimeoutMS=10, **client_context.ssl_client_options))
        # Explicit host/port arguments override the class defaults.
        connected(MongoClient(host, port, **client_context.ssl_client_options))
    finally:
        # Restore the real defaults even if an assertion above failed.
        MongoClient.HOST = host
        MongoClient.PORT = port
    # With the defaults restored, a default-constructed client connects.
    connected(MongoClient(**client_context.ssl_client_options))
def __init__(self, action='ignore'):
    """Start filtering deprecations with the given warnings action."""
    ctx = warnings.catch_warnings()
    ctx.__enter__()
    warnings.simplefilter(action, DeprecationWarning)
    self.warn_context = ctx
def stop(self):
    """Stop filtering deprecations and drop the warnings context."""
    ctx, self.warn_context = self.warn_context, None
    ctx.__exit__()
def test_iteration(self):
    """Iterating a SON yields its keys; each maps to key * 100."""
    sample = SON([(1, 100), (2, 200), (3, 300)])
    for key in sample:
        self.assertEqual(key * 100, sample[key])
def test_contains_has(self):
    """has_key and __contains__ agree on present and absent keys."""
    sample = SON([(1, 100), (2, 200), (3, 300)])
    self.assertIn(1, sample)
    self.assertTrue(2 in sample, 'in failed')
    self.assertFalse(22 in sample, "in succeeded when it shouldn't")
    self.assertTrue(sample.has_key(2), 'has_key failed')
    self.assertFalse(sample.has_key(22), "has_key succeeded when it shouldn't")
def test_clears(self):
    """clear() empties the SON completely."""
    sample = SON([(1, 100), (2, 200), (3, 300)])
    sample.clear()
    self.assertNotIn(1, sample)
    self.assertEqual(0, len(sample))
    self.assertEqual(0, len(sample.keys()))
    self.assertEqual({}, sample.to_dict())
def test_len(self):
    """len() tracks insertions and popitem()."""
    empty = SON()
    self.assertEqual(0, len(empty))
    sample = SON([(1, 100), (2, 200), (3, 300)])
    self.assertEqual(3, len(sample))
    sample.popitem()
    self.assertEqual(2, len(sample))
def assertJsonEqual(self, first, second, msg=None):
    """Fail unless the two JSON strings are semantically equal.

    Parsing with the built-in json library normalizes whitespace
    discrepancies before comparing.
    """
    self.assertEqual(loads(first), loads(second), msg=msg)
def read_from_which_host(self, client):
    """Do a find() on the client and return which host was used."""
    cursor = client.pymongo_test.test.find()
    next(cursor)
    return cursor.address
def read_from_which_kind(self, client):
    """Do a find() on the client and return 'primary' or 'secondary'
    depending on which member served it.
    """
    address = self.read_from_which_host(client)
    if address == client.primary:
        return 'primary'
    if address in client.secondaries:
        return 'secondary'
    self.fail(('Cursor used address %s, expected either primary %s or secondaries %s' % (address, client.primary, client.secondaries)))
def executed_on_which_server(self, client, fn, *args, **kwargs):
    """Execute fn(*args, **kwargs) and return the Server instance used."""
    # Reset the record, run the operation, then assert exactly one
    # server handled the read.
    client.has_read_from.clear()
    fn(*args, **kwargs)
    self.assertEqual(1, len(client.has_read_from))
    return one(client.has_read_from)
def assertEqualResponse(self, expected, actual):
    """Compare response from bulk.execute() to expected response."""
    for (key, value) in expected.items():
        if (key == 'nModified'):
            # nModified only exists when the server supports write commands.
            if self.has_write_commands:
                self.assertEqual(value, actual['nModified'])
            else:
                self.assertFalse(('nModified' in actual))
        elif (key == 'upserted'):
            # Compare upsert records element-wise.
            expected_upserts = value
            actual_upserts = actual['upserted']
            self.assertEqual(len(expected_upserts), len(actual_upserts), ('Expected %d elements in "upserted", got %d' % (len(expected_upserts), len(actual_upserts))))
            for (e, a) in zip(expected_upserts, actual_upserts):
                self.assertEqualUpsert(e, a)
        elif (key == 'writeErrors'):
            # Compare write errors element-wise.
            expected_errors = value
            actual_errors = actual['writeErrors']
            self.assertEqual(len(expected_errors), len(actual_errors), ('Expected %d elements in "writeErrors", got %d' % (len(expected_errors), len(actual_errors))))
            for (e, a) in zip(expected_errors, actual_errors):
                self.assertEqualWriteError(e, a)
        else:
            # Plain scalar field: must match exactly.
            self.assertEqual(actual.get(key), value, ('%r value of %r does not match expected %r' % (key, actual.get(key), value)))
def assertEqualUpsert(self, expected, actual):
    """Compare one bulk.execute() upsert record to its expected value.

    An expected ``_id`` of ``'...'`` is a wildcard: only presence is
    checked.
    """
    self.assertEqual(expected['index'], actual['index'])
    if expected['_id'] == '...':
        self.assertTrue(('_id' in actual))
    else:
        self.assertEqual(expected['_id'], actual['_id'])
def assertEqualWriteError(self, expected, actual):
    """Compare one bulk.execute() writeError record to its expected value.

    ``'...'`` acts as a wildcard for ``errmsg`` and for the op's ``_id``.
    """
    self.assertEqual(expected['index'], actual['index'])
    self.assertEqual(expected['code'], actual['code'])
    if expected['errmsg'] == '...':
        self.assertTrue(('errmsg' in actual))
    else:
        self.assertEqual(expected['errmsg'], actual['errmsg'])
    expected_op = expected['op'].copy()
    actual_op = actual['op'].copy()
    if expected_op.get('_id') == '...':
        # Wildcard _id: require presence, then drop it from both sides.
        self.assertTrue(('_id' in actual_op))
        actual_op.pop('_id')
        expected_op.pop('_id')
    self.assertEqual(expected_op, actual_op)
@property
def subtype(self):
    """The BSON subtype of this binary data."""
    return self.__subtype
@property
def uuid(self):
    """The UUID instance wrapped by this UUIDLegacy instance."""
    return self.__uuid
def __init__(self, bson_bytes, codec_options=None):
    """Create a new :class:`RawBSONDocument` from raw BSON bytes.

    `codec_options`, when given, must have ``RawBSONDocument`` as its
    document_class; otherwise ``TypeError`` is raised.
    """
    self.__raw = bson_bytes
    # Decoded form is materialized lazily on first access.
    self.__inflated_doc = None
    if codec_options is None:
        codec_options = DEFAULT_RAW_BSON_OPTIONS
    elif codec_options.document_class is not RawBSONDocument:
        raise TypeError(('RawBSONDocument cannot use CodecOptions with document class %s' % (codec_options.document_class,)))
    self.__codec_options = codec_options
@property
def raw(self):
    """The raw BSON bytes composing this document."""
    return self.__raw
def items(self):
    """Lazily decode and iterate elements in this document."""
    # __inflated decodes the raw bytes on first access and caches them.
    return iteritems(self.__inflated)
def _arguments_repr(self):
    """Representation of the arguments used to create this object."""
    if self.document_class is dict:
        document_class_repr = 'dict'
    else:
        document_class_repr = repr(self.document_class)
    # Show the symbolic name of the UUID representation when known.
    uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation, self.uuid_representation)
    return ('document_class=%s, tz_aware=%r, uuid_representation=%s, unicode_decode_error_handler=%r, tzinfo=%r' % (document_class_repr, self.tz_aware, uuid_rep_repr, self.unicode_decode_error_handler, self.tzinfo))
def with_options(self, **kwargs):
    """Return a copy of this CodecOptions, overriding some options.

    Unspecified options keep their current values.
    """
    fields = ('document_class', 'tz_aware', 'uuid_representation', 'unicode_decode_error_handler', 'tzinfo')
    # Fall back to the current value for any option not overridden.
    return CodecOptions(*(kwargs.get(name, getattr(self, name)) for name in fields))
@classmethod
def encode(cls, document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS):
    """Encode a mapping `document` to a new :class:`BSON` instance.

    `check_keys` rejects keys starting with '$' or containing '.'.
    Raises ``TypeError``/``InvalidDocument`` for unencodable input.
    """
    if not isinstance(codec_options, CodecOptions):
        raise _CODEC_OPTIONS_TYPE_ERROR
    return cls(_dict_to_bson(document, check_keys, codec_options))
def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
    """Decode this BSON data into a document.

    The document class and other decoding behavior are controlled by
    `codec_options`, which must be a ``CodecOptions`` instance.
    """
    if not isinstance(codec_options, CodecOptions):
        raise _CODEC_OPTIONS_TYPE_ERROR
    return _bson_to_dict(self, codec_options)
@property
def scope(self):
    """Scope dictionary for this instance or ``None``."""
    return self.__scope
def __init__(self, time, inc):
    """Create a new :class:`Timestamp`.

    This class is only for use with the MongoDB opLog. To store a regular
    timestamp, use a :class:`~datetime.datetime` instead.

    Raises :class:`TypeError` if `time` is not an :class:`int` or
    :class:`~datetime.datetime`, or `inc` is not an :class:`int`.
    Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32).

    :Parameters:
      - `time`: time in seconds since epoch UTC, or a naive UTC
        :class:`~datetime.datetime`, or an aware
        :class:`~datetime.datetime`
      - `inc`: the incrementing counter
    """
    if isinstance(time, datetime.datetime):
        # Normalize an aware datetime to UTC before truncating to
        # whole seconds since the epoch.
        if (time.utcoffset() is not None):
            time = (time - time.utcoffset())
        time = int(calendar.timegm(time.timetuple()))
    if (not isinstance(time, integer_types)):
        raise TypeError('time must be an instance of int')
    if (not isinstance(inc, integer_types)):
        raise TypeError('inc must be an instance of int')
    # Both fields are serialized as unsigned 32-bit values.
    if (not (0 <= time < UPPERBOUND)):
        raise ValueError('time must be contained in [0, 2**32)')
    if (not (0 <= inc < UPPERBOUND)):
        raise ValueError('inc must be contained in [0, 2**32)')
    self.__time = time
    self.__inc = inc
@property
def time(self):
    """Get the time portion of this :class:`Timestamp`."""
    return self.__time
@property
def inc(self):
    """Get the inc portion of this :class:`Timestamp`."""
    return self.__inc
def as_datetime(self):
    """Return a :class:`~datetime.datetime` instance corresponding to the
    time portion of this :class:`Timestamp`.

    The returned datetime's timezone is UTC.
    """
    # ``utc`` is the module-level fixed-offset UTC tzinfo.
    return datetime.datetime.fromtimestamp(self.__time, utc)
def __init__(self, oid=None):
    """Initialize a new ObjectId.

    An ObjectId is a 12-byte unique identifier: a 4-byte timestamp,
    a 3-byte machine identifier, a 2-byte process id, and a 3-byte
    counter starting at a random value.

    By default, ``ObjectId()`` creates a new unique identifier. The
    optional parameter `oid` can be an :class:`ObjectId`, any 12 bytes,
    or a 24-hex-digit string.

    Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor
    24 hex digits, or :class:`TypeError` if `oid` is not an accepted
    type.

    :Parameters:
      - `oid` (optional): a valid ObjectId.

    .. mongodoc:: objectids
    """
    if (oid is None):
        # No input: generate a brand-new unique id.
        self.__generate()
    elif (isinstance(oid, bytes) and (len(oid) == 12)):
        # Exactly 12 bytes are taken verbatim, no validation needed.
        self.__id = oid
    else:
        # Anything else (hex string, ObjectId, bad input) goes through
        # full validation.
        self.__validate(oid)
@classmethod
def from_datetime(cls, generation_time):
    """Create a dummy ObjectId instance with a specific generation time.

    This method is useful for doing range queries on a field containing
    :class:`ObjectId` instances.

    .. warning::
       It is not safe to insert a document containing an ObjectId
       generated using this method — it deliberately eliminates the
       uniqueness guarantee. Use such ids exclusively in queries.

    `generation_time` will be converted to UTC. Naive datetime instances
    are treated as though they already contain UTC.

    :Parameters:
      - `generation_time`: :class:`~datetime.datetime` to be used as the
        generation time for the resulting ObjectId.
    """
    if generation_time.utcoffset() is not None:
        # Normalize an aware datetime to UTC.
        generation_time = generation_time - generation_time.utcoffset()
    timestamp = calendar.timegm(generation_time.timetuple())
    # FIX: the zero padding must be a *bytes* literal. struct.pack
    # returns bytes, and concatenating a str literal to it raises
    # TypeError on Python 3 (this module already supports Python 3 via
    # the PY3/text_type compatibility layer).
    oid = struct.pack('>i', int(timestamp)) + b'\x00\x00\x00\x00\x00\x00\x00\x00'
    return cls(oid)
@classmethod
def is_valid(cls, oid):
    """Check whether `oid` is a valid ObjectId.

    :Parameters:
      - `oid`: the object id to validate

    .. versionadded:: 2.3
    """
    # Falsy input (None, empty string/bytes) is never a valid id.
    if not oid:
        return False
    # Validity is defined by whether the ObjectId constructor accepts it.
    try:
        ObjectId(oid)
    except (InvalidId, TypeError):
        return False
    return True
def __generate(self):
    """Generate a new value for this ObjectId."""
    # 4-byte big-endian timestamp: seconds since the Unix epoch.
    oid = struct.pack('>i', int(time.time()))
    # 3-byte machine identifier (computed once at class level).
    oid += ObjectId._machine_bytes
    # 2-byte process id.
    # NOTE(review): modulo 65535 (not 65536) maps pid 65535 onto 0 —
    # looks intentional upstream but worth confirming.
    oid += struct.pack('>H', (os.getpid() % 65535))
    # 3-byte counter, low three bytes of a 4-byte pack, incremented
    # under a class-level lock so concurrent threads don't collide.
    with ObjectId._inc_lock:
        oid += struct.pack('>i', ObjectId._inc)[1:4]
        ObjectId._inc = ((ObjectId._inc + 1) % 16777215)
    self.__id = oid
def __validate(self, oid):
    """Validate and use the given id for this ObjectId.

    Raises :class:`TypeError` if `oid` is not an instance of
    (:class:`str`, :class:`bytes`, :class:`ObjectId`) and
    :class:`~bson.errors.InvalidId` if it is not a valid ObjectId.

    :Parameters:
      - `oid`: a valid ObjectId
    """
    if isinstance(oid, ObjectId):
        # Copy the raw 12 bytes from the existing ObjectId.
        self.__id = oid.binary
    elif isinstance(oid, string_type):
        if (len(oid) == 24):
            # 24 hex digits -> 12 raw bytes; bad hex raises InvalidId.
            try:
                self.__id = bytes_from_hex(oid)
            except (TypeError, ValueError):
                _raise_invalid_id(oid)
        else:
            _raise_invalid_id(oid)
    else:
        raise TypeError(('id must be an instance of (bytes, %s, ObjectId), not %s' % (text_type.__name__, type(oid))))
@property
def binary(self):
    """12-byte binary representation of this ObjectId."""
    return self.__id
@property
def generation_time(self):
    """A :class:`datetime.datetime` instance representing the time of
    generation for this :class:`ObjectId`.

    The :class:`datetime.datetime` is timezone aware, in UTC, and
    precise to the second.
    """
    # The first 4 bytes of the id are the big-endian creation timestamp.
    timestamp = struct.unpack('>i', self.__id[0:4])[0]
    return datetime.datetime.fromtimestamp(timestamp, utc)
def __getstate__(self):
    """Return value of object for pickling.

    Needed explicitly because __slots__ is defined (no instance
    __dict__ to pickle by default).
    """
    return self.__id
def __setstate__(self, value):
    """Explicit state set from pickling."""
    if isinstance(value, dict):
        # Pickles from older versions stored the instance __dict__;
        # the name-mangled key holds the raw id.
        oid = value['_ObjectId__id']
    else:
        oid = value
    if (PY3 and isinstance(oid, text_type)):
        # A pickle created under Python 2 holds the id as a str;
        # latin-1 round-trips all 256 byte values losslessly.
        self.__id = oid.encode('latin-1')
    else:
        self.__id = oid
def __hash__(self):
    """Get a hash value for this :class:`ObjectId`."""
    # Hash the raw 12 bytes; consistent with __eq__ comparing __id.
    return hash(self.__id)
def to_decimal(self):
    """Return an instance of :class:`decimal.Decimal` for this
    :class:`Decimal128`.
    """
    # The 128-bit value is stored as two 64-bit halves (IEEE 754-2008
    # decimal128, Binary Integer Decimal encoding).
    high = self.__high
    low = self.__low
    sign = (1 if (high & _SIGN) else 0)
    if ((high & _SNAN) == _SNAN):
        # Signaling NaN.
        return decimal.Decimal((sign, (), 'N'))
    elif ((high & _NAN) == _NAN):
        # Quiet NaN.
        return decimal.Decimal((sign, (), 'n'))
    elif ((high & _INF) == _INF):
        # +/- Infinity.
        return decimal.Decimal((sign, (), 'F'))
    if ((high & _EXPONENT_MASK) == _EXPONENT_MASK):
        # Non-canonical representation: the spec says treat the
        # significand as zero; only sign and exponent matter.
        exponent = (((high & 2305807824841605120) >> 47) - _EXPONENT_BIAS)
        return decimal.Decimal((sign, (0,), exponent))
    else:
        exponent = (((high & 9223231299366420480) >> 49) - _EXPONENT_BIAS)
        # Reassemble the 113-bit significand into 15 big-endian bytes:
        # bytes 7..14 from ``low``, bytes 1..6 from ``high``, byte 0
        # from the top significand bits of ``high``.
        arr = bytearray(15)
        mask = 255
        for i in range(14, 6, (-1)):
            arr[i] = ((low & mask) >> ((14 - i) << 3))
            mask = (mask << 8)
        mask = 255
        for i in range(6, 0, (-1)):
            arr[i] = ((high & mask) >> ((6 - i) << 3))
            mask = (mask << 8)
        mask = 281474976710656
        arr[0] = ((high & mask) >> 48)
        # Convert the big integer significand to a digit tuple for the
        # (sign, digits, exponent) Decimal constructor.
        digits = tuple((int(digit) for digit in str(_from_bytes(bytes(arr), 'big'))))
        # Use the decimal128-compatible context so no precision is lost.
        with decimal.localcontext(_DEC128_CTX) as ctx:
            return ctx.create_decimal((sign, digits, exponent))
@classmethod
def from_bid(cls, value):
    """Create an instance of :class:`Decimal128` from a Binary Integer
    Decimal (BID) string.

    :Parameters:
      - `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating
        point in Binary Integer Decimal (BID) format).
    """
    if not isinstance(value, bytes):
        raise TypeError('value must be an instance of bytes')
    if len(value) != 16:
        raise ValueError('value must be exactly 16 bytes')
    # The low 64 bits come first on the wire; the constructor takes
    # (high, low).
    low = _UNPACK_64(value[:8])[0]
    high = _UNPACK_64(value[8:])[0]
    return cls((high, low))
@property
def bid(self):
    """The Binary Integer Decimal (BID) encoding of this instance."""
    # Little-endian on the wire: low 64 bits first, then high 64 bits.
    return (_PACK_64(self.__low) + _PACK_64(self.__high))
def __init__(self, collection, id, database=None, _extra=None, **kwargs):
    """Initialize a new :class:`DBRef`.

    Raises :class:`TypeError` if `collection` or `database` is not a
    string. `database` is optional and allows references to documents
    to work across databases. Any additional keyword arguments create
    additional fields in the resultant embedded document.

    :Parameters:
      - `collection`: name of the collection the document is stored in
      - `id`: the value of the document's ``"_id"`` field
      - `database` (optional): name of the database to reference
      - `**kwargs` (optional): additional keyword arguments create
        additional, custom fields

    .. mongodoc:: dbrefs
    """
    if (not isinstance(collection, string_type)):
        raise TypeError(('collection must be an instance of %s' % string_type.__name__))
    if ((database is not None) and (not isinstance(database, string_type))):
        raise TypeError(('database must be an instance of %s' % string_type.__name__))
    self.__collection = collection
    self.__id = id
    self.__database = database
    # ``_extra`` previously defaulted to a shared mutable ``{}``; a None
    # sentinel avoids the linted anti-pattern while preserving behavior
    # (the dict was only read, never mutated).
    if _extra:
        kwargs.update(_extra)
    self.__kwargs = kwargs
@property
def collection(self):
    """Get the name of this DBRef's collection."""
    return self.__collection
@property
def id(self):
    """Get this DBRef's _id."""
    return self.__id
@property
def database(self):
    """Get the name of this DBRef's database.

    Returns None if this DBRef doesn't specify a database.
    """
    return self.__database
def as_doc(self):
    """Get the SON document representation of this DBRef.

    Generally not needed by application developers.
    """
    # $ref and $id must appear first and in this order per the DBRef
    # convention; $db only when a database was given.
    doc = SON([('$ref', self.collection), ('$id', self.id)])
    database = self.database
    if database is not None:
        doc['$db'] = database
    # Any custom fields follow the standard ones.
    doc.update(self.__kwargs)
    return doc
def __hash__(self):
    """Get a hash value for this :class:`DBRef`."""
    # kwargs items are sorted so the hash is independent of insertion
    # order, matching equality semantics.
    return hash((self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items()))))
def __deepcopy__(self, memo):
    """Support function for `copy.deepcopy()`."""
    # Deep-copy each component, threading the memo dict through so
    # shared/cyclic structures are handled correctly.
    collection = deepcopy(self.__collection, memo)
    doc_id = deepcopy(self.__id, memo)
    database = deepcopy(self.__database, memo)
    extra = deepcopy(self.__kwargs, memo)
    return DBRef(collection, doc_id, database, extra)
def __eq__(self, other):
    """Comparison to another SON is order-sensitive while comparison to a
    regular dictionary is order-insensitive.
    """
    if isinstance(other, SON):
        # FIX: materialize items() as lists. On Python 3, dict items
        # views compare with *set* semantics, which would silently make
        # this comparison order-insensitive — contradicting the
        # documented contract above.
        return ((len(self) == len(other)) and
                (list(self.items()) == list(other.items())))
    # Against a plain mapping, compare by value only.
    return (self.to_dict() == other)
def to_dict(self):
    """Convert a SON document to a normal Python dictionary instance.

    This is trickier than just *dict(...)* because it needs to be
    recursive: nested mappings and lists are converted as well.
    """
    # FIX: ``collections.Mapping`` was moved to ``collections.abc`` and
    # the old alias is removed in Python 3.10; fall back for Python 2.
    mapping_type = getattr(collections, 'abc', collections).Mapping

    def transform_value(value):
        # Recurse into lists and mappings; leave scalars untouched.
        if isinstance(value, list):
            return [transform_value(v) for v in value]
        elif isinstance(value, mapping_type):
            return dict([(k, transform_value(v)) for (k, v) in iteritems(value)])
        else:
            return value

    return transform_value(dict(self))
@classmethod
def from_native(cls, regex):
    """Convert a Python regular expression into a ``Regex`` instance.

    :Parameters:
      - `regex`: A regular expression object from ``re.compile()``.

    .. warning::
       Python regular expressions use a different syntax and different
       set of flags than MongoDB, which uses `PCRE`_. A regular
       expression retrieved from the server may not compile in Python,
       or may match a different set of strings in Python than when used
       in a MongoDB query.

    .. _PCRE: http://www.pcre.org/
    """
    # Accept only compiled pattern objects; everything else is an error.
    if isinstance(regex, RE_TYPE):
        return Regex(regex.pattern, regex.flags)
    raise TypeError(('regex must be a compiled regular expression, not %s' % type(regex)))
def __init__(self, pattern, flags=0):
    """BSON regular expression data.

    This class is useful to store and retrieve regular expressions that
    are incompatible with Python's regular expression dialect.

    :Parameters:
      - `pattern`: string
      - `flags`: (optional) an integer bitmask, or a string of flag
        characters like "im" for IGNORECASE and MULTILINE
    """
    if (not isinstance(pattern, (text_type, bytes))):
        raise TypeError(('pattern must be a string, not %s' % type(pattern)))
    self.pattern = pattern
    if isinstance(flags, string_type):
        # Flag letters like "im" are translated to the re module's
        # integer bitmask.
        self.flags = str_flags_to_int(flags)
    elif isinstance(flags, int):
        self.flags = flags
    else:
        raise TypeError(('flags must be a string or int, not %s' % type(flags)))
def try_compile(self):
    """Compile this :class:`Regex` as a Python regular expression.

    .. warning::
       Python regular expressions use a different syntax and different
       set of flags than MongoDB, which uses `PCRE`_. A regular
       expression retrieved from the server may not compile in Python,
       or may match a different set of strings in Python than when used
       in a MongoDB query. :meth:`try_compile()` may raise
       :exc:`re.error`.

    .. _PCRE: http://www.pcre.org/
    """
    return re.compile(self.pattern, self.flags)
def __new__(cls, *args, **kwargs):
    """Construct a ZipFile or ContextualZipFile as appropriate.

    If the running Python's :class:`zipfile.ZipFile` already supports
    the context-manager protocol, return a plain ZipFile instead of
    this compatibility wrapper.
    """
    if hasattr(zipfile.ZipFile, '__exit__'):
        return zipfile.ZipFile(*args, **kwargs)
    return super(ContextualZipFile, cls).__new__(cls)
def save_params(self, params=None, args=None):
    """Save parameters into MongoDB buckets, and save the file ID into
    the Params collection.

    Parameters
    ----------
    params : list
        A list of parameters to serialize and store.
    args : dict
        Item meta data stored alongside the file id.

    Returns
    -------
    f_id : the buckets ID of the parameters.
    """
    # FIX: the old signature used shared mutable defaults
    # (``params=[], args={}``). ``args`` was mutated below — both by
    # ``args.update`` and by pymongo's insert_one adding ``_id`` — so a
    # second no-argument call reused the stale ``_id`` and raised
    # DuplicateKeyError. Fresh objects per call preserve the interface.
    params = [] if params is None else params
    args = {} if args is None else args
    self.__autofill(args)
    s = time.time()
    f_id = self.paramsfs.put(self.__serialization(params))
    args.update({'f_id': f_id, 'time': datetime.utcnow()})
    self.db.Params.insert_one(args)
    # Single-argument print() behaves identically on Python 2 and 3,
    # unlike the previous py2-only print statement.
    print('[TensorDB] Save params: SUCCESS, took: {}s'.format(round((time.time() - s), 2)))
    return f_id
@AutoFill
def find_one_params(self, args={}, sort=None):
    """Find one parameter set from MongoDB buckets.

    Parameters
    ----------
    args : dict
        Query filter for the Params collection.
    sort : optional
        Sort specification forwarded to ``find_one``.

    Returns
    -------
    params : the parameters; ``False`` if nothing was found.
    f_id : the buckets ID of the parameters; ``False`` if nothing found.
    """
    # NOTE(review): ``args={}`` is a shared mutable default, kept as-is
    # because the @AutoFill decorator updates this dict in place and
    # expects a dict — confirm before changing the signature.
    s = time.time()
    d = self.db.Params.find_one(filter=args, sort=sort)
    if (d is not None):
        f_id = d['f_id']
    else:
        print('[TensorDB] FAIL! Cannot find: {}'.format(args))
        return (False, False)
    # FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; ``except Exception`` keeps the best-effort behavior
    # without masking interpreter-level exits.
    try:
        params = self.__deserialization(self.paramsfs.get(f_id).read())
        print('[TensorDB] Find one params SUCCESS, {} took: {}s'.format(args, round((time.time() - s), 2)))
        return (params, f_id)
    except Exception:
        return (False, False)
@AutoFill
def find_all_params(self, args={}):
    """Find all parameter sets matching *args* from MongoDB buckets.

    Parameters
    ----------
    args : dict
        Query filter for the Params collection.

    Returns
    -------
    params : list of deserialized parameter sets; ``False`` if nothing
        was found.
    """
    # NOTE(review): ``args={}`` kept — @AutoFill mutates this dict in
    # place and requires a dict default.
    s = time.time()
    pc = self.db.Params.find(args)
    if (pc is not None):
        f_id_list = pc.distinct('f_id')
        params = []
        for f_id in f_id_list:
            tmp = self.paramsfs.get(f_id).read()
            params.append(self.__deserialization(tmp))
    else:
        # NOTE(review): pymongo's find() never returns None, so this
        # branch looks unreachable; kept for behavioral parity.
        print('[TensorDB] FAIL! Cannot find any: {}'.format(args))
        return False
    # py2/py3-compatible print() replaces the py2-only print statement.
    print('[TensorDB] Find all params SUCCESS, took: {}s'.format(round((time.time() - s), 2)))
    return params
@AutoFill
def del_params(self, args={}):
    """Delete params in MongoDB buckets.

    Parameters
    ----------
    args : dict
        Query filter selecting items to delete; leave it empty to delete
        all parameters.
    """
    # NOTE(review): ``args={}`` kept — @AutoFill mutates this dict in
    # place and requires a dict default.
    pc = self.db.Params.find(args)
    f_id_list = pc.distinct('f_id')
    # Delete the GridFS files first, then the metadata documents.
    for f in f_id_list:
        self.paramsfs.delete(f)
    # NOTE(review): Collection.remove() is deprecated in pymongo 3.x in
    # favor of delete_many(); kept to avoid changing library requirements.
    self.db.Params.remove(args)
    # py2/py3-compatible print() replaces the py2-only print statement.
    print('[TensorDB] Delete params SUCCESS: {}'.format(args))
@AutoFill
def train_log(self, args={}):
    """Save the training log.

    Parameters
    ----------
    args : dict
        Items to save.

    Examples
    --------
    >>> db.train_log(time=time.time(), {'loss': loss, 'acc': acc})
    """
    # NOTE(review): insert_one mutates ``args`` (adds '_id'); combined
    # with the shared mutable default ``args={}`` this leaks state
    # between default-argument calls — confirm and consider a None
    # sentinel (the @AutoFill decorator may require a dict).
    _result = self.db.TrainLog.insert_one(args)
    # NOTE(review): ``_log`` is computed but never printed or returned —
    # presumably a leftover debug line; confirm before removing.
    _log = self._print_dict(args)
    return _result