我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 pymongo.errors 模块中的异常类。
def _check_write_command_response(results):
    """Backward compatibility helper for write command error handling.

    ``results`` is a sequence of ``(offset, reply)`` pairs, one per
    batch.  Raises for the last batch that reported a problem, giving
    write errors precedence over write concern errors; returns ``None``
    when every batch succeeded.
    """
    flagged = [pair for pair in results
               if "writeErrors" in pair[1] or "writeConcernError" in pair[1]]
    if not flagged:
        return

    # If multiple batches had errors, raise from the last batch.
    offset, reply = flagged[-1]
    write_errors = reply.get("writeErrors")
    if write_errors:
        # Only the batch's last write error is reported, to emulate
        # continue_on_error.
        error = write_errors[-1]
        error["index"] += offset
        if error.get("code") == 11000:
            raise DuplicateKeyError(error.get("errmsg"), 11000, error)
    else:
        error = reply["writeConcernError"]
        if "errInfo" in error and error["errInfo"].get('wtimeout'):
            # Make sure write-concern timeouts surface as WTimeoutError.
            raise WTimeoutError(error.get("errmsg"), error.get("code"),
                                error)
    raise OperationFailure(error.get("errmsg"), error.get("code"), error)
def get_default_database(self):
    """Return the database named in the MongoDB connection URI.

    Useful in scripts that choose their database solely from a URI in a
    configuration file, e.g. ``mongodb://localhost/my_database``.

    Raises :exc:`pymongo.errors.ConfigurationError` when the URI named
    no default database.
    """
    attr_name = mangle_delegate_name(
        self.__class__, '__default_database_name')
    name = getattr(self.delegate, attr_name)
    if name is None:
        raise pymongo.errors.ConfigurationError(
            'No default database defined')
    return self[name]
def find(self, *args, **kwargs):
    """Create a :class:`MotorCursor`.

    Same parameters as PyMongo's
    :meth:`~pymongo.collection.Collection.find`.  No server round trip
    happens here, so ``find`` takes no callback and returns no Future;
    cursor methods such as :meth:`~MotorCursor.to_list` or
    :meth:`~MotorCursor.count` perform the actual operations.
    """
    if 'callback' in kwargs:
        raise pymongo.errors.InvalidOperation(
            "Pass a callback to each, to_list, or count, not to find.")
    return MotorCursor(self.delegate.find(*args, **kwargs), self)
def refresh(self):
    """Synchronously refresh replica-set state, then reschedule itself.

    Runs on a child greenlet so the blocking PyMongo ``refresh`` call
    can suspend without blocking the IOLoop's main greenlet.
    """
    assert greenlet.getcurrent().parent is not None,\
        "Should be on child greenlet"
    try:
        self.rsc.refresh()
    except pymongo.errors.AutoReconnect:
        # Transient network hiccup: keep monitoring.
        pass
    # RSC has been collected or there
    # was an unexpected error.
    except:
        return
    finally:
        # Switch to greenlets blocked in wait_for_refresh().
        self.refreshed.set()
    # Schedule the next refresh.  Skipped when the bare "except" above
    # returned -- presumably the monitor is then considered dead.
    self.timeout_obj = self.io_loop.add_timeout(
        time.time() + self._refresh_interval, self.async_refresh)
def connect(self, url, max_retries, retry_interval):
    """Return a MongoProxy client for *url*, reusing pooled clients.

    Clients are cached in ``self._pool`` as weak references keyed by
    the URI's option names; a dead reference triggers a reconnect.
    """
    options = pymongo.uri_parser.parse_uri(url)
    for field in ('database', 'username', 'password', 'collection'):
        del options[field]
    # NOTE(review): tuple(dict) keeps only the option NAMES, not their
    # values, so URLs differing only in option values share one pool
    # slot -- preserved as-is, but worth confirming upstream intent.
    pool_key = tuple(options)

    if pool_key in self._pool:
        cached = self._pool.get(pool_key)()
        if cached:
            return cached

    splitted_url = netutils.urlsplit(url)
    log_data = {'db': splitted_url.scheme,
                'nodelist': options['nodelist']}
    LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data)
    try:
        client = MongoProxy(pymongo.MongoClient(url),
                            max_retries, retry_interval)
    except pymongo.errors.ConnectionFailure as e:
        LOG.warning(_('Unable to connect to the database server: '
                      '%(errmsg)s.') % {'errmsg': e})
        raise
    self._pool[pool_key] = weakref.ref(client)
    return client
def query(self, command, value=1, check=True, allowable_errors=None, **kwargs):
    """Run a raw command via pymongo's ``Database.command``.

    Logs the equivalent mongo-shell invocation before and after the
    call, stores the parsed reply on ``self._response`` and returns it.
    Raises ``MongoError`` when pymongo reports any failure.

    NOTE(review): when no database is selected this only logs a
    CRITICAL record and still proceeds -- confirm that is intended.
    (Original docstring was mojibake and could not be recovered.)
    """
    if not self._db:
        self.recorder('CRITICAL', 'please select db first!')
    rendered = 'db.runCommand(\n{cmd}\n)'.format(
        cmd=dumps(command, indent=4, whole=4))
    self.recorder('INFO', '{obj} command start\n{cmd}'.format(
        obj=self, cmd=rendered))
    try:
        with tool.timing('s', 10) as timer:
            reply = self._db.command(
                command=command, value=value, check=check,
                allowable_errors=allowable_errors, **kwargs)
    except pymongo.errors.PyMongoError as exc:
        self.recorder('ERROR', '{obj} command error [{msg}]'.format(
            obj=self, msg=exc))
        raise MongoError
    self.recorder('INFO', '{obj} command successful\n{cmd} -- {time}'.format(
        obj=self, cmd=rendered, time=timer))
    self._response = self._parse_response(reply)
    return self._response
def query(self, command, value=1, check=True, allowable_errors=None, **kwargs):
    """Coroutine: run a raw command via pymongo's ``Database.command``.

    Async twin of the synchronous ``query``: yields the driver call,
    stores the parsed reply on ``self._response`` and delivers it with
    ``raise Return(...)``.  Raises ``MongoError`` on driver failure.

    NOTE(review): when no database is selected this only logs a
    CRITICAL record and still proceeds -- confirm that is intended.
    (Original docstring was mojibake and could not be recovered.)
    """
    if not self._db:
        self.recorder('CRITICAL', 'please select db first!')
    rendered = 'db.runCommand(\n{cmd}\n)'.format(
        cmd=dumps(command, indent=4, whole=4))
    self.recorder('INFO', '{obj} command start\n{cmd}'.format(
        obj=self, cmd=rendered))
    try:
        with tool.timing('s', 10) as timer:
            reply = yield self._db.command(
                command=command, value=value, check=check,
                allowable_errors=allowable_errors, **kwargs)
    except pymongo.errors.PyMongoError as exc:
        self.recorder('ERROR', '{obj} command error [{msg}]'.format(
            obj=self, msg=exc))
        raise MongoError
    self.recorder('INFO', '{obj} command successful\n{cmd} -- {time}'.format(
        obj=self, cmd=rendered, time=timer))
    self._response = self._parse_response(reply)
    raise Return(self._response)
def _ensure_cookie_secret():
    """Make sure the cookie master secret is loaded into the module cache.

    Generates and stores a fresh secret on first use; if another
    process wins the insert race, adopts the secret that process
    stored.
    """
    global _cookie_master_secret
    entry = _db.config.find_one({'_id': 'cookie_master_secret'})
    if not entry:
        candidate = misc_util.generate_random_id(length=32)
        try:
            entry = {'_id': 'cookie_master_secret', 'value': candidate}
            _db.config.insert_one(entry)
        except pymongo.errors.DuplicateKeyError:
            # Lost the race: read the secret the winner inserted.
            entry = _db.config.find_one({'_id': 'cookie_master_secret'})
            assert entry
    _cookie_master_secret = entry['value']
def _increment_atomic_counter(key):
    """Atomically increment and return the counter stored under *key*.

    Upserts the counter document on first use; a concurrent upsert can
    raise DuplicateKeyError, in which case a plain increment is retried.
    """
    after = pymongo.collection.ReturnDocument.AFTER
    try:
        entry = _db.config.find_one_and_update(
            {'_id': key},
            {'$setOnInsert': {'_id': key}, '$inc': {'value': 1}},
            upsert=True,
            return_document=after)
    except pymongo.errors.DuplicateKeyError:
        # Another writer created the document first; increment it.
        entry = _db.config.find_one_and_update(
            {'_id': key},
            {'$inc': {'value': 1}},
            return_document=after)
    return entry['value']
def lock_snapshot_cron_job(snapshot_time):
    """Obtains a lock for a snapshot cron job.

    Args:
        snapshot_time: Timestamp of the snapshot.

    Returns:
        True if a lock is acquired.  Otherwise False.
    """
    lock_doc = {
        '_id': 'snapshot:%d' % snapshot_time,
        'locked_time': misc_util.time(),
    }
    try:
        _db.cron_locks.insert_one(lock_doc)
    except pymongo.errors.DuplicateKeyError:
        # Someone else holds the lock for this snapshot.
        return False
    return True
def create(self):
    """Creates model in database.

    Stamps creation/update times and a fresh update marker, inserts the
    state (with retries), re-reads the stored document and adopts it.
    Raises ``UniqueConstraintViolationError`` on a duplicate key.
    """
    state = self.get_state()
    state.pop("_id", None)
    now = timeutils.current_unix_timestamp()
    state["time"]["created"] = now
    state["time"]["updated"] = now
    state["update_marker"] = self.new_update_marker()

    collection = self.collection()
    # Two separate retry decorators, matching the original call pattern.
    insert_with_retry = retryutils.mongo_retry()(collection.insert_one)
    find_with_retry = retryutils.mongo_retry()(collection.find_one)
    try:
        result = insert_with_retry(state)
    except pymongo.errors.DuplicateKeyError as exc:
        raise exceptions.UniqueConstraintViolationError from exc
    self.set_state(find_with_retry({"_id": result.inserted_id}))
    return self
def check_deprecated_kwargs(kwargs):
    """Reject PyMongo options that Motor does not support.

    Raises :exc:`pymongo.errors.ConfigurationError` for 'safe',
    'slave_okay'/'slaveok', or 'auto_start_request'; returns None
    otherwise.
    """
    rejected = (
        (('safe',),
         "Motor does not support 'safe', use 'w'"),
        (('slave_okay', 'slaveok'),
         "Motor does not support 'slave_okay', use read_preference"),
        (('auto_start_request',),
         "Motor does not support requests"),
    )
    for names, message in rejected:
        if any(name in kwargs for name in names):
            raise pymongo.errors.ConfigurationError(message)
def _check(self, sock_info):
    """Validate a pooled socket before reuse, replacing it if stale.

    A socket is stale when it is closed, belongs to an earlier pool
    generation, or (if idle more than one second -- a deliberate
    performance trade-off) fails a liveness probe.  On a failed
    replacement attempt the pool is reset and the error re-raised.
    """
    stale = sock_info.closed
    if not stale and self.pool_id != sock_info.pool_id:
        # Pool was reset since this socket was checked out.
        sock_info.close()
        stale = True
    if not stale and time.time() - sock_info.last_checkout > 1:
        # Only probe sockets idle > 1s, to keep checkout cheap.
        if _closed(sock_info.sock):
            sock_info.close()
            stale = True

    if not stale:
        return sock_info
    try:
        return self.connect()
    except socket.error:
        self.reset()
        raise
def _create_wait_queue_timeout(self):
    """Build (not raise) the ConnectionFailure used for pool-wait timeouts."""
    message = (
        'Timed out waiting for socket from pool with max_size %r and'
        ' wait_queue_timeout %r' % (
            self.max_size, self.wait_queue_timeout))
    return pymongo.errors.ConnectionFailure(message)
def open(self):
    """Connect to the server.

    Takes an optional callback, or returns a Future that resolves to
    ``self`` once connected -- handy for verifying at program startup
    that the server is reachable::

        client = MotorClient()
        IOLoop.current().run_sync(client.open)

    Raises :exc:`~pymongo.errors.ConnectionFailure` if it cannot
    connect; note that auth failures only surface on the first real
    operation.

    :Parameters:
     - `callback`: Optional function taking parameters (self, error)

    .. versionchanged:: 0.2
       :class:`MotorClient` now opens itself on demand; calling
       ``open`` explicitly is optional.
    """
    yield self._ensure_connected()
    raise gen.Return(self)
def open(self):
    """Connect to the replica set.

    Takes an optional callback, or returns a Future that resolves to
    ``self`` once connected -- handy for verifying at program startup
    that the set is reachable.  Raises
    :exc:`~pymongo.errors.ConnectionFailure` if it cannot connect, and
    :exc:`~pymongo.errors.AutoReconnect` when no primary is found; auth
    failures only surface on the first real operation.

    :Parameters:
     - `callback`: Optional function taking parameters (self, error)

    .. versionchanged:: 0.2
       :class:`MotorReplicaSetClient` now opens itself on demand;
       calling ``open`` explicitly is optional.
    """
    yield self._ensure_connected(True)
    if not self._get_member():
        raise pymongo.errors.AutoReconnect('no primary is available')
    raise gen.Return(self)
def _get_more(self, callback):
    """Asynchronously fetch a batch: initial query or cursor get-more.

    :Parameters:
     - `callback`: function taking parameters (batch_size, error)

    Raises :exc:`pymongo.errors.InvalidOperation` if the cursor is no
    longer alive.
    """
    if not self.alive:
        raise pymongo.errors.InvalidOperation(
            "Can't call get_more() on a MotorCursor that has been"
            " exhausted or killed.")

    self.started = True
    self._refresh(callback=callback)
def __getattr__(self, item):
    """Proxy attribute reads to the wrapped GridOut once it is opened."""
    if self.delegate._file:
        return getattr(self.delegate, item)
    raise pymongo.errors.InvalidOperation(
        "You must call MotorGridOut.open() before accessing "
        "the %s property" % item)
def connect(self):
    """Connect to Mongo and return a new connected MotorSocket.

    Note that the pool does not keep a reference to the socket -- you
    must call maybe_return_socket() when you're done with it.
    """
    child_gr = greenlet.getcurrent()
    main = child_gr.parent
    assert main is not None, "Should be on child greenlet"

    if self.max_size and self.motor_sock_counter >= self.max_size:
        # Pool is exhausted: this greenlet must wait for a socket.
        if self.max_waiters and len(self.queue) >= self.max_waiters:
            # Too many greenlets already queued; fail fast.
            raise self._create_wait_queue_timeout()

        # maybe_return_socket resumes this greenlet via the waiter.
        waiter = stack_context.wrap(child_gr.switch)
        self.queue.append(waiter)

        if self.wait_queue_timeout is not None:
            deadline = self.io_loop.time() + self.wait_queue_timeout
            # On timeout, raise ConnectionFailure INSIDE the waiting
            # greenlet by throwing into it.
            timeout = self.io_loop.add_timeout(
                deadline, functools.partial(
                    child_gr.throw,
                    pymongo.errors.ConnectionFailure,
                    self._create_wait_queue_timeout()))
            self.waiter_timeouts[waiter] = timeout

        # Yield until maybe_return_socket passes spare socket in.
        return main.switch()
    else:
        # Capacity available: open a fresh connection.
        motor_sock = self.create_connection()
        motor_sock.settimeout(self.net_timeout)
        return SocketInfo(motor_sock, self.pool_id, self.pair[0])
def _safe_mongo_call(max_retries, retry_interval):
    """Build a tenacity decorator retrying on transient Mongo disconnects.

    Retries only :exc:`pymongo.errors.AutoReconnect`, waiting
    *retry_interval* seconds between attempts; a negative *max_retries*
    means retry forever.
    """
    if max_retries >= 0:
        stop_policy = tenacity.stop_after_attempt(max_retries)
    else:
        stop_policy = tenacity.stop_never
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=stop_policy,
    )
def create_index(self, keys, name=None, *args, **kwargs):
    """Create an index, recreating it when one with the same name but a
    different specification already exists.

    :param keys: index specification forwarded to
        :meth:`pymongo.collection.Collection.create_index`
    :param name: optional index name, also shown in the recreate log
    :raises pymongo.errors.OperationFailure: for any failure other than
        the spec-conflict code handled below
    """
    try:
        self.conn.create_index(keys, name=name, *args, **kwargs)
    except pymongo.errors.OperationFailure as e:
        # FIX: the original compared error codes with ``is``, which
        # tests identity, not equality -- it only worked because
        # CPython caches small ints.
        if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
            # FIX: lazy logging args and corrected message grammar.
            LOG.info("Index %s will be recreated.", name)
            self._recreate_index(keys, name, *args, **kwargs)
        else:
            # FIX: the original silently swallowed every other
            # OperationFailure; propagate unexpected failures.
            raise