The following 42 code examples, extracted from open-source Python projects, illustrate how to use redis.ConnectionError().
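For orientation before the examples: redis.ConnectionError is the exception redis-py raises when a command cannot reach the server, and the recurring pattern below is to wrap the call in try/except and then retry, fall back, or report the failure. Here is a minimal sketch of that pattern; the host, port, retry count, and function name are illustrative placeholders, not taken from any of the examples that follow.

import time
import redis

def ping_with_retry(host="localhost", port=6379, retries=3):
    """Return True if the Redis server answers PING, retrying on connection errors."""
    client = redis.StrictRedis(host=host, port=port)
    for _ in range(retries):
        try:
            return client.ping()
        except redis.ConnectionError:
            # Server not reachable yet; wait briefly and try again.
            time.sleep(1)
    return False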
def wait_serve(ip_port, timeout=5):
    t = time.time() + timeout
    rcl = get_client(ip_port)
    while time.time() < t:
        try:
            rcl.hget('foo', 'foo')
            logger.info('redis is ready: ' + repr(ip_port))
            return
        except redis.ConnectionError as e:
            logger.info('can not connect to redis: ' + repr(ip_port) + ' ' + repr(e))
            time.sleep(0.1)
            continue
    else:
        logger.error('can not connect to redis: ' + repr(ip_port))
        raise
def subscribe(self, client, channel):
    """Register a new client to receive messages on a channel."""
    # Make sure this process is subscribed to the redis channel
    if channel not in self.pubsub.channels:
        try:
            self.pubsub.subscribe([channel])
        except ConnectionError:
            app.logger.exception('Could not connect to redis.')
        else:
            log('Subscribed to redis channel {}'.format(channel))
    # Make sure this process has a greenlet listening for messages
    if self.greenlet is None:
        self.start()
    self.clients[channel].append(client)
    log('Subscribed client {} to channel {}'.format(client, channel))
def push_notification(data, user=None):
    """
    Push notification to Cerberus user(s)

    :param dict data: The content of the notification
    """
    if not user:
        notif_queues = ['cerberus:notification:%s' % (username) for username in CERBERUS_USERS]
    else:
        notif_queues = ['cerberus:notification:%s' % (user.username)]

    for notif_queue in notif_queues:
        try:
            redis.rpush(
                notif_queue,
                json.dumps(data),
            )
        except RedisError:
            pass
def _get_service_names(self):
    """
    Get a list of service names from Sentinel. Tries Sentinel hosts until one
    succeeds; if none succeed, raises a ConnectionError.

    :return: the list of service names from Sentinel.
    """
    master_info = None
    connection_errors = []
    for sentinel in self._sentinel.sentinels:
        # Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters,
        # so we have to step through all of its connections manually
        try:
            master_info = sentinel.sentinel_masters()
            break
        except (redis.ConnectionError, redis.TimeoutError) as e:
            connection_errors.append('Failed to connect to {} due to error: "{}".'.format(sentinel, e))
            continue
    if master_info is None:
        raise redis.ConnectionError(
            'Could not get master info from Sentinel\n{}:'.format('\n'.join(connection_errors))
        )
    return list(master_info.keys())
def get(cls, pk, force=False):
    if not force:
        ident_key = identity_key(cls, pk)
        if cls._db_session.identity_map and \
                ident_key in cls._db_session.identity_map:
            return cls._db_session.identity_map[ident_key]
        try:
            cached_val = cls._cache_client.get(cls.gen_raw_key(pk))
            if cached_val:
                cls._statsd_incr('hit')
                return cls.from_cache(cached_val)
        except redis.ConnectionError as e:
            logger.error(e)
        except TypeError as e:
            logger.error(e)

    cls._statsd_incr('miss')
    obj = cls._db_session().query(cls).get(pk)
    if obj is not None:
        cls.set_raw(obj.__rawdata__)
    return obj
def check_prerequisites(self):
    """
    Check the prerequisites (working redis and mongodb connections) before starting.
    """
    # Check the redis connection and subscribe to the dHydra channel
    try:
        self.__redis__ = get_vendor("DB").get_redis()
        self.__redis__.client_list()
        self.__listener__ = self.__redis__.pubsub()
        self.__listener__.subscribe(["dHydra"])
    except redis.ConnectionError:
        self.logger.error("Cannot connect to redis")
        return False
    # Check the mongodb connection
    self.mongo = get_vendor("DB").get_mongodb()
    if self.mongo is False:
        self.logger.error("Cannot connect to mongodb")
        return False
    # All prerequisites satisfied
    return True
def new():
    """
    Initialize key value store that will be used for the event publishing.
    That way the main API takes advantage of Redis pub/sub capabilities to
    push events to the event stream API.
    """
    try:
        publisher = redis.StrictRedis(
            host=config.KEY_VALUE_STORE["host"],
            port=config.KEY_VALUE_STORE["port"],
            db=config.KV_EVENTS_DB_INDEX,
            decode_responses=True
        )
        publisher.get(None)
    except redis.ConnectionError:
        try:
            import fakeredis
            publisher = fakeredis.FakeStrictRedis()
        except:
            print("Cannot access to the required Redis instance")
            sys.exit(1)
    return publisher
def _get_service_names(self):
    """
    Get a list of service names from Sentinel. Tries Sentinel hosts until one
    succeeds; if none succeed, raises a ConnectionError.
    """
    master_info = None
    connection_errors = []
    for sentinel in self._sentinel.sentinels:
        # Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters,
        # so we have to step through all of its connections manually
        try:
            master_info = sentinel.sentinel_masters()
            break
        except (redis.ConnectionError, redis.TimeoutError) as e:
            connection_errors.append("Failed to connect to {}: {}".format(sentinel, e))
            continue
    if master_info is None:
        raise redis.ConnectionError(
            "Could not get master info from sentinel\n{}.".format("\n".join(connection_errors)))
    return list(master_info.keys())

### Connection handling ####
def rss(self):
    """Dump the last N days' updates as an RSS feed."""
    # determine whether the rss file is up to date
    content = None
    if self.cache_redis is None:
        content = EMPTY_RSS
    else:
        try:
            value = self.cache_redis.get('rss~main')
            if value:
                content = value
            else:
                tasks.rss_regen()
                content = self.cache_redis.get('rss~main')
        except redis.ConnectionError:
            content = EMPTY_RSS
    # TODO: throw in a last-modified header too?
    self.handler.send_response(200, 'OK')
    self.handler.set_content_type('text/xml; charset=utf-8')
    self.handler.end_headers()
    self.wfile.write(content)
def listen(self, rcv_timeout=0.01):
    message = True
    while message:
        try:
            message = self._pubsub.get_message(timeout=rcv_timeout)
        except ConnectionError:
            log.error("lost connection to Redis")
            time.sleep(1)
            break
        if message:
            log.debug("%s - received pub message: %s" % (self._cli_id, message))
            if message['type'] == 'message':
                try:
                    return jsonpickle.loads(message['data'])
                except (ValueError, KeyError):
                    return message['data']
    return None
def send(self, receiver_id, payload, sender_id=None):
    log.debug("sending %s to %s" % (payload, receiver_id))
    packet_message = dict()
    packet_message['originator_uuid'] = self._uuid
    packet_message['receiver_id'] = receiver_id
    packet_message['receiver_bus_id'] = [0, 0, 0, 0]
    if sender_id is None:
        packet_message['sender_id'] = self._bus_addr
    else:
        packet_message['sender_id'] = sender_id
    packet_message['sender_bus_id'] = [0, 0, 0, 0]
    packet_message['payload'] = payload
    packet_message['payload_length'] = len(payload)
    try:
        self._transport.publish(packet_message)
    except ConnectionError:
        log.exception("could not publish to redis")
def subscribe(self):
    # Try to subscribe. If you fail, reconnect and try again.
    # If you fail, allow the resulting exception to be passed on.
    try:
        # Create a pubsub to receive messages
        self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
        # Subscribe to act_player_info keyspace events
        self.pubsub.psubscribe(u'__key*__:act_player_info')
    except redis.ConnectionError:
        self.connect()
        # Try again to subscribe
        # Create a pubsub to receive messages
        self.pubsub = self.dataclient.pubsub(ignore_subscribe_messages=True)
        # Subscribe to act_player_info keyspace events
        self.pubsub.subscribe(u'__key*__:act_player_info')
def test_sub_lst(self):
    """
    Subscribe to a list of channels and inspect the time stamp messages from the master.
    """
    # Connect directly instead of via the connection pool (pooled redis has a 3s timeout)
    # rds = redis.Redis(connection_pool=self._rds_pool)
    rds = redis.Redis(host=self._ip, port=self._port)
    try:
        sub = rds.pubsub()
        sub_lst = ['channel_1', 'channel_2']
        print sub.unsubscribe(*sub_lst)
        print sub.subscribe(*sub_lst)
        sub.subscribe('abc')
        for item in sub.listen():
            print item
            print type(item['data'])
            if 'chan' in item['channel']:
                print item['channel'], item['channel'][item['channel'].find('chan'):]
            if item['channel'] == 'channel_1' and item['type'] == 'subscribe':
                print sub.unsubscribe(item['channel'])
    except redis.ConnectionError as e:
        pass
def _start_process(self):
    if self._is_external:
        return
    if REDIS_DEBUGGER:
        debugger = REDIS_DEBUGGER.split()
        args = debugger + self.args
    else:
        args = self.args

    stdout = None if REDIS_SHOW_OUTPUT else subprocess.PIPE
    if REDIS_SHOW_OUTPUT:
        sys.stderr.write("Executing: {}".format(repr(args)))
    self.process = subprocess.Popen(
        args,
        stdin=sys.stdin,
        stdout=stdout,
        stderr=sys.stderr,
    )

    begin = time.time()
    while True:
        try:
            self.client().ping()
            break
        except (redis.ConnectionError, redis.ResponseError):
            self.process.poll()
            if self.process.returncode is not None:
                raise RuntimeError(
                    "Process has exited with code {}\n. Redis output: {}"
                    .format(self.process.returncode, self._get_output()))
            if time.time() - begin > 300:
                raise RuntimeError('Cannot initialize client (waited 5mins)')
            time.sleep(0.1)
def check_redis_store_connected(app_configs, **kwargs):
    """
    This code is copied from the dockerflow.django.checks but with a
    different name of the connection.
    """
    import redis
    from django_redis import get_redis_connection

    errors = []

    try:
        # Note! This name 'store' is specific only to tecken
        connection = get_redis_connection('store')
    except redis.ConnectionError as e:
        msg = 'Could not connect to redis: {!s}'.format(e)
        errors.append(checks.Error(msg, id=ERROR_CANNOT_CONNECT_REDIS))
    except NotImplementedError as e:
        msg = 'Redis client not available: {!s}'.format(e)
        errors.append(checks.Error(msg, id=ERROR_MISSING_REDIS_CLIENT))
    except ImproperlyConfigured as e:
        msg = 'Redis misconfigured: "{!s}"'.format(e)
        errors.append(checks.Error(msg, id=ERROR_MISCONFIGURED_REDIS))
    else:
        result = connection.ping()
        if not result:
            msg = 'Redis ping failed'
            errors.append(checks.Error(msg, id=ERROR_REDIS_PING_FAILED))
    return errors
def run_task(bot, cmd, _from, timeout=180):
    """
    Runs the specified command asynchronously (if Redis is running) or
    synchronously (not recommended for production use, since the whole bot
    is blocked until the command returns).
    """
    bot.log.debug("Running {}".format(cmd))
    async = True
    try:
        task = Q.enqueue(check_output, cmd, stderr=STDOUT, timeout=timeout, ttl=60)
        tasklist = bot['tasks']
        tasklist[task.get_id()] = _from
        bot['tasks'] = tasklist
        return "Task '{}' enqueued as {}".format(str(_from), task.get_id())
    except ConnectionError:
        bot.log.error("Error connecting to Redis, falling back to synchronous execution")
        async = False

    if not async:
        # notify also chatrooms and/or bot admins
        bot.send(_from, "Running the task synchronously, whole bot blocked now, please wait.")
        try:
            raw_result = check_output(cmd, stderr=STDOUT)
        except CalledProcessError as exc:
            raw_result = exc.output
        except OSError:
            raw_result = "*ERROR*: ansible-playbook command not found"
        return raw_result
def check_redis_connected(app_configs, **kwargs):
    """
    A Django check to connect to the default redis connection using
    ``django_redis.get_redis_connection`` and see if Redis responds to a
    ``PING`` command.
    """
    import redis
    from django_redis import get_redis_connection

    errors = []

    try:
        connection = get_redis_connection('default')
    except redis.ConnectionError as e:
        msg = 'Could not connect to redis: {!s}'.format(e)
        errors.append(checks.Error(msg, id=ERROR_CANNOT_CONNECT_REDIS))
    except NotImplementedError as e:
        msg = 'Redis client not available: {!s}'.format(e)
        errors.append(checks.Error(msg, id=ERROR_MISSING_REDIS_CLIENT))
    except ImproperlyConfigured as e:
        msg = 'Redis misconfigured: "{!s}"'.format(e)
        errors.append(checks.Error(msg, id=ERROR_MISCONFIGURED_REDIS))
    else:
        result = connection.ping()
        if not result:
            msg = 'Redis ping failed'
            errors.append(checks.Error(msg, id=ERROR_REDIS_PING_FAILED))
    return errors
def init_redis(self):
    # Connect to redis and subscribe to the dHydra channel
    try:
        self.__redis__ = get_vendor("DB").get_redis()
        self.__redis__.client_list()
        self.__listener__ = self.__redis__.pubsub()
        self.__listener__.subscribe(["dHydra"])
    except redis.ConnectionError:
        self.logger.error("Cannot connect to redis")
        return False
def test_heartbeat_returns_500_when_redis_unavailable(self):
    self.mock_redis.ping.side_effect = redis.ConnectionError()
    response = self.client.get('/__heartbeat__')
    self.assertEqual(response.status_code, 500)
def test_heartbeat_returns_200_when_redis_unavailable(self):
    self.mock_redis.ping.side_effect = redis.ConnectionError()
    response = self.client.get('/__lbheartbeat__')
    self.assertEqual(response.status_code, 200)
def heartbeat():
    status = 200
    # Check cache connectivity
    try:
        current_app.redis_client.ping()
        statsd_client.incr('heartbeat.pass')
    except redis.ConnectionError:
        statsd_client.incr('heartbeat.fail')
        status = 500
    return Response('', status=status)
def test_redis_unavail():
    with pytest.raises(ConnectionError):
        d = Cupboard(host='localhost', port=8000, backend='redis')
def test_apply_rules_set_fails_gracefully(self):
    port_id = 1
    mac_address = netaddr.EUI("AA:BB:CC:DD:EE:FF")
    conn_err = redis.ConnectionError
    with mock.patch("quark.cache.security_groups_client."
                    "redis_base.ClientBase") as redis_mock:
        mocked_redis_cli = mock.MagicMock()
        redis_mock.return_value = mocked_redis_cli
        client = sg_client.SecurityGroupsClient()
        mocked_redis_cli.master.hset.side_effect = conn_err
        with self.assertRaises(q_exc.RedisConnectionFailure):
            client.apply_rules(port_id, mac_address.value, [])
def is_sane_redis(config: Configurator) -> bool:
    """Check that we have a working Redis connection for session.

    Execute this on startup, so we bail out without starting up with a missing Redis.

    :return: True if Redis connection works
    """
    try:
        redis = get_redis(config.registry)
        redis.set("websauna_session_test", True)
        return True
    except ConnectionError as e:
        return False
def wait_for_redis_to_start(redis_ip_address, redis_port, num_retries=5):
    """Wait for a Redis server to be available.

    This is accomplished by creating a Redis client and sending a random
    command to the server until the command gets through.

    Args:
        redis_ip_address (str): The IP address of the redis server.
        redis_port (int): The port of the redis server.
        num_retries (int): The number of times to try connecting with redis.
            The client will sleep for one second between attempts.

    Raises:
        Exception: An exception is raised if we could not connect with Redis.
    """
    redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
    # Wait for the Redis server to start.
    counter = 0
    while counter < num_retries:
        try:
            # Run some random command and see if it worked.
            print("Waiting for redis server at {}:{} to respond..."
                  .format(redis_ip_address, redis_port))
            redis_client.client_list()
        except redis.ConnectionError as e:
            # Wait a little bit.
            time.sleep(1)
            print("Failed to connect to the redis server, retrying.")
            counter += 1
        else:
            break
    if counter == num_retries:
        raise Exception("Unable to connect to Redis. If the Redis instance is "
                        "on a different machine, check that your firewall is "
                        "configured properly.")
def safe_ping():
    """
    Try to send PING command to Redis and return True in case of success.
    """
    try:
        rs_cli.ping()
    except ConnectionError:
        logging.exception("Failed to issue PING command to Redis.")
        return False
    return True
def report_status(uuid, status, progress):
    """
    Report partial status to Redis, according to:
    Protocol: checking status of running job (wiki page)
    """
    try:
        send_beat('checking', uuid)
        data = json.dumps({'status': status, 'progress': progress}).encode('utf-8')
        rs_cli.setex('status:{}'.format(uuid), data, 60)
    except ConnectionError:
        logging.warning('Failed to report partial status due to the problem with Redis connectivity.',
                        exc_info=True)
def fetch_submission():
    """
    Fetch submission from the queue, according to:
    Protocol: uploading submission (wiki page)
    """
    data = None
    while not data and not interrupted:
        try:
            set_instance_lock()
            send_beat('idle')
            keys = [REDIS_QUEUE_KEY + ":high", REDIS_QUEUE_KEY + ":medium", REDIS_QUEUE_KEY + ":low"]
            data = rs_cli.blpop(keys, timeout=5)
        except ConnectionError:
            _retry_ping()

    if interrupted:
        raise KeyboardInterrupt()

    data_decoded = json.loads(data[1].decode('utf-8'))
    s_uuid = str(data_decoded['uuid'])
    queue_key = "{}:order".format(data[0].decode('utf-8'))
    if not rs_cli.zrem(queue_key, s_uuid):
        logging.warning('Failed to remove {} key from {}'.format(s_uuid, queue_key))
    return data_decoded
def download_files(main_key, dest_path):
    """
    Download files stored in Redis key called `main_key` and extract them into dest_path.
    """
    try:
        rmtree(dest_path, ignore_errors=True)
        sub_keys = rs_cli.hkeys(main_key)
        if not len(sub_keys):
            logging.error('Failed to download files from Redis, key does not exist.')
            raise KeyError(main_key)
        for key in sub_keys:
            key = key.decode('utf-8')
            m = match(r'^file:(.+)$', key)
            if m:
                path = path_join(dest_path, m.group(1))
                # in case of directories, if path is eg. foo/bar/code.cpp
                try:
                    makedirs(path_abspath(path_join(path, '..')))
                except FileExistsError:
                    pass
                with open(path, 'wb') as source_file:
                    source_file.write(rs_cli.hget(main_key, key))
        if not rs_cli.hget(main_key, 'options:persistent'):
            rs_cli.delete(main_key)
    except ConnectionError as e:
        err = 'Failed to download files from Redis. Connection problem. {}'.format(e.message)
        logging.error(err)
        raise RuntimeError(err)
def _failed_login_ip(self):
    if self.block_redis:
        try:
            if not self.block_redis.exists(self.remote_addr):
                self.block_redis.set(self.remote_addr, 1)
                self.block_redis.expire(self.remote_addr, int(self.config.blocked_timeout))
            else:
                self.block_redis.incr(self.remote_addr)
        except redis.ConnectionError:
            pass
def _failed_login_user(self, username):
    if self.block_redis:
        try:
            if not self.block_redis.exists(username):
                self.block_redis.set(username, 1)
                self.block_redis.expire(username, int(self.config.blocked_timeout))
            else:
                self.block_redis.incr(username)
        except redis.ConnectionError:
            pass
def _check_blocked_ip(self):
    if self.block_redis:
        try:
            if (self.block_redis.exists(self.remote_addr) and
                    int(self.block_redis.get(self.remote_addr)) > int(self.config.blocked_attempts_ip)):
                return True
        except redis.ConnectionError:
            return False
    return False
def _check_blocked_user(self, username):
    if self.block_redis:
        try:
            if (self.block_redis.exists(username) and
                    int(self.block_redis.get(username)) > int(self.config.blocked_attempts_user)):
                return True
        except redis.ConnectionError:
            return False
    return False
def retry_if_connection_error(exception):
    return isinstance(exception, ConnectionError)
def __init__(self, bus_addr=1, com_port=None, baud=115200, transport=None):
    global instance_id
    instance_id += 1
    self._uuid = str(uuid.uuid4())
    if transport is None:
        self._transport = RedisConn(fake_redis_cli,
                                    sub_channel='pjon-python-redis',
                                    pub_channel='pjon-python-redis',
                                    cli_id=self._uuid)
        log.debug("using fakeredis transport")
    else:
        while True:
            try:
                self._transport = RedisConn(transport,
                                            sub_channel='pjon-python-redis',
                                            pub_channel='pjon-python-redis')
                break
            except ConnectionError:
                log.exception("connection to Redis failed, retrying")
                time.sleep(1)
        log.debug("using transport: %s" % str(transport))

    #self.transport.subscribe('pjon-serial')
    self._transport.subscribe('pjon-python-redis')
    self._data = []
    self._started = False
    self._bus_addr = bus_addr
    self._receiver_function = self.dummy_receiver
    self._receiver_function_forward = self.dummy_receiver_forward
    self._error_function = self.dummy_error
def write(self, string):
    message = dict()
    message['originator_uuid'] = self._uuid
    message['payload'] = string
    try:
        self._transport.publish(message)
    except ConnectionError:
        log.exception("could not publish to redis")
def server_online(cls):
    try:
        redis.Redis(cls.HOST).get(None)
    except redis.ConnectionError:
        return False
    else:
        return True
def retry_connect(redis_cfg, tries=300, base_delay=4.):
    for i in range(tries):
        try:
            r = redis.StrictRedis(**redis_cfg)
            r.ping()
            return r
        except redis.ConnectionError as e:
            if i == tries - 1:
                raise
            else:
                delay = base_delay * (1 + (os.getpid() % 10) / 9)
                logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(
                    redis_cfg, delay, i + 2, tries, e))
                time.sleep(delay)
def __init__(self):
    self.redis = redis
    try:
        self.redis.echo("Testing Connection")
    except ConnectionError:
        raise RequestsRespectfulRedisError("Could not establish a connection to the provided Redis server")
def __init__(self, get_response):
    self.get_response = get_response
    if not redisAvailable:
        raise MiddlewareNotUsed()
    self.r = redis.Redis(host='127.0.0.1', port=6379)
    try:
        self.r.ping()
    except redis.ConnectionError:
        raise MiddlewareNotUsed()
def __next__(self):
    try:
        while True:
            try:
                # This is a micro-optimization so we try the fast
                # path first. We assume there are messages in the
                # cache and if there aren't, we go down the slow
                # path of doing network IO.
                data = self.message_cache.pop(0)
                self.misses = 0

                message = Message.decode(data)
                return MessageProxy(message)
            except IndexError:
                # If there are fewer messages currently being
                # processed than we're allowed to prefetch,
                # prefetch up to that number of messages.
                messages = []
                if self.message_refc < self.prefetch:
                    self.message_cache = messages = self.broker._fetch(
                        queue_name=self.queue_name,
                        prefetch=self.prefetch - self.message_refc,
                    )

                # Because we didn't get any messages, we should
                # progressively long poll up to the idle timeout.
                if not messages:
                    self.misses, backoff_ms = compute_backoff(self.misses, max_backoff=self.timeout)
                    time.sleep(backoff_ms / 1000)
                    return None

                # Since we received some number of messages, we
                # have to keep track of them.
                self.message_refc += len(messages)
    except redis.ConnectionError as e:
        raise ConnectionClosed(e) from None
def run(self):
    logging.debug(u"RUNE musicdata service starting")

    while True:
        if self.dataclient is None:
            try:
                # Try to connect
                self.connect()
                self.subscribe()
                self.status()
                self.sendUpdate()
            except (redis.ConnectionError, RuntimeError):
                self.dataclient = None
                # On connection error, sleep 5 and then return to top and try again
                time.sleep(5)
                continue

        try:
            # Wait for notice that key has changed
            msg = self.pubsub.get_message()
            if msg:
                # act_player_info key event occured
                self.status()
                self.sendUpdate()
            time.sleep(.01)
        except (redis.ConnectionError, RuntimeError):
            # if we lose our connection while trying to query DB
            # sleep 5 and then return to top to try again
            self.dataclient = None
            logging.debug(u"Could not get status from Rune Redis service")
            time.sleep(5)
            continue