The following 50 code examples, extracted from open-source Python projects, illustrate how to use gevent.queue().
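Before the extracted examples, here is a minimal, self-contained sketch of the basic pattern most of them build on: producer and consumer greenlets sharing a gevent.queue.Queue. The names here are illustrative, not taken from any of the projects below.

import gevent
from gevent.queue import Queue, Empty

q = Queue()

def producer():
    for i in range(5):
        q.put(i)          # never blocks on an unbounded Queue
        gevent.sleep(0)   # yield so the consumer gets a turn

def consumer():
    try:
        while True:
            item = q.get(timeout=1)  # blocks this greenlet only
            print('got %s' % item)
    except Empty:
        print('queue drained')

gevent.joinall([gevent.spawn(producer), gevent.spawn(consumer)])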
def _concurrent_execute(self, context, start_req, parser, pool, pool_size):
    queue = Queue()  # task queue
    # Seed the queue with the start requests.
    for r in start_req:
        queue.put_nowait(r)
    if pool is None:
        pool = GeventPool(pool_size)
    greenlets = []
    while True:
        try:
            req = self._check_req(queue.get(timeout=1))
            if req.parser is None:
                req.parser = parser
            greenlets.append(pool.spawn(req, context, queue))
        except Empty:
            break
    return [greenlet.get() for greenlet in greenlets]
def usecase_child_d(forthreader, backwriter):
    recvqueue = gevent.queue.Queue()

    def g_from_forthpipe_to_q(forthreader):
        while True:
            m = forthreader.get()
            recvqueue.put(m)
            if m == "STOP":
                break

    def g_from_q_to_backpipe(backwriter):
        while True:
            m = recvqueue.get()
            backwriter.put(m)
            if m == "STOP":
                break

    g1 = gevent.spawn(g_from_forthpipe_to_q, forthreader)
    g2 = gevent.spawn(g_from_q_to_backpipe, backwriter)
    g1.get()
    g2.get()
def probe(self):
    self.log.debug("stdout queue %d" % self.stdout_queue.qsize())
    if not self.stdout_queue.qsize():
        return {}
    data = []
    try:
        # OPT skip_fields
        while True:
            line = self.stdout_queue.get_nowait()
            data.append(line)
    except gevent.queue.Empty as e:
        pass
    msg = {}
    msg['data'] = data
    msg['ts'] = (datetime.datetime.utcnow() -
                 datetime.datetime(1970, 1, 1)).total_seconds()
    return msg
def echo_worker(self):
    """The `echo_worker` works through the `self.received_transfers` queue
    and spawns `self.on_transfer` greenlets for all not-yet-seen transfers."""
    log.debug('echo worker', qsize=self.received_transfers.qsize())
    while self.stop_signal is None:
        if self.received_transfers.qsize() > 0:
            transfer = self.received_transfers.get()
            if transfer in self.seen_transfers:
                log.debug(
                    'duplicate transfer ignored',
                    initiator=pex(transfer['initiator']),
                    amount=transfer['amount'],
                    identifier=transfer['identifier']
                )
            else:
                self.seen_transfers.append(transfer)
                self.greenlets.append(gevent.spawn(self.on_transfer, transfer))
        else:
            gevent.sleep(.5)
def stop(self):
    """Stop the greenlet workers and empty all queues."""
    with self._state_change:
        if not self._running:
            return

        self._running = False

        for queue in (self.callback_queue,):
            queue.put(_STOP)

        while self._workers:
            worker = self._workers.pop()
            worker.join()

        # Clear the queues
        self.callback_queue = Queue()  # pragma: nocover

        python2atexit.unregister(self.stop)
def async_determine_rd_coordinates():
    """
    Worker task which gets search parameters off the queue
    and executes a SearchTask.
    """
    while not SEARCHES_QUEUE.empty():
        args = SEARCHES_QUEUE.get()
        task = SearchTask(*args)
        try:
            task.determine_rd_coordinates()
        except Exception as exp:
            # when a task fails, continue with the next one
            log.error('\n\n\n')
            log.error(exp)
            log.error('\n\n\n')
def mocked_rpc_server():
    class MockedRpcServer(object):
        queue = Queue()
        outbox = []

        def __init__(self, host, port):
            pass

        @classmethod
        def mocked_send(cls, message):
            cls.queue.put(message.serialize())

        def recv(self):
            results = self.queue.get()
            return Message.unserialize(results)

        def send(self, message):
            self.outbox.append(message.serialize())

    return MockedRpcServer
def processJsonRep(self, socket, address):
    org = self.conf.get('base', 'client_id')
    jsonSocket = jsonSession(socket=socket, org=org)
    while 1:
        try:
            code, data = jsonSocket.recv()
            if code != 0:
                logger.error("local receive error (%s %s)" % (code, data))
                socket.close()
                break
            try:
                _reportQueue.put_nowait(data)
            except gevent.queue.Full:
                logger.error("report queue is full")
                jsonSocket.send_response(conf.global_vars.ErrCode.QueueFull, 'ok')
                continue
            jsonSocket.send_response(0, 'ok')
        except Exception as e:
            logger.error("uncaught error, e={}, traceback={}".format(
                e, traceback.format_exc()))
            socket.close()
            break
def processRep(self, socket, address):
    org = self.conf.get('base', 'client_id')
    pbSocket = pbSession(socket=socket, org=org)
    while 1:
        try:
            code, data = pbSocket.recv(decode=False)
            if code != 0:
                if "connection closed" not in data:
                    logger.error("local receive error (%s %s)" % (code, data))
                socket.close()
                break
            try:
                _reportQueue.put_nowait(data)
            except gevent.queue.Full:
                logger.error("report queue is full")
                pbSocket.send_response(conf.global_vars.ErrCode.QueueFull, 'ok')
                continue
            pbSocket.send_response(0, 'ok')
        except Exception as e:
            logger.error("uncaught error, e={}, traceback={}".format(
                e, traceback.format_exc()))
            socket.close()
            break
def enqueue(self, queue_event_list, max_queued_messages):
    if len(queue_event_list) == 0:
        return
    while True:
        try:
            # get msg
            task_msg = _reportQueue.get()
            if not task_msg:
                continue
            dataid, org, ip = task_msg[0][-3:]
            logger.debug('recv msg, org: %s dataid: %s' % (org, dataid))
            # enqueue
            for (q, flush_ready_event) in queue_event_list:
                if not q.full():
                    q.put_nowait(task_msg)
                else:
                    logger.error("queue full")
                if q.qsize() >= max_queued_messages and not flush_ready_event.is_set():
                    flush_ready_event.set()
        except Exception as e:
            logger.error(e)
def _print_msg(self, _msg=None, _found_msg=False):
    if _msg is None:
        self.print_count += 1
        if self.print_count < 100:
            return
        self.print_count = 0
        msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
            self.found_count, self.queue.qsize(), self.scan_count,
            time.time() - self.start_time)
        sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
    elif _msg.startswith('[+] Check DNS Server'):
        sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)))
    else:
        sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
        if _found_msg:
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count,
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
    sys.stdout.flush()
def pipeline(stages, initial_data):
    monitors = Group()

    # Make sure items in initial_data are iterable.
    if not isinstance(initial_data, types.GeneratorType):
        try:
            iter(initial_data)
        except:
            raise TypeError('initial_data must be iterable')

    # The StopIteration will bubble through the queues as it is reached.
    # Once a stage monitor sees it, it indicates that the stage will read
    # no more data and the monitor can wait for the current work to complete
    # and clean up.
    if hasattr(initial_data, 'append'):
        initial_data.append(StopIteration)

    if not stages:
        return PipelineResult(monitors, [])

    # Chain stage queue I/O: each stage shares an output queue with the
    # next stage's input.
    qs = [initial_data] + [Queue() for _ in range(len(stages))]
    for stage, in_q, out_q in zip(stages, qs[:-1], qs[1:]):
        stage.in_q = in_q
        stage.out_q = out_q
        monitors.spawn(stage_monitor, stage)
    gevent.sleep(0)
    return PipelineResult(monitors, stages[-1].out_q)
def __init__(self, target, id=''):
    self.target = target
    self.id = id
    self.ip = []
    self.dns_ip = ['1.1.1.1', '127.0.0.1', '0.0.0.0', '202.102.110.203',
                   '202.102.110.204', '220.250.64.225']
    self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0'}
    self.queue = Queue()
    self.thread_num = 60
    self.c_count = {}
    self.domain = []
    self.domains = {}
    self.title = {}
    self.appname = {}
    self.removed_domains = []
    self.init()
def remove_error_domain(self):
    while not self.queue.empty():
        domain = self.queue.get()
        try:
            r = requests.get('http://' + domain, timeout=4, allow_redirects=False)
            if r.status_code not in [400, 403, 500]:
                continue
        except requests.exceptions.ConnectTimeout:
            self.removed_domains.append(domain)
            continue
        except requests.exceptions.ConnectionError:
            self.removed_domains.append(domain)
            continue
        except requests.exceptions.TooManyRedirects:
            self.removed_domains.append(domain)
            continue
        except requests.exceptions.ReadTimeout:
            self.removed_domains.append(domain)
            continue
        except:
            continue
def remove_error_subdomain(self, d):
    while not self.queue.empty():
        domain = self.queue.get()
        domain = 'this_subdomain_will_never_exist' + '.' + domain
        resolvers = dns.resolver.Resolver(configure=False)
        resolvers.nameservers = [self.dns[d % len(self.dns)]]
        resolvers.timeout = 10.0
        try:
            # Query via the resolver configured with our nameserver.
            answers = resolvers.query(domain)
            ips = [answer.address for answer in answers]
            for ip in ips:
                if ip in self.dns_ip:
                    continue
                self.removed_domains.append(domain)
        except dns.resolver.NXDOMAIN:
            pass
        except dns.resolver.NoAnswer:
            pass
        except dns.exception.Timeout:
            pass
        except:
            pass
def sub_brute(self, d):
    while not self.queue.empty():
        domain = self.queue.get()
        resolvers = dns.resolver.Resolver(configure=False)
        resolvers.nameservers = [self.dns[d % len(self.dns)]]
        resolvers.timeout = 10.0
        try:
            sys.stdout.write('\rFound: ' + str(len(self.domains.keys())) +
                             ' Remaining: ' + str(self.queue.qsize()))
            sys.stdout.flush()
            answers = resolvers.query(domain)
            ips = [answer.address for answer in answers]
            for ip in ips:
                if ip not in self.dns_ip:
                    if domain in self.domains.keys() and ip not in self.domains[domain]:
                        self.domains[domain].append(ip)
                    else:
                        self.domains[domain] = [ip]
        except dns.resolver.NXDOMAIN:
            continue
        except dns.resolver.NoAnswer:
            continue
        except dns.name.EmptyLabel:
            continue
        except dns.exception.Timeout:
            continue
def directory_brute(self):
    '''
    Brute-force common directory paths on each target.
    :return:
    '''
    while not self.queue.empty():
        _dir = self.queue.get()
        for target in self.targets:
            try:
                url = target + _dir
                self.count += 1
                sys.stdout.write('\rChecked: ' + str(self.count))
                sys.stdout.flush()
                r = requests.get('http://' + target + _dir, allow_redirects=False)
                if r.status_code in [200, 403]:
                    self.sensitive[url] = r.status_code
            except requests.exceptions.ReadTimeout:
                continue
            except requests.exceptions.ConnectionError:
                continue
            except requests.exceptions.TooManyRedirects:
                continue
def listen(self, namespace, max_timeout):
    """Register to listen to a namespace and yield messages as they arrive.

    If no messages arrive within `max_timeout` seconds, this will yield a
    `None` to allow clients to do periodic actions like send PINGs.

    This will run forever and yield items as an iterable. Use it in a loop
    and break out of it when you want to deregister.

    """
    queue = gevent.queue.Queue()

    namespace = namespace.rstrip("/")
    for ns in _walk_namespace_hierarchy(namespace):
        self.consumers.setdefault(ns, []).append(queue)

    try:
        while True:
            # jitter the timeout a bit to ensure we don't herd
            timeout = max_timeout - random.uniform(0, max_timeout / 2)

            try:
                yield queue.get(block=True, timeout=timeout)
            except gevent.queue.Empty:
                yield None

            # ensure we're not starving others by spinning
            gevent.sleep()
    finally:
        for ns in _walk_namespace_hierarchy(namespace):
            self.consumers[ns].remove(queue)
            if not self.consumers[ns]:
                del self.consumers[ns]
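A sketch of how a caller might drive the generator above; the broker, handler, and namespace names are assumptions for illustration, not part of the snippet's project.

# Hypothetical consumer of listen(): the None yields double as a
# periodic tick for keep-alive work.
for message in broker.listen("events", max_timeout=30):
    if message is None:
        send_ping()        # illustrative periodic action
        continue
    handle(message)        # illustrative message handler
    if should_stop():      # breaking out triggers the finally-block cleanup
        break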
def get(self):
    """ Receives ANY message, whichever is first in the queue.
        Blocks the greenlet if the queue is empty; other greenlets
        will continue to run.
    """
    return self.queue_.get()
def get_nowait(self):
    """ Receives ANY message, whichever is first in the queue, or raises.

        :raises queue.Empty: If the queue is empty
    """
    return self.queue_.get_nowait()
def receive_wait(self, filter_fn: Callable):
    """ Repeatedly call receive(filter) until the result is found.
        Other greenlets can continue to run cooperatively.

        :param filter_fn: A callable which checks if a message is desired
            (and returns True) or should be skipped (and returns False)
    """
    while True:
        LOG(self.queue_.queue)
        m = self.receive(filter_fn=filter_fn)
        if m is not None:
            return m
        gevent.sleep(0.0)
def receive(self, filter_fn: Callable):
    """ Apply the filter to all messages in the inbox; the first message
        for which the filter returns True will be returned.

        :param filter_fn: A callable which checks if a message is desired
            (and returns True) or should be skipped (and returns False)
        :returns: The message, if the filter returned True, otherwise
            ``None`` if no message matches or the mailbox was empty
    """
    if self.queue_.empty():
        return None

    # Try every element in the queue: get it, check it, place it back at
    # the queue end (NOTE: this will mix the messages, breaking the order).
    try:
        for i in range(len(self.queue_)):
            m = self.queue_.get_nowait()
            if filter_fn(m):
                LOG("Mailbox: match return", m)
                return m
            self.queue_.put(m)
    except queue.Empty:
        pass
    return None
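A usage sketch for the two methods above; the mailbox variable and the message shape are hypothetical. receive_wait blocks cooperatively until a matching message arrives, without consuming non-matching ones.

# Hypothetical usage: other greenlets keep running while we wait.
reply = mailbox.receive_wait(filter_fn=lambda m: m.get('type') == 'reply')

# Non-blocking variant: returns None if nothing matches right now.
maybe = mailbox.receive(filter_fn=lambda m: m.get('type') == 'event')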
def _default_parser(context, response, queue):
    """Default parser: decode the Response according to its content type."""
    content_type = response.headers["content-type"]
    if content_type.startswith("application/json"):
        return response.json()
    else:
        return response.text
def _sync_execute(self, context, start_req, parser):
    queue = list(start_req)
    result = []
    while queue:
        req = queue.pop(0)
        req = self._check_req(req)
        if req.parser is None:
            req.parser = parser
        result.append(req(context, queue))
    return result
def __call__(self, context, queue):
    """
    :param context: crawl context
    :param queue: task queue
    """
    try:
        response = self.method(*self.args, **self.kws)
        result = self.parser(context, response, queue)
        if self.sleep:
            gevent.sleep(self.sleep)
        return result
    except:
        context.logger.exception(u"crawl error")
        return sys.exc_info()
def __init__(self, pool_name, pool_size, close_conn_f, conn_cls,
             *conn_args, **conn_kwargs):
    """Constructor.

    Args:
        pool_name: name of the pool.
        pool_size: max number of connections to create in the pool.
        close_conn_f: function to close a connection. It should take
            exactly one argument, which is an object returned by conn_cls.
        conn_cls: python class or function for creating a connection.
        conn_args, conn_kwargs: arguments passed to conn_cls to create a
            connection.
    """
    self.pool_name = pool_name
    self.pool_size = pool_size
    assert close_conn_f is None or hasattr(close_conn_f, '__call__')
    self.close_conn_f = close_conn_f
    assert hasattr(conn_cls, '__call__')
    self.conn_cls = conn_cls
    self.conn_args = conn_args
    self.conn_kwargs = conn_kwargs
    # The number of connections in the pool that are ever used,
    # e.g. total unique number of connections returned by get().
    # This is the maximum number of concurrent connections ever reached.
    self.num_connected = 0
    self._queue = gevent.queue.LifoQueue(maxsize=pool_size)
    for i in xrange(0, pool_size):
        # Pre-populate the pool with connection holders.
        self._queue.put(ConnectionHolder(pool_name))
    # Run garbage collection on unused connections.
    # Randomize the GC job start time.
    start_after_secs = random.randint(0, 1000 * GC_INTERVAL_SECS) / 1000.0
    self._gc_job = Periodical("ConnPool-GC-%s" % pool_name,
                              GC_INTERVAL_SECS, start_after_secs,
                              self._gc_unused_conn, MAX_CONN_AGE_SECS)
    self.desc = self._get_desc()
def get(self, block=True, timeout=None):
    """Get a connection holder with connection object (conn) populated.

    Args:
        block: whether to wait if the queue is empty.
        timeout: the max seconds to wait. If no connection is available
            after timeout, a gevent.queue.Empty exception is thrown.

    Returns:
        a ConnectionHolder object with conn populated.
    """
    conn_holder = self._queue.get(block, timeout)
    if conn_holder.conn is None:
        tm = None
        try:
            # In case self._create_conn() blocks, it should block for max
            # timeout seconds.
            tm = gevent.Timeout.start_new(timeout, gevent.queue.Empty)
            conn_holder.set_conn(self._create_conn())
        except:
            # If we fail to create a connection, we put conn_holder back
            # and re-raise the exception.
            conn_holder.set_conn(None)
            self.put(conn_holder)
            raise
        finally:
            if tm:
                tm.cancel()
        self.num_connected += 1
    conn_holder.last_access_time = time.time()
    return conn_holder
def put(self, conn_holder, replace=False):
    """Put back the conn_holder (returned by get()) in the queue.

    Args:
        conn_holder: connection holder returned by get()
        replace: whether to create a new replacement for this connection.
    """
    assert self._queue.qsize() < self.pool_size
    assert conn_holder.pool_name == self.pool_name
    if replace:
        self._close_conn(conn_holder)
    self._queue.put_nowait(conn_holder)
def qsize(self):
    """Return the number of free objects in the queue."""
    return self._queue.qsize()
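A sketch of the checkout/return protocol this pool implies. The class name ConnectionPool and the connection factory functions are assumptions for illustration; the snippets above do not show them.

# Hypothetical construction: a pool of 10 connections.
pool = ConnectionPool('db', 10, close_conn_f=close_conn,
                      conn_cls=create_conn, host='127.0.0.1', port=3306)

holder = pool.get(block=True, timeout=5)   # may raise gevent.queue.Empty
try:
    holder.conn.execute('SELECT 1')        # illustrative use of the raw conn
finally:
    pool.put(holder)                       # always return the holder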
def __init__(self, app, pool_size=30):
    self.task_queue = gevent.queue.Queue()
    self.pool_size = pool_size
    self.app = app
def test_queue2(self):
    """With a bounded queue (maxsize set), put/get block until space or
    items are available."""
    _log.info('test_queue2222222222')
    task_queue = Queue(3)

    def worker(name):
        try:
            while True:
                task = task_queue.get(timeout=1)  # decrements queue size by 1
                print('Worker %s got task %s' % (name, task))
                gevent.sleep(0)
        except Empty:
            print('Quitting time!')

    def boss():
        """ Boss will wait to hand out work until an individual worker is
        free, since the maxsize of the task queue is 3. """
        for i in xrange(1, 10):
            task_queue.put(i)
        print('Assigned all work in iteration 1')
        for i in xrange(10, 20):
            task_queue.put(i)
        print('Assigned all work in iteration 2')

    gevent.joinall([
        gevent.spawn(boss),
        gevent.spawn(worker, 'steve'),
        gevent.spawn(worker, 'john'),
        gevent.spawn(worker, 'bob'),
    ])
def worker(n):
    try:
        while True:
            task = tasks.get(timeout=1)  # decrements queue size by 1
            print('Worker %s got task %s' % (n, task))
            gevent.sleep(0)  # yield so other greenlets get a chance to run
    except Empty:
        print('Quitting time!')
def boss():
    """ Boss will wait to hand out work until an individual worker is
    free, since the maxsize of the task queue is 3. """
    for i in range(1, 10):
        tasks.put(i)
    print('Assigned all work in iteration 1')
    for i in range(10, 20):
        tasks.put(i)
    print('Assigned all work in iteration 2')
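The worker/boss fragments above assume a module-level tasks queue; a minimal driver in the spirit of the gevent tutorial they mirror (the queue size and worker names here are assumptions) would be:

import gevent
from gevent.queue import Queue, Empty

tasks = Queue(maxsize=3)  # boss blocks on put() once three tasks are waiting

gevent.joinall([
    gevent.spawn(boss),
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
    gevent.spawn(worker, 'nancy'),
])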
def _create_greenlet_worker(self, queue):
    def greenlet_worker():
        while True:
            try:
                func = queue.get()
                if func is _STOP:
                    break
                func()
            except Empty:
                continue
            except Exception as exc:
                log.warning("Exception in worker greenlet")
                log.exception(exc)
    return gevent.spawn(greenlet_worker)
def start(self):
    """Start the greenlet workers."""
    with self._state_change:
        if self._running:
            return

        self._running = True

        # Spawn our worker greenlets, we have
        # - A callback worker for watch events to be called
        for queue in (self.callback_queue,):
            w = self._create_greenlet_worker(queue)
            self._workers.append(w)
        python2atexit.register(self.stop)
def stop(self):
    """Stop the request processor."""
    shared = self.shared
    self.shared = None
    log.info("RequestHandler.stop: about to flush requests queue")
    shared.requests.join()
    shared.ending.set()
def _start_thread(self):
    """Run the request processor"""
    # We pass a direct reference to `shared` into the worker, to avoid
    # that thread holding a ref to `self`, which would prevent GC. A
    # previous version of this used a weakref to `self`, but would
    # potentially abort the thread before the requests queue was empty
    shared = self.shared

    def worker():
        try:
            while not shared.ending.is_set():
                try:
                    # set a timeout so we check `ending` every so often
                    task = shared.requests.get(timeout=1)
                except Empty:
                    continue
                try:
                    shared.connection.request(task.request)
                    if task.future:
                        res = shared.connection.response()
                        task.future.set_response(res)
                except Exception as e:
                    if task.future:
                        task.future.set_error(e)
                finally:
                    shared.requests.task_done()
            log.info("RequestHandler worker: exiting cleanly")
        except:
            # deal with interpreter shutdown in the same way that
            # python 3.x's threading module does, swallowing any
            # errors raised when core modules such as sys have
            # already been destroyed
            if _sys is None:
                return
            raise

    name = "pykafka.RequestHandler.worker for {}:{}".format(
        self.shared.connection.host, self.shared.connection.port)
    return self.handler.spawn(worker, name=name)
def __call__(self, environ, start_response):
    self.environ = environ
    uwsgi.websocket_handshake()
    self._req_ctx = None
    if hasattr(uwsgi, 'request_context'):
        # uWSGI >= 2.1.x with support for api access across-greenlets
        self._req_ctx = uwsgi.request_context()
    else:
        # use event and queue for sending messages
        from gevent.event import Event
        from gevent.queue import Queue
        from gevent.select import select
        self._event = Event()
        self._send_queue = Queue()

        # spawn a select greenlet
        def select_greenlet_runner(fd, event):
            """Sets event when data becomes available to read on fd."""
            while True:
                event.set()
                try:
                    select([fd], [], [])[0]
                except ValueError:
                    break

        self._select_greenlet = gevent.spawn(
            select_greenlet_runner, uwsgi.connection_fd(), self._event)
    self.app(self)
def wait(self):
    """Waits for and returns received messages.

    If running in compatibility mode for older uWSGI versions, it also
    sends messages that have been queued by send().

    A return value of None means that the connection was closed.

    This must be called repeatedly. For uWSGI < 2.1.x it must be called
    from the main greenlet."""
    while True:
        if self._req_ctx is not None:
            try:
                msg = uwsgi.websocket_recv(request_context=self._req_ctx)
            except IOError:  # connection closed
                return None
            return self._decode_received(msg)
        else:
            # we wake up at least every 3 seconds to let uWSGI
            # do its ping/ponging
            event_set = self._event.wait(timeout=3)
            if event_set:
                self._event.clear()
                # maybe there is something to send
                msgs = []
                while True:
                    try:
                        msgs.append(self._send_queue.get(block=False))
                    except gevent.queue.Empty:
                        break
                for msg in msgs:
                    self._send(msg)
            # maybe there is something to receive, if not, at least
            # ensure uWSGI does its ping/ponging
            try:
                msg = uwsgi.websocket_recv_nb()
            except IOError:  # connection closed
                self._select_greenlet.kill()
                return None
            if msg:  # message available
                return self._decode_received(msg)
def received_message(self, message):
    """
    Override the base class to store the incoming message
    in the `messages` queue.
    """
    self.messages.put(copy.deepcopy(message))
def closed(self, code, reason=None):
    """
    Puts a :exc:`StopIteration` as a message into the
    `messages` queue.
    """
    # When the connection is closed, put a StopIteration
    # on the message queue to signal there's nothing left
    # to wait for
    self.messages.put(StopIteration)
def receive(self):
    """
    Returns messages that were stored into the `messages` queue
    and returns `None` when the websocket is terminated or closed.
    """
    # If the websocket was terminated and there are no messages
    # left in the queue, return None immediately otherwise the client
    # will block forever
    if self.terminated and self.messages.empty():
        return None
    message = self.messages.get()
    if message is StopIteration:
        return None
    return message
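A sketch of the consuming side of this client: since receive() folds both the StopIteration sentinel and termination into None, a read loop can simply run until None. The ws and process names are illustrative.

# Hypothetical read loop over the websocket client above.
while True:
    message = ws.receive()
    if message is None:
        break              # socket closed or terminated
    process(message)       # illustrative handler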
def wait_for_action(self, timeout=3600, raise_on_hangup=False):
    """
    Wait until an action is over and return the action event.
    """
    self.log.debug("wait for action start")
    try:
        event = self._action_queue.get(timeout=timeout)
        self.log.debug("wait for action end %s" % str(event))
        if raise_on_hangup is True and self.has_hangup():
            self.log.warn("wait for action call hung up !")
            raise RESTHangup()
        return event
    except gevent.queue.Empty:
        if raise_on_hangup is True and self.has_hangup():
            self.log.warn("wait for action call hung up !")
            raise RESTHangup()
        self.log.warn("wait for action end timed out!")
        return Event()

# In order to "block" the execution of our service until the
# command is finished, we use a synchronized queue from gevent
# and wait for such an event to come. The on_channel_execute_complete
# method will put that event in the queue, then we may continue working.
# However, other events will still come, like for instance, DTMF.
def disconnect(self):
    # Prevent a command from getting stuck while waiting for a response
    try:
        self._action_queue.put_nowait(Event())
    except gevent.queue.Full:
        pass
    self.log.debug('Releasing Connection ...')
    super(PlivoOutboundEventSocket, self).disconnect()
    self.log.debug('Releasing Connection Done')
def __init__(self, other, *args, **kwargs):
    from gevent.queue import Queue
    super(MemoryBufferedPlayable, self).__init__(*args, **kwargs)
    self.frames = Queue()
    self.other = other
    gevent.spawn(self._buffer)
def test_worker(self, gen_id_mock, call_mock):
    call_mock.side_effect = TestContestManager._valid_subprocess_call
    ContestManager.tasks["poldo"] = {
        "generator": "/gen",
        "validator": "/val"
    }
    with patch("src.logger.Logger.error",
               side_effect=TestContestManager._stop_worker_loop):
        with patch("gevent.queue.Queue.put",
                   side_effect=NotImplementedError("Stop loop")):
            with self.assertRaises(NotImplementedError) as ex:
                ContestManager.worker("poldo")