The following 50 code examples, extracted from open-source Python projects, illustrate how to use six.moves.queue.Queue().
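Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the compatibility import and the basic producer/consumer pattern most of these examples rely on: six.moves.queue resolves to Queue on Python 2 and queue on Python 3, so the same code runs on both. The worker function and timeout value are illustrative choices, not part of any project's API.

import threading

from six.moves import queue


def worker(q):
    while True:
        try:
            item = q.get(timeout=0.1)  # block briefly, then re-check for exit
        except queue.Empty:
            break
        print('processing', item)
        q.task_done()  # mark the item as handled so q.join() can return


q = queue.Queue(maxsize=10)  # bounded, thread-safe FIFO
for i in range(5):
    q.put(i)

t = threading.Thread(target=worker, args=(q,))
t.daemon = True
t.start()
q.join()  # wait until every item has been marked done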
def __init__(self, config):
    log.debug('Starting KinesisProducer')
    self.config = config
    self._queue = queue.Queue()
    self._closed = False
    accumulator = RecordAccumulator(RawBuffer, config)
    if config['kinesis_concurrency'] == 1:
        client = Client(config)
    else:
        client = ThreadPoolClient(config)
    self._sender = Sender(queue=self._queue,
                          accumulator=accumulator,
                          client=client,
                          partitioner=random_partitioner)
    self._sender.daemon = True
    self._sender.start()
def test_flush(config):
    q = queue.Queue()
    accumulator = RecordAccumulator(RawBuffer, config)
    client = mock.Mock()
    sender = Sender(queue=q, accumulator=accumulator, client=client,
                    partitioner=partitioner)
    sender.flush()
    assert not client.put_record.called
    accumulator.try_append(b'-')
    sender.flush()
    expected_record = (b'-\n', 4)
    client.put_record.assert_called_once_with(expected_record)
def setup(self, config):
    """
    Establish connection to Elasticsearch cluster and start periodic commit.

    :param config: Configuration object.
    :type config: ``dict``
    """
    self.config = config
    self.context_size = config.get(helper.CONTEXT_SIZE, 120)
    self.elastic_bulk = queue.Queue()
    self.elastic = self.config[helper.INJECTOR].get_elasticsearch()
    self.helper = self.config[helper.INJECTOR].get_elasticsearch_helper()
    self.create_mapping()
    thread = threading.Thread(target=self._commit, args=())
    thread.daemon = True
    thread.start()
    self.thread = thread
def run(self):
    futures = queue.Queue(maxsize=121)
    self.start_profile()
    for i in range(self.num_queries):
        if i >= 120:
            old_future = futures.get_nowait()
            old_future.result()
        key = "{}-{}".format(self.thread_num, i)
        future = self.run_query(key)
        futures.put_nowait(future)
    while True:
        try:
            futures.get_nowait().result()
        except queue.Empty:
            break
    self.finish_profile()
def run(self):
    futures = queue.Queue(maxsize=121)
    self.start_profile()
    for i in range(self.num_queries):
        if i > 0 and i % 120 == 0:
            # clear the existing queue
            while True:
                try:
                    futures.get_nowait().result()
                except queue.Empty:
                    break
        key = "{0}-{1}".format(self.thread_num, i)
        future = self.run_query(key)
        futures.put_nowait(future)
    while True:
        try:
            futures.get_nowait().result()
        except queue.Empty:
            break
    self.finish_profile()
def watch_once(self, key, timeout=None, **kwargs):
    """Watch a key and stop after the first event.

    :param key: key to watch
    :param timeout: (optional) timeout in seconds.
    :returns: event
    """
    event_queue = queue.Queue()

    def callback(event):
        event_queue.put(event)

    w = watch.Watcher(self, key, callback, **kwargs)
    try:
        return event_queue.get(timeout=timeout)
    except queue.Empty:
        raise exceptions.WatchTimedOut()
    finally:
        w.stop()
def setUp(self):
    # Simple model with 1 continuous + 1 discrete + 1 continuous variable.
    def model():
        p = Variable(torch.Tensor([0.5]))
        mu = Variable(torch.zeros(1))
        sigma = Variable(torch.ones(1))
        x = pyro.sample("x", Normal(mu, sigma))  # Before the discrete variable.
        y = pyro.sample("y", Bernoulli(p))
        z = pyro.sample("z", Normal(mu, sigma))  # After the discrete variable.
        return dict(x=x, y=y, z=z)

    self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
    self.model = model
    self.queue = Queue()
    self.queue.put(poutine.Trace())
def _traces(self, *args, **kwargs):
    """
    The algorithm is entered here. Running until the queue is empty and
    collecting the marginal histogram performs exact inference.

    :returns: Iterator of traces from the posterior.
    :rtype: Generator[:class:`pyro.Trace`]
    """
    # currently only using the standard library queue
    self.queue = Queue()
    self.queue.put(poutine.Trace())
    p = poutine.trace(
        poutine.queue(self.model, queue=self.queue, max_tries=self.max_tries))
    while not self.queue.empty():
        tr = p.get_trace(*args, **kwargs)
        yield (tr, tr.log_pdf())
def __init__(self, source, buffer_size=2):
    assert buffer_size >= 2, "minimum buffer size is 2"
    # The effective buffer size is one larger, because the generation
    # process will generate one extra element and block until there is room
    # in the buffer.
    self.buffer = Queue(maxsize=buffer_size - 1)

    def populate_buffer():
        try:
            for item in source:
                self.buffer.put((None, item))
        except:
            self.buffer.put((sys.exc_info(), None))
        else:
            self.buffer.put(DONE)

    thread = threading.Thread(target=populate_buffer)
    thread.daemon = True
    thread.start()
def run(self):
    if not self.containers:
        return

    queue = Queue()
    thread_args = queue, self.log_args
    thread_map = build_thread_map(self.containers, self.presenters, thread_args)

    for line in consume_queue(queue, self.cascade_stop):
        remove_stopped_threads(thread_map)

        if not line:
            if not thread_map:
                # There are no running containers left to tail, so exit
                return
            # We got an empty line because of a timeout, but there are still
            # active containers to tail, so continue
            continue

        try:
            self.output.write(line)
            self.output.flush()
        except ValueError:
            # ValueError: I/O operation on closed file
            break
def __init__(self, logger, path, tchannel, deployment_str, headers,
             timeout_seconds, reconfigure_interval_seconds):
    self.logger = logger
    self.path = path
    self.tchannel = tchannel
    self.deployment_str = deployment_str
    self.headers = headers
    self.timeout_seconds = timeout_seconds
    self.task_queue = queue.Queue()
    self.workers = {}
    self.reconfigure_signal = threading.Event()
    self.reconfigure_interval_seconds = reconfigure_interval_seconds
    self.reconfigure_thread = None
def get(self):
    '''Get a task from queue when bucket available'''
    if self.bucket.get() < 1:
        return None
    now = time.time()
    self.mutex.acquire()
    try:
        task = self.priority_queue.get_nowait()
        self.bucket.desc()
    except Queue.Empty:
        self.mutex.release()
        return None
    task.exetime = now + self.processing_timeout
    self.processing.put(task)
    self.mutex.release()
    return task.taskid
def _check_task_done(self):
    '''Check status queue'''
    cnt = 0
    try:
        while True:
            task = self.status_queue.get_nowait()
            # check _on_get_info result here
            if task.get('taskid') == '_on_get_info' and 'project' in task \
                    and 'track' in task:
                if task['project'] not in self.projects:
                    continue
                project = self.projects[task['project']]
                project.on_get_info(task['track'].get('save') or {})
                logger.info(
                    '%s on_get_info %r', task['project'],
                    task['track'].get('save', {})
                )
                continue
            elif not self.task_verify(task):
                continue
            self.on_task_status(task)
            cnt += 1
    except Queue.Empty:
        pass
    return cnt
def init_service(self, endpoint, project, uuid, log_batch_size,
                 ignore_errors, ignored_tags):
    self._errors = queue.Queue()
    if self.RP is None:
        self.ignore_errors = ignore_errors
        self.ignored_tags = ignored_tags
        logging.debug('ReportPortal - Init service: endpoint=%s, '
                      'project=%s, uuid=%s', endpoint, project, uuid)
        self.RP = ReportPortalServiceAsync(
            endpoint=endpoint,
            project=project,
            token=uuid,
            error_handler=self.async_error_handler,
            log_batch_size=log_batch_size
        )
    else:
        logging.debug('The pytest is already initialized')
    return self.RP
def watch_once(self, key, timeout=None, **kwargs):
    """
    Watch a key and stop after the first event.

    If a timeout was specified and no event arrives within it, the method
    raises a ``WatchTimedOut`` exception.

    :param key: key to watch
    :param timeout: (optional) timeout in seconds.
    :returns: ``Event``
    """
    event_queue = queue.Queue()

    def callback(event):
        event_queue.put(event)

    watch_id = self.add_watch_callback(key, callback, **kwargs)
    try:
        return event_queue.get(timeout=timeout)
    except queue.Empty:
        raise exceptions.WatchTimedOut()
    finally:
        self.cancel_watch(watch_id)
def _audio_data_generator(buff):
    """A generator that yields all available data in the given buffer.

    Args:
        buff - a Queue object, where each element is a chunk of data.
    Yields:
        A chunk of data that is the aggregate of all chunks of data in
        `buff`. The function will block until at least one data chunk is
        available.
    """
    while True:
        # Use a blocking get() to ensure there's at least one chunk of data.
        chunk = buff.get()
        if not chunk:
            # A falsey value indicates the stream is closed.
            break
        data = [chunk]

        # Now consume whatever other data's still buffered.
        while True:
            try:
                data.append(buff.get(block=False))
            except queue.Empty:
                break
        yield b''.join(data)
def _test_remote_commands_async():
    u"""
    >>> kak = headless()
    >>> @Remote.command(kak.pid)
    ... def write_position(pipe, line, column):
    ...     pipe(utils.join(('exec ', 'a', str(line), ':', str(column), '<esc>'), sep=''))
    >>> pipe(kak.pid, 'write-position', 'unnamed0')
    >>> time.sleep(0.05)
    >>> pipe(kak.pid, 'exec a,<space><esc>', 'unnamed0', sync=True)
    >>> time.sleep(0.02)
    >>> write_position('unnamed0')
    >>> pipe(kak.pid, 'exec \%H', 'unnamed0', sync=True)
    >>> Remote.onclient(kak.pid, 'unnamed0')(lambda selection: print(selection))
    1:1, 1:5
    >>> q = Queue()
    >>> Remote.onclient(kak.pid, 'unnamed0', sync=False)(lambda selection: q.put(selection))
    >>> print(q.get())
    1:1, 1:5
    >>> pipe(kak.pid, 'quit!', 'unnamed0')
    >>> kak.wait()
    0
    >>> _fifo_cleanup()
    """
    pass
def init(self):
    self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
    self.good = set()
    self.broken = {}
    self.redirected = {}
    # set a timeout for non-responding servers
    socket.setdefaulttimeout(5.0)
    # create output file
    open(path.join(self.outdir, 'output.txt'), 'w').close()

    # create queues and worker threads
    self.wqueue = queue.Queue()
    self.rqueue = queue.Queue()
    self.workers = []
    for i in range(self.app.config.linkcheck_workers):
        thread = threading.Thread(target=self.check_thread)
        thread.setDaemon(True)
        thread.start()
        self.workers.append(thread)
def pull_batch_from_queue(self):
    """
    Take a rollout from the queue of the thread runner.
    """
    # get top rollout from queue (FIFO)
    rollout = self.runner.queue.get(timeout=600.0)
    while not rollout.terminal:
        try:
            # Now, get the remaining *available* rollouts from the queue and
            # append them to the one above. If queue.Queue(5) is full and
            # everything is super fast (not usually the case), then all 5 will
            # be returned before the exception is raised. In that case, the
            # effective batch_size would become
            # constants['ROLLOUT_MAXLEN'] * queue_maxlen (5). But that is
            # almost never the case: collecting a rollout of
            # length=ROLLOUT_MAXLEN takes more time than get(), so there are
            # usually no more available rollouts in the queue and the
            # exception is always raised. Hence, one should ideally keep
            # queue_maxlen = 1. Also note that the next rollout generation is
            # invoked automatically, because the runner is a thread that is
            # always running, using 'yield' at the end of the generation
            # process. To conclude, effective
            # batch_size = constants['ROLLOUT_MAXLEN'].
            rollout.extend(self.runner.queue.get_nowait())
        except queue.Empty:
            break
    return rollout
def __init__(self, delegate, address='', port=22, backlog=5, key=None,
             timeout=None, encoding='ascii', handler=Handler):
    threading.Thread.__init__(self, name='sshim.Server')
    self.exceptions = queue.Queue()
    self.encoding = encoding
    self.timeout = timeout
    self.counter = Counter()
    self.handler = handler
    self.delegate = delegate
    self.daemon = True

    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.bind((address, port))
    self.socket.listen(backlog)
    logging.info('sshim.Server listening on %s:%d', *self.socket.getsockname())
    self.key = key or DEFAULT_KEY
def __init__(self, api_key, user_agent, base_url, entity, project, run_id):
    self._endpoint = "{base}/{entity}/{project}/{run}/file_stream".format(
        base=base_url, entity=entity, project=project, run=run_id)
    self._client = requests.Session()
    self._client.auth = ('api', api_key)
    self._client.timeout = self.HTTP_TIMEOUT
    self._client.headers.update({
        'User-Agent': user_agent,
    })
    self._file_policies = {}
    self._queue = queue.Queue()
    self._thread = threading.Thread(target=self._thread_body)
    # It seems we need to make this a daemon thread to get sync.py's atexit
    # handler to run, which cleans this thread up.
    self._thread.daemon = True
    self._thread.start()
def _thread_body(self):
    while True:
        event = self._queue.get()
        if isinstance(event, EventFinish):
            break
        self._handle_event(event)

    while True:
        try:
            event = self._queue.get(True, 1)
        except queue.Empty:
            event = None
        if event:
            self._handle_event(event)
        elif not self._jobs:
            # Queue was empty and no jobs left.
            break
def record_audio(self, rate, chunk):
    """Opens a recording stream in a context manager."""
    # Create a thread-safe buffer of audio data
    buff = queue.Queue()

    audio_stream = self.audio_interface.open(
        format=self.FORMAT,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1,
        rate=rate,
        input=True,
        frames_per_buffer=chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't overflow
        # while the calling thread makes network requests, etc.
        stream_callback=functools.partial(self._fill_buffer, buff),
    )

    yield buff

    audio_stream.stop_stream()
    audio_stream.close()
    # Signal the _audio_data_generator to finish
    buff.put(None)
    self.audio_interface.terminate()
    # [END audio_stream]
def __init__(self, delegate, logger=None, address='', port=22, backlog=5,
             key=None, timeout=None, encoding='ascii', handler=Handler):
    threading.Thread.__init__(self, name='sshim.Server')
    self.logger = logger
    self.exceptions = queue.Queue()
    self.encoding = encoding
    self.timeout = timeout
    self.counter = Counter()
    self.handler = handler
    self.delegate = delegate
    self.daemon = True

    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.bind((address, port))
    self.socket.listen(backlog)
    msg = 'sshim.Server listening on {}:{}'.format(
        self.socket.getsockname()[0], self.socket.getsockname()[1])
    self.add_msg(msg)
    self.key = key or DEFAULT_KEY
def __init__(self):
    self.bid = {
        'price': float(16),
        'date': str('2015-01-05'),
        'time': str(time.mktime(datetime.datetime.now().timetuple())),
        'amount': int(10),
        'towards': int(1),
        'code': str('000001'),
        'user': str('root'),
        'strategy': str('example01'),
        'status': '0x01',
        'bid_model': 'strategy',
        'amount_model': 'amount',
        'order_id': str(random.random())
    }
    self.bid_queue = queue.Queue(maxsize=20)
def __init__(self, max_concurrent_batches=10, block_on_send=False,
             block_on_response=False):
    self.max_concurrent_batches = max_concurrent_batches
    self.block_on_send = block_on_send
    self.block_on_response = block_on_response

    session = requests.Session()
    session.headers.update({"User-Agent": "libhoney-py/" + VERSION})
    self.session = session

    # libhoney adds events to the pending queue for us to send
    self.pending = queue.Queue(maxsize=1000)
    # we hand back responses from the API on the responses queue
    self.responses = queue.Queue(maxsize=2000)

    self.threads = []
    for i in range(self.max_concurrent_batches):
        t = threading.Thread(target=self._sender)
        t.daemon = True
        t.start()
        self.threads.append(t)
def __init__(self, endpoint, project, token, api_base="api/v1",
             error_handler=None, log_batch_size=20):
    """Init the service class.

    Args:
        endpoint: endpoint of report portal service.
        project: project name to use for launch names.
        token: authorization token.
        api_base: defaults to api/v1, can be changed to other version.
        error_handler: function to be called to handle errors occurred
            during items processing (in thread)
    """
    super(ReportPortalServiceAsync, self).__init__()
    self.error_handler = error_handler
    self.log_batch_size = log_batch_size
    self.rp_client = ReportPortalService(endpoint, project, token, api_base)
    self.log_batch = []
    self.supported_methods = ["start_launch", "finish_launch",
                              "start_test_item", "finish_test_item", "log"]
    self.queue = queue.Queue()
    self.listener = QueueListener(self.queue, self.process_item)
    self.listener.start()
def __init__(self, fps=22.4, step_mul=1, render_sync=False):
    """Create a renderer for use by humans.

    Make sure to call `init` with the game info, or just use `run`.

    Args:
        fps: How fast should the game be run.
        step_mul: How many game steps to take per observation.
        render_sync: Whether to wait for the obs to render before continuing.
    """
    self._fps = fps
    self._step_mul = step_mul
    self._render_sync = render_sync
    self._obs_queue = queue.Queue()
    self._render_thread = threading.Thread(target=self.render_thread,
                                           name="Renderer")
    self._render_thread.start()
    self._game_times = collections.deque(maxlen=100)  # Avg FPS over 100 frames.
    self._render_times = collections.deque(maxlen=100)
    self._last_time = time.time()
    self._last_game_loop = 0
    self._name_lengths = {}
def _get_items(self):
    """Get multiple items from a Queue.

    Gets at least one (blocking) and at most ``max_items`` items
    (non-blocking) from a given Queue. Does not mark the items as done.

    :rtype: Sequence
    :returns: A sequence of items retrieved from the queue.
    """
    items = [self._queue.get()]
    while len(items) < self._max_batch_size:
        try:
            items.append(self._queue.get_nowait())
        except queue.Empty:
            break
    return items
def __init__(self, receive_port):
    """Receives health pills from a debugger and writes them to disk.

    Args:
        receive_port: The port at which to receive health pills from the
            TensorFlow debugger.
    """
    super(InteractiveDebuggerDataServer, self).__init__(
        receive_port, InteractiveDebuggerDataStreamHandler)

    self._incoming_channel = queue.Queue()
    self._outgoing_channel = comm_channel_lib.CommChannel()
    self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)
    self._tensor_store = tensor_store_lib.TensorStore()

    curried_handler_constructor = functools.partial(
        InteractiveDebuggerDataStreamHandler, self._incoming_channel,
        self._outgoing_channel, self._run_states, self._tensor_store)
    grpc_debug_server.EventListenerBaseServicer.__init__(
        self, receive_port, curried_handler_constructor)
def test_retry_loop_does_not_retry_task(mock_retrying_executor):
    mock_event = _get_mock_event(is_terminal=True)
    mock_retrying_executor.stopping = True
    mock_retrying_executor._is_current_attempt = mock.Mock(return_value=True)
    mock_retrying_executor.retry = mock.Mock(return_value=False)
    mock_retrying_executor.retry_pred = mock.Mock(return_value=False)
    mock_retrying_executor.task_retries = \
        mock_retrying_executor.task_retries.set(mock_event.task_id, 1)

    modified_task_id = mock_event.task_id + '-retry1'
    modified_mock_event = mock_event.set('task_id', modified_task_id)
    mock_retrying_executor.src_queue = Queue()
    mock_retrying_executor.src_queue.put(modified_mock_event)

    mock_retrying_executor.retry_loop()

    assert mock_retrying_executor.dest_queue.qsize() == 1
    assert len(mock_retrying_executor.task_retries) == 0
def __init__(self, downstream_executor, retry_pred=lambda e: not e.success,
             retries=3):
    self.executor = downstream_executor
    self.retries = retries
    self.retry_pred = retry_pred
    self.task_retries = m()
    self.task_retries_lock = Lock()
    self.src_queue = downstream_executor.get_event_queue()
    self.dest_queue = Queue()
    self.stopping = False

    self.retry_thread = Thread(target=self.retry_loop)
    self.retry_thread.daemon = True
    self.retry_thread.start()
def __init__(self, downstream_executor):
    self.downstream_executor = downstream_executor
    self.tasks_lock = Lock()
    # Tasks that are pending termination
    self.killed_tasks = []
    # Tasks that are currently running
    self.running_tasks = []
    self.src_queue = downstream_executor.get_event_queue()
    self.dest_queue = Queue()
    self.stopping = False

    self.timeout_thread = Thread(target=self.timeout_loop)
    self.timeout_thread.daemon = True
    self.timeout_thread.start()
def session(self):
    '''Manage Nikon session with context manager.'''
    # When raw device, do not perform
    if self.__no_polling:
        with super(Nikon, self).session():
            yield
        return
    # Within a normal PTP session
    with super(Nikon, self).session():
        # launch a polling thread
        self.__event_queue = Queue()
        self.__nikon_event_proc = Thread(
            name='NikonEvtPolling',
            target=self.__nikon_poll_events
        )
        self.__nikon_event_proc.daemon = False
        atexit.register(self._nikon_shutdown)
        self.__nikon_event_proc.start()
        try:
            yield
        finally:
            self._nikon_shutdown()
def __init__(self, device=None):
    '''Instantiate the first available PTP device over IP'''
    self.__setup_constructors()
    logger.debug('Init IP')
    self.__dev = device
    if device is None:
        raise NotImplementedError(
            'IP discovery not implemented. Please provide a device.'
        )
    self.__device = device
    # Signal usable implicit session
    self.__implicit_session_open = Event()
    # Signal implicit session is shutting down
    self.__implicit_session_shutdown = Event()
    self.__check_session_lock = Lock()
    self.__transaction_lock = Lock()
    self.__event_queue = Queue()
    atexit.register(self._shutdown)
def __init__(self, fps, mod_config):
    self._env = Environment.get_instance()
    self.mod_config = mod_config
    self.fps = fps
    self.event_queue = Queue()

    self.before_trading_fire_date = datetime.date(2000, 1, 1)
    self.after_trading_fire_date = datetime.date(2000, 1, 1)
    self.settlement_fire_date = datetime.date(2000, 1, 1)

    if not mod_config.redis_uri:
        self.quotation_engine_thread = Thread(target=self.quotation_worker)
        self.quotation_engine_thread.daemon = True
        self.clock_engine_thread = Thread(target=self.clock_worker)
        self.clock_engine_thread.daemon = True
def __init__(self, predictors, batch_size=5):
    """ :param predictors: a list of OnlinePredictor"""
    assert len(predictors)
    for k in predictors:
        # assert isinstance(k, OnlinePredictor), type(k)
        # TODO use predictors.return_input here
        assert k.return_input == False
    self.input_queue = queue.Queue(maxsize=len(predictors) * 100)
    self.threads = [
        PredictorWorkerThread(self.input_queue, f, id, batch_size=batch_size)
        for id, f in enumerate(predictors)]

    if six.PY2:
        # TODO XXX set logging here to avoid affecting TF logging
        import tornado.options as options
        options.parse_command_line(['--logging=debug'])
def __call__(self, event):
    group = self._group_by(event)
    try:
        queue = self._queues[group]
    except KeyError:
        queue = six_queue.Queue(self._queue_depth)
        self._queues[group] = queue
        thread = self._thread_group.add_thread(self._run, group, queue)
        thread.link(self._done, group)
    queue.put(event)
def __init__(self, env, policy, num_local_steps, render=True):
    threading.Thread.__init__(self)
    self.queue = queue.Queue(5)
    self.num_local_steps = num_local_steps
    self.env = env
    self.last_features = None
    self.policy = policy
    self.daemon = True
    self.sess = None
    self.summary_writer = None
    self.render = render
def __init__(self, n_threads, queue_size=0):
    self._n_threads = n_threads
    self._queue = Queue.Queue(maxsize=queue_size)  # 0 = infinite size
    self._error_queue = Queue.Queue(maxsize=queue_size)
    self._threads = ()
    self._terminate = threading.Event()
    self._processed_lock = threading.Lock()
    self.processed = 0
    self._inserted = 0
    self.with_progress = None

    self.start_threads(n_threads)
def _consume_queue(self, terminate_evt):
    """
    This is the main thread function that consumes functions that are
    inside the _queue object. To use, execute self._queue(fn), where fn
    is a function that performs some kind of network IO or otherwise
    benefits from threading and is independent.

    terminate_evt is automatically passed in on thread creation and is a
    common event for this generation of threads. The threads will
    terminate when the event is set and the queue burns down.

    Returns: void
    """
    interface = self._initialize_interface()

    while not terminate_evt.is_set():
        try:
            fn = self._queue.get(block=True, timeout=0.01)
        except Queue.Empty:
            continue  # periodically check if the thread is supposed to die

        fn = partial(fn, interface)

        try:
            self._consume_queue_execution(fn)
        except Exception as err:
            self._error_queue.put(err)

    self._close_interface(interface)
def _check_errors(self):
    try:
        err = self._error_queue.get(block=False)
        self._error_queue.task_done()
        self.kill_threads()
        raise err
    except Queue.Empty:
        pass
def __init__(self):
    self.pool = Queue.Queue(maxsize=0)
    self.outstanding = 0
    self._lock = threading.Lock()

    def handler(signum, frame):
        self.reset_pool()

    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
def get_connection(self):
    with self._lock:
        try:
            conn = self.pool.get(block=False)
            self.pool.task_done()
        except Queue.Empty:
            conn = self._create_connection()
        finally:
            self.outstanding += 1
    return conn
def test_init(config):
    q = queue.Queue()
    accumulator = RecordAccumulator(RawBuffer, config)
    client = mock.Mock()
    sender = Sender(queue=q, accumulator=accumulator, client=client,
                    partitioner=partitioner)
    sender.start()
    sender.close()
    sender.join()