The following code examples, extracted from open-source Python projects, illustrate how to use threading.Semaphore().
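Before the extracted examples, here is a minimal, self-contained sketch (ours, not taken from any of the projects below) of the core pattern most of them build on: a counting semaphore that caps how many threads may run a section of code at once.

import threading

# A semaphore initialized to 3 lets at most three threads
# hold a "slot" at the same time; the rest block in acquire().
sem = threading.Semaphore(3)

def worker(i):
    with sem:  # acquire() on entry, release() on exit
        print("worker {} holds a slot".format(i))

threads = [threading.Thread(target=worker, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()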
def get(self, name):
    """Gets (or creates) a semaphore with a given name.

    :param name: The semaphore name to get/create (used to associate
                 previously created names with the same semaphore).

    Returns a newly constructed semaphore (or an existing one if it was
    already created for the given name).
    """
    with self._lock:
        try:
            return self._semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            self._semaphores[name] = sem
            return sem
def connect(self, nvim, ui, profile=None, notify=False):
    """Connect nvim and the ui.

    This will start loops for handling the UI and nvim events while
    also synchronizing both.
    """
    self._notify = notify
    self._error = None
    self._nvim = nvim
    self._ui = ui
    self._profile = profile
    self._sem = Semaphore(0)
    self.debug_events = len(os.environ.get("NVIM_PYTHON_UI_DEBUG", "")) > 0
    t = Thread(target=self._nvim_event_loop)
    t.daemon = True
    t.start()
    self._ui_event_loop()
    if self._error:
        print(self._error)
    if self._profile:
        print(self._profile)
def __init__(self, num_threads, sleep=InterruptibleSleep):
    """Constructor for ThreadGate instances.

    Args:
      num_threads: The total number of threads using this gate.
      sleep: Used for dependency injection.
    """
    self.__enabled_count = 1
    self.__lock = threading.Lock()
    self.__thread_semaphore = threading.Semaphore(self.__enabled_count)
    self.__num_threads = num_threads
    self.__backoff_time = 0
    self.__sleep = sleep
def __init__(self, config, tee, node_name, node_config):
    self._config = config
    self._tee = tee
    self.node_name = node_name
    self.node_config = node_config
    self._thread_limit = Semaphore(self._config.docker['thread_limit'])
    tls = False
    if self.node_config.get('tls'):
        tls = docker.tls.TLSConfig(**self.node_config['tls'])
    try:
        client_class = docker.APIClient
    except AttributeError:
        client_class = docker.Client
        self._tee('Node {}: Fallback to old docker-py Client.'.format(self.node_name))
    self.client = client_class(
        base_url=self.node_config['base_url'],
        tls=tls,
        timeout=self._config.docker.get('api_timeout'),
        version='auto'
    )
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             converter=None, validate_certs=True, anon=False,
             security_token=None, profile_name=None):
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint,
                            connection_cls=STSConnection)
    self.region = region
    self.anon = anon
    self._mutex = threading.Semaphore()
    super(STSConnection, self).__init__(aws_access_key_id,
                                        aws_secret_access_key,
                                        is_secure, port, proxy, proxy_port,
                                        proxy_user, proxy_pass,
                                        self.region.endpoint, debug,
                                        https_connection_factory, path,
                                        validate_certs=validate_certs,
                                        security_token=security_token,
                                        profile_name=profile_name)
def test_direct_call(self):
    """Calls the activity directly.

    Creates one trigger and an activity and triggers it.
    """
    foo = TriggerBase()
    bas = [None]
    s = Semaphore(0)

    @activity(foo)
    async def bar():
        bas[0] = "Triggered"
        s.release()

    asyncio.run_coroutine_threadsafe(bar(), self.loop)
    s.acquire()
    self.assertEqual(bas[0], "Triggered")

# Activities outside of modules were only meant to be used during early stages
# of development. They are officially not supported.
def test_parameters(self):
    """Triggers an activity and passes extra parameters."""
    bas = [None]
    foo = TriggerBase()
    s = Semaphore(0)

    @activity(foo, "arg", k="kwarg")
    async def bar(p, k):
        bas[0] = p + k
        s.release()

    asyncio.run_coroutine_threadsafe(foo.trigger(), self.loop)
    assert s.acquire(timeout=0.1)
    self.assertEqual(bas[0], "argkwarg")
def test_simple_descriptor_trigger(self):
    class Foo(ModuleBase):
        def __init__(self, s):
            super().__init__()
            self.bar = None
            self.s = s

        trigger = DescriptorClassTrigger(TriggerBase)

        @activity(trigger)
        async def activity(self):
            self.bar = "qwertyuiop"
            s.release()

    s = Semaphore(0)
    foo = Foo(s)
    asyncio.run_coroutine_threadsafe(foo.trigger.trigger(), self.loop)
    self.assertTrue(s.acquire(timeout=0.1))
    self.assertEqual(foo.bar, "qwertyuiop")
def test_clock(self):
    bas = [0]
    clk = Clock(100)
    s = Semaphore(0)

    @activity(clk)
    async def foo():
        bas[0] += 1
        if bas[0] >= 5:
            clk.stop()
            s.release()

    t0 = time()
    clk.start()
    self.assertTrue(s.acquire(timeout=0.1))
    self.assertGreaterEqual(time() - t0, 0.05)
    self.assertEqual(bas[0], 5)
def connect(self, nvim, ui, profile=None, notify=False):
    """Connect nvim and the ui.

    This will start loops for handling the UI and nvim events while
    also synchronizing both.
    """
    self._notify = notify
    self._error = None
    self._nvim = nvim
    self._ui = ui
    self._profile = profile
    self._sem = Semaphore(0)
    t = Thread(target=self._nvim_event_loop)
    t.daemon = True
    t.start()
    self._ui_event_loop()
    if self._error:
        print(self._error)
    if self._profile:
        print(self._profile)
def releaseDiskResource(ngamsCfgObj, slotId):
    """
    Release a disk resource acquired with
    ngamsHighLevelLib.acquireDiskResource().

    ngamsCfgObj:   NG/AMS Configuration Object (ngamsConfig).

    slotId:        Slot ID referring to the disk resource (string).

    Returns:       Void.
    """
    T = TRACE()

    storageSet = ngamsCfgObj.getStorageSetFromSlotId(slotId)
    if not storageSet.getMutex():
        return

    global _diskMutexSems
    if slotId not in _diskMutexSems:
        _diskMutexSems[slotId] = threading.Semaphore(1)
    logger.debug("Releasing disk resource with Slot ID: %s", slotId)
    _diskMutexSems[slotId].release()
def __init__(self, sparql_endpoint=None, thread_limiter=4):
    """Creates the dataset class.

    The default endpoint is the original Wikidata one.

    :param string sparql_endpoint: The URI of the SPARQL endpoint
    :param integer thread_limiter: The number of concurrent HTTP queries
    """
    if sparql_endpoint is not None:
        self.SPARQL_ENDPOINT = sparql_endpoint

    self.th_semaphore = threading.Semaphore(thread_limiter)
    # self.query_sem = threading.Semaphore(thread_limiter)

    # Instantiate split subs as false
    self.splited_subs = {'updated': False}
def __init__(self, *args, **kwds):
    # Set up the NCS object, containing mechanisms
    # for communicating between NCS and this user code.
    self._ncs = NcsPyVM(*args, **kwds)

    # Just checking if the NCS logging works...
    self.debug('Initializing object')

    # Register our 'finish' callback
    self._finish_cb = lambda: self.finish()
    self._ncs.reg_finish(self._finish_cb)

    self.mypipe = os.pipe()
    self.waithere = threading.Semaphore(0)  # Create as blocked

# This method starts the user application in a thread
def StartPLC(self):
    if self.CurrentPLCFilename is not None and self.PLCStatus == "Stopped":
        c_argv = ctypes.c_char_p * len(self.argv)
        error = None
        res = self._startPLC(len(self.argv), c_argv(*self.argv))
        if res == 0:
            self.PLCStatus = "Started"
            self.StatusChange()
            self.PythonRuntimeCall("start")
            self.StartSem = Semaphore(0)
            self.PythonThread = Thread(target=self.PythonThreadProc)
            self.PythonThread.start()
            self.StartSem.acquire()
            self.LogMessage("PLC started")
        else:
            self.LogMessage(0, _("Problem starting PLC : error %d" % res))
            self.PLCStatus = "Broken"
            self.StatusChange()
def setUp(self):
    """Set up a TCP server to receive log messages, and a SocketHandler
    pointing to that server's address and port."""
    BaseTest.setUp(self)
    self.server = server = self.server_class(self.address,
                                             self.handle_socket, 0.01)
    server.start()
    server.ready.wait()
    hcls = logging.handlers.SocketHandler
    if isinstance(server.server_address, tuple):
        self.sock_hdlr = hcls('localhost', server.port)
    else:
        self.sock_hdlr = hcls(server.server_address, None)
    self.log_output = ''
    self.root_logger.removeHandler(self.root_logger.handlers[0])
    self.root_logger.addHandler(self.sock_hdlr)
    self.handled = threading.Semaphore(0)
def __init__(self, ip="0.0.0.0", port=3671, valueCache=None): """Initialize the connection to the given host/port Initialized the connection, but does not connect. """ self.remote_ip = ip self.remote_port = port self.discovery_port = None self.data_port = None self.connected = False self.result_queue = queue.Queue() self.ack_semaphore = threading.Semaphore(0) self.conn_state_ack_semaphore = threading.Semaphore(0) if valueCache is None: self.value_cache = ValueCache() else: self.value_cache = valueCache self.connection_state = 0 self.keepalive_thread = threading.Thread(target=self.keepalive, args=()) self.keepalive_thread.daemon = True self.keepalive_thread.start() self._lock = threading.Lock() self._write_delay = 0.05
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             converter=None, validate_certs=True):
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint,
                            connection_cls=STSConnection)
    self.region = region
    self._mutex = threading.Semaphore()
    AWSQueryConnection.__init__(self, aws_access_key_id,
                                aws_secret_access_key,
                                is_secure, port, proxy, proxy_port,
                                proxy_user, proxy_pass,
                                self.region.endpoint, debug,
                                https_connection_factory, path,
                                validate_certs=validate_certs)
def myrunner(func):
    sem = td.Semaphore(config.num_thread)

    def wrapper(i):
        sem.acquire()
        try:
            func(i)
        except Exception as e:
            raise
        finally:
            sem.release()

    ts = []
    for i in range(10):
        t = td.Thread(target=wrapper, args=(i,))
        t.start()
        ts.append(t)
    for t in ts:
        t.join()
def __init__(self, config):
    self.config = config
    self.max_fetches = Semaphore(self.config.opt_dict['max_simultaneous_fetches'])
    self.count_lock = RLock()
    self.progress_counter = 0
    self.channel_counters = {}
    self.source_counters = {}
    self.source_counters['total'] = {}
    self.raw_json = {}
    self.cache_id = self.config.cache_id
    self.json_id = self.config.json_id
    self.ttvdb1_id = self.config.ttvdb1_id
    self.ttvdb2_id = self.config.ttvdb2_id
    self.imdb3_id = self.config.imdb3_id
# end init()
def __init__(self, session, cache, heuristic, transform=None,
             limiter=None, max_inflight=0):
    """
    :param session: requests session to use
    :param cache: cache to use
    :param heuristic: function that accepts a partially constructed Response
        object (with only `expiry` set to `None`) and returns the number of
        seconds this data will be fresh for.
    :param transform: function that accepts a partially constructed Response
        object (with `expiry` and `transformed` still set to `None`) and
        returns any object to represent this data, which may be used to
        determine the result's lifetime
    :param limiter: This object is called once every time the network is
        accessed. Any returned data is discarded.
    """
    self.session = session
    self.cache = cache
    self.heuristic = heuristic
    self.transform = transform or (lambda x: None)
    self.limiter = limiter or (lambda: None)
    if max_inflight > 0:
        self.inflight = Semaphore(max_inflight)
    else:
        self.inflight = None
def __init__(self, radio, parent=None):
    threading.Thread.__init__(self)
    gobject.GObject.__init__(self)
    self.__queue = {}
    if parent:
        self.__runlock = parent._get_run_lock()
        self.status = lambda msg: parent.status(msg)
    else:
        self.__runlock = threading.Lock()
        self.status = self._status
    self.__counter = threading.Semaphore(0)
    self.__lock = threading.Lock()
    self.__enabled = True
    self.radio = radio
def __init__(self, size):
    self.__pool = threading.Semaphore(size)
    self.__threads = []
def __init__(self, n):
    self.n = n
    self.count = 0
    self.mutex = Semaphore(1)
    self.barrier = Semaphore(0)
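The snippet above shows only the initializer of a barrier built from two semaphores: mutex guards count, and barrier holds arriving threads. For context, here is a minimal sketch of how such a class is classically completed; the wait method name and body are our assumption, following the standard two-semaphore barrier pattern, not code from the original project.

from threading import Semaphore

class Barrier:
    def __init__(self, n):
        self.n = n
        self.count = 0
        self.mutex = Semaphore(1)    # protects count
        self.barrier = Semaphore(0)  # blocks threads until all n arrive

    def wait(self):
        # Hypothetical completion of the snippet above (single-use barrier).
        with self.mutex:
            self.count += 1
            arrived = self.count
        if arrived == self.n:
            # The last thread to arrive unblocks everyone, itself included.
            for _ in range(self.n):
                self.barrier.release()
        self.barrier.acquire()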
def test_timer_is_well_created_and_delayed(self):
    sem = threading.Semaphore(1)

    def delayed():
        sem.release()

    lock_time = time.time()
    sem.acquire()
    caduc.timer.Timer(1, delayed).start()
    sem.acquire()
    release_time = time.time()
    (release_time - lock_time).should.be.eql(1., epsilon=0.1)
def setUp(self):
    self.faker = faker.Faker()
    self.semaphore = mock.Mock()
    self.original_semaphore = threading.Semaphore
    threading.Semaphore = mock.MagicMock(return_value=self.semaphore)
    self.semaphore.reset_mock()
def tearDown(self):
    threading.Semaphore = self.original_semaphore
def test_init_default_count(self):
    ClientSemaphore()
    threading.Semaphore.assert_called_once_with(5)
def test_init_provided_count(self):
    threading.Semaphore = mock.MagicMock()
    ClientSemaphore(10)
    threading.Semaphore.assert_called_once_with(10)
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
    sema = threading.Semaphore(value=0)

    def observer(val):
        if cond(val):
            sema.release()

    self.observe_property(name, observer)
    if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
        sema.acquire()
    self.unobserve_property(name, observer)
def __init__(self):
    self.mutex = threading.RLock()
    self.can_read = threading.Semaphore(0)
    self.can_write = threading.Semaphore(0)
    self.active_readers = 0
    self.active_writers = 0
    self.waiting_readers = 0
    self.waiting_writers = 0
def __init__(self):
    self._profile = None
    self._lstVdfProfiles = self.builder.get_object("tvVdfProfiles").get_model()
    self._q_games = collections.deque()
    self._q_profiles = collections.deque()
    self._s_games = threading.Semaphore(0)
    self._s_profiles = threading.Semaphore(0)
    self._lock = threading.Lock()
    self.__profile_load_started = False
    self._on_preload_finished = None
def __init__(self, training_label_prefix, dataset_name=None, epochs=None,
             time_limit=None, num_gpus=None):
    if not ((epochs is None) ^ (time_limit is None)):
        raise ValueError('epochs or time_limit must be present, '
                         'but not both!')
    self._training_label_prefix = training_label_prefix
    self._dataset_name = dataset_name or active_config().dataset_name
    self._validate_training_label_prefix()
    self._epochs = epochs
    self._time_limit = time_limit
    fixed_config_keys = dict(dataset_name=self._dataset_name,
                             epochs=self._epochs,
                             time_limit=self._time_limit)
    self._config_builder = Embed300FineRandomConfigBuilder(fixed_config_keys)
    try:
        self._num_gpus = len(sh.nvidia_smi('-L').split('\n')) - 1
    except sh.CommandNotFound:
        self._num_gpus = 1
    self._num_gpus = num_gpus or self._num_gpus
    # TODO! Replace set with a thread-safe set
    self._available_gpus = set(range(self.num_gpus))
    self._semaphore = Semaphore(self.num_gpus)
    self._running_commands = []  # a list of (index, sh.RunningCommand)
    self._stop_search = False
    self._lock = Lock()
def main(training_label_prefix, dataset_name=None, epochs=None,
         time_limit=None, num_gpus=None):
    epochs = int(epochs) if epochs else None
    time_limit = parse_timedelta(time_limit) if time_limit else None
    num_gpus = int(num_gpus) if num_gpus else None
    search = HyperparamSearch(training_label_prefix=training_label_prefix,
                              dataset_name=dataset_name,
                              epochs=epochs,
                              time_limit=time_limit,
                              num_gpus=num_gpus)

    def handler(signum, frame):
        logging('Stopping hyperparam search..')
        with search.lock:
            search.stop()
            for index, running_command in search.running_commands:
                try:
                    label = search.training_label(index)
                    logging('Sending SIGINT to {}..'.format(label))
                    running_command.signal(signal.SIGINT)
                except OSError:
                    # The process might have exited before
                    logging('{} might have terminated before.'.format(label))
                except:
                    traceback.print_exc(file=sys.stderr)
        logging('All training processes have been sent SIGINT.')

    signal.signal(signal.SIGINT, handler)

    # We need to execute search.run() in another thread so that the Semaphore
    # inside it does not block the signal handler. Otherwise, the signal
    # handler would only run after some training process finishes a whole
    # epoch.
    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(search.run)
    # wait must be True in order for the mock to work;
    # see the unit test for more details
    executor.shutdown(wait=True)
def __init__(self):
    self.tasks_by_path = {}
    self.queued_tasks = []
    self.semaphore = threading.Semaphore(0)
    self._lock = threading.Lock()
def __init__(self, timeFrame=1.0, callLimit=6):
    """
    timeFrame = float time in secs [default = 1.0]
    callLimit = int max amount of calls per 'timeFrame' [default = 6]
    """
    self.timeFrame = timeFrame
    self.semaphore = Semaphore(callLimit)
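Only the initializer of this rate limiter is shown. One plausible way to use such a timeFrame/callLimit pair, sketched below as an assumption rather than code from the original project (the RateLimiter class name and call method are ours), is to acquire the semaphore before each call and schedule the release timeFrame seconds later with threading.Timer:

import threading
from threading import Semaphore

class RateLimiter:
    def __init__(self, timeFrame=1.0, callLimit=6):
        self.timeFrame = timeFrame
        self.semaphore = Semaphore(callLimit)

    def call(self, func, *args, **kwargs):
        # Hypothetical method: block while callLimit calls are already in
        # flight for this window, then free the slot timeFrame seconds later.
        self.semaphore.acquire()
        timer = threading.Timer(self.timeFrame, self.semaphore.release)
        timer.daemon = True  # don't keep the process alive for pending releases
        timer.start()
        return func(*args, **kwargs)

With this sketch, limiter.call(some_request_fn, url) would never start more than callLimit calls within any timeFrame-second window.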
def __init__(self, count):
    """A semaphore for the purpose of limiting the number of tasks.

    :param count: The size of the semaphore
    """
    self._semaphore = threading.Semaphore(count)
def __init__(self, model_base_dir, num_parallel_predictions=2):
    possible_dirs = os.listdir(model_base_dir)
    model_dir = os.path.join(model_base_dir, max(possible_dirs))
    print("Loading {}".format(model_dir))

    self.sess = tf.get_default_session()
    loaded_model = tf.saved_model.loader.load(self.sess, ['serve'], model_dir)
    assert 'serving_default' in list(loaded_model.signature_def)

    input_dict, output_dict = _signature_def_to_tensors(
        loaded_model.signature_def['serving_default'])
    self._input_tensor = input_dict['images']
    self._output_dict = output_dict
    self.sema = Semaphore(num_parallel_predictions)
def testMultiPythonThread(self):
    import time, threading

    class Global:
        count = 0
        started = threading.Event()
        finished = threading.Semaphore(0)

        def sleep(self, ms):
            time.sleep(ms / 1000.0)
            self.count += 1

    g = Global()

    def run():
        with JSContext(g) as ctxt:
            ctxt.eval("""
                started.wait();

                for (i = 0; i < 10; i++) {
                    sleep(100);
                }

                finished.release();
            """)

    threading.Thread(target=run).start()

    now = time.time()
    self.assertEqual(0, g.count)
    g.started.set()
    g.finished.acquire()
    self.assertEqual(10, g.count)
    self.assertTrue((time.time() - now) >= 1)