The following 50 code examples, extracted from open-source Python projects, illustrate how to use threading.BoundedSemaphore().
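Before the project excerpts, here is a minimal, self-contained sketch of the pattern most of them share: construct the semaphore with the desired concurrency limit, acquire it around the guarded work, and release it afterwards. The names max_workers and do_work are illustrative only and do not come from any of the projects below.

import threading
import time

# BoundedSemaphore(n) allows at most n concurrent holders; unlike a plain
# Semaphore it raises ValueError if release() is called more times than
# acquire(), which catches pairing bugs early.
max_workers = 3
sema = threading.BoundedSemaphore(value=max_workers)

def do_work(task_id):
    with sema:                      # acquire() on entry, release() on exit
        print('task %d running' % task_id)
        time.sleep(0.1)             # stand-in for real work

threads = [threading.Thread(target=do_work, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Releasing beyond the initial value is an error:
try:
    sema.release()
except ValueError:
    print('released too many times')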
def __init__(self, queue, num_threads, timeout=0):
    """
    Init thread worker

    :param Queue.Queue queue: simple queue object
    :param int num_threads: threads numbers
    :param int timeout: delay timeout
    """
    super(Worker, self).__init__()
    self.__semaphore = BoundedSemaphore(num_threads)
    self.__event = Event()
    self.__event.set()
    self.__empty = False
    self.__running = True
    self.__queue = queue
    self.__timeout = timeout
    self.counter = 0
def main():
    global semaphore, sends
    signal.signal(signal.SIGINT, interrupt_handler)
    args = commandline()
    print(' ( Shell:{shell}, Numbers:{max_request}, Threads:{max_threads}, Retry:{max_retry} )\n'.format(**args.__dict__))
    semaphore = BoundedSemaphore(value=args.max_threads)
    stopwatch_start = time.time()
    for i, payload in enumerate(create_payload(args), 1):
        if attack:
            sends = i
            semaphore.acquire()
            t = Thread(target=crack, args=(i, args, payload))
            t.setDaemon(True)
            t.start()
    for _ in range(args.max_threads):
        semaphore.acquire()
    stopwatch = time.time() - stopwatch_start
    words = args.max_request * sends if sends else pwd_total
    speed = words / stopwatch if stopwatch else 0
    msg = '[Success] Password: {}'.format(pwd) if pwd else '[Failed] No password found'
    print('\n\n{msg}\n[Finish] {words} words in {stopwatch:.3f} seconds. ({speed:.0f} w/s)'.format(**locals()))
def test_BoundedSemaphore_limit(self):
    # BoundedSemaphore should raise ValueError if released too often.
    for limit in range(1, 10):
        bs = threading.BoundedSemaphore(limit)
        threads = [threading.Thread(target=bs.acquire) for _ in range(limit)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        threads = [threading.Thread(target=bs.release) for _ in range(limit)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertRaises(ValueError, bs.release)
def transform(self, docs, buffer_size=100):
    args = shlex.split(self.RUN_TAGGER_CMD) + ['--output-format', 'conll']
    proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    buffer_sema = threading.BoundedSemaphore(buffer_size)
    t = threading.Thread(target=self._write_input, args=(docs, proc, buffer_sema))
    t.start()
    while True:
        # reading can only follow writing unless EOF is reached so buffer_sema >= 0
        res = []
        while True:
            line = proc.stdout.readline().decode('utf-8').rstrip()
            if line == '':
                break
            word, tag, confidence = line.split('\t')
            res.append((word, tag, float(confidence)))
        if not res:
            break
        yield res
        buffer_sema.release()
    t.join()
def setUp(self):
    """Set up.

    Creates a FirewallEnforcer object with current and expected rules set to
    an empty FirewallRules object.
    """
    self.gce_service = mock.MagicMock()
    self.firewall_api = fe.ComputeFirewallAPI(self.gce_service, dry_run=True)
    self.expected_rules = fe.FirewallRules(constants.TEST_PROJECT)
    self.current_rules = fe.FirewallRules(constants.TEST_PROJECT)
    self.project_sema = threading.BoundedSemaphore(value=1)
    self.operation_sema = threading.BoundedSemaphore(value=5)
    self.enforcer = fe.FirewallEnforcer(
        constants.TEST_PROJECT, self.firewall_api, self.expected_rules,
        self.current_rules, self.project_sema, self.operation_sema)
def __init__(self, global_configs, rule_defs=None, snapshot_timestamp=None):
    """Initialization.

    Args:
        global_configs (dict): Global configurations.
        rule_defs (list): IAP rule definition dicts
        snapshot_timestamp (int): Snapshot timestamp.
    """
    super(IapRuleBook, self).__init__()
    self._rules_sema = threading.BoundedSemaphore(value=1)
    self.resource_rules_map = {}
    if not rule_defs:
        self.rule_defs = {}
    else:
        self.rule_defs = rule_defs
        self.add_rules(rule_defs)
    self.snapshot_timestamp = snapshot_timestamp
    self.org_res_rel_dao = org_resource_rel_dao.OrgResourceRelDao(global_configs)
    self.project_dao = project_dao.ProjectDao(global_configs)
def main_threaded(iniconfig):
    semaphore = BoundedSemaphore(CONCURRENCY_LIMIT)
    tasks = []
    for appid in iniconfig:
        section = iniconfig[appid]
        task = Thread(target=checker, args=(section, appid, semaphore))
        tasks.append(task)
        task.start()
    try:
        for t in tasks:
            t.join()
    except KeyboardInterrupt:
        for t in tasks:
            if hasattr(t, 'terminate'):  # multiprocessing
                t.terminate()
        print 'Validation aborted.'
        sys.exit(1)
def __init__(self,genreq,threads=20):
    self.genReq=genreq
    self.results=[]
    self.threads=threads
    self.run=True
    self.threads_list=[]
    self.nres=0
    self.mutex=1
    self.Semaphore_Mutex=threading.BoundedSemaphore(value=self.mutex)
def __init__(self,req):
    self.req=req
    self.MD5Orig=""
    self.origWords=False
    self.dynamics=[]
    self.injResults=[]
    self.fingerResults=[]
    self.threads=1
    self.threaded=False
    self.semMUTEX=threading.BoundedSemaphore(value=1)
def setThreaded(self,THREADS):
    self.threaded=True
    self.nthreads=THREADS
    self.semTHREADS=threading.BoundedSemaphore(value=THREADS)
def __init__ (self):
    threading.Thread.__init__(self)
    Attacker.__Semaphore_Threads=threading.BoundedSemaphore(value=Attacker.__Threads)
def setThreads(n):
    Attacker.__Threads=n
    Attacker.__Semaphore_Threads=threading.BoundedSemaphore(value=Attacker.__Threads)
def __init__(self, limit=10):
    self.limit = limit
    self.counter = threading.BoundedSemaphore(value=limit)
    self.count = 0
    # Start time
    self.start = time.time()
    # Image saving rate
    self.rate = 0
def acquire_n(self, value=1, blocking=True, timeout=None):
    """
    Acquire ``value`` number of tokens at once.

    The parameters ``blocking`` and ``timeout`` have the same semantics as
    :class:`BoundedSemaphore`.

    :returns:
        The same value as the last call to `BoundedSemaphore`'s
        :meth:`acquire` if :meth:`acquire` were called ``value`` times
        instead of the call to this method.
    """
    ret = None
    for _ in range(value):
        ret = self.acquire(blocking=blocking, timeout=timeout)
    return ret
def release_n(self, value=1):
    """
    Release ``value`` number of tokens at once.

    :returns:
        The same value as the last call to `BoundedSemaphore`'s
        :meth:`release` if :meth:`release` were called ``value`` times
        instead of the call to this method.
    """
    ret = None
    for _ in range(value):
        ret = self.release()
    return ret
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
    subdomains = subdomains or []
    base_url = 'https://dnsdumpster.com/'
    self.live_subdomains = []
    self.engine_name = "DNSdumpster"
    self.threads = 70
    self.lock = threading.BoundedSemaphore(value=self.threads)
    self.q = q
    super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
    return
def __init__(self, subdomains, ports):
    self.subdomains = subdomains
    self.ports = ports
    self.threads = 20
    self.lock = threading.BoundedSemaphore(value=self.threads)
def test_various_ops(self):
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10

    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()

    threads = []

    for i in range(NUMTASKS):
        t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
        threads.append(t)
        self.assertEqual(t.ident, None)
        self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
        t.start()

    if verbose:
        print('waiting for all tasks to complete')
    for t in threads:
        t.join(NUMTASKS)
        self.assertTrue(not t.is_alive())
        self.assertNotEqual(t.ident, 0)
        self.assertFalse(t.ident is None)
        self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>', repr(t)))
    if verbose:
        print('all tasks done')
    self.assertEqual(numrunning.get(), 0)
def testWithBoundedSemaphore(self):
    lock = threading.BoundedSemaphore()
    def locked():
        if lock.acquire(False):
            lock.release()
            return False
        else:
            return True
    self.boilerPlate(lock, locked)
def cracking_threads(fn, port, config):
    global FOUND
    global THREADS

    th = []
    sem = threading.BoundedSemaphore(config.concurrency)

    with open(config.wordlist, "r") as f:
        for i, password in enumerate(f.readlines()):
            password = password.replace("\n", "")

            # log.debug("  -- Testing '%s'" % password)
            if FOUND is not None:
                break

            # Launch password
            t = threading.Thread(target=find_password_sem,
                                 args=(fn, sem, config.target, port, config.user, password, None, ))
            th.append(t)
            sem.acquire()
            t.start()

            if (i % 500) == 0:
                log.info("  >> %s passwords tested" % i)

    # Wait for ending
    for x in th:
        x.join()

    if FOUND is not None:
        log.error("  - Password found: %s" % FOUND)

# ----------------------------------------------------------------------
def test_various_ops(self):
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10

    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()

    threads = []

    for i in range(NUMTASKS):
        t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
        threads.append(t)
        self.assertIsNone(t.ident)
        self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, initial\)>$')
        t.start()

    if verbose:
        print 'waiting for all tasks to complete'
    for t in threads:
        t.join(NUMTASKS)
        self.assertFalse(t.is_alive())
        self.assertNotEqual(t.ident, 0)
        self.assertIsNotNone(t.ident)
        self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, \w+ -?\d+\)>$')
    if verbose:
        print 'all tasks done'
    self.assertEqual(numrunning.get(), 0)
def testWithBoundedSemaphore(self):
    lock = threading.BoundedSemaphore()
    def locked():
        if lock.acquire(False):
            lock.release()
            return False
        else:
            return True
    self.boilerPlate(lock, locked)

# This is needed to make the test actually run under regrtest.py!
def __init__(self, domain, subdomains=None, q=None, lock=threading.Lock()):
    subdomains = subdomains or []
    self.base_url = 'https://dnsdumpster.com/'
    self.domain = urlparse.urlparse(domain).netloc
    self.subdomains = []
    self.live_subdomains = []
    self.session = requests.Session()
    self.engine_name = "DNSdumpster"
    multiprocessing.Process.__init__(self)
    self.threads = 70
    self.lock = threading.BoundedSemaphore(value=self.threads)
    self.q = q
    self.timeout = 25
    self.print_banner()
    return
def __init__(self,subdomains,ports):
    self.subdomains = subdomains
    self.ports = ports
    self.threads = 20
    self.lock = threading.BoundedSemaphore(value=self.threads)
def __init__(self, initial=1, period=1, amount=1):
    super(RateLimitBucket, self).__init__()
    self.semaphore = threading.BoundedSemaphore(initial)
    self.amount = amount
    self.period = period
    self._stop_event = threading.Event()
def test_various_ops(self):
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10

    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()

    threads = []

    for i in range(NUMTASKS):
        t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
        threads.append(t)
        self.assertEqual(t.ident, None)
        self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
        t.start()

    if verbose:
        print 'waiting for all tasks to complete'
    for t in threads:
        t.join(NUMTASKS)
        self.assertTrue(not t.is_alive())
        self.assertNotEqual(t.ident, 0)
        self.assertFalse(t.ident is None)
        self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
    if verbose:
        print 'all tasks done'
    self.assertEqual(numrunning.get(), 0)
def test_apply_change_lots_of_rules(self):
    """Changing more rules than permitted by the operation semaphore works.

    Setup:
      * Create a new bounded semaphore with a limit of 2 operations.
      * Create a list of 10 rules to insert.
      * Run _apply_change.

    Expected Results:
      * All rules end up in the successes list.
    """
    insert_function = self.firewall_api.insert_firewall_rule
    self.enforcer.operation_sema = threading.BoundedSemaphore(value=2)

    test_rule_name = 'test-network-allow-internal-0'
    test_rule = constants.EXPECTED_FIREWALL_RULES[test_rule_name]

    test_rules = []
    for i in xrange(10):
        rule = copy.deepcopy(test_rule)
        rule['name'] = '%s-%i' % (test_rule_name, i)
        test_rules.append(rule)

    (successes, failures, change_errors) = self.enforcer._apply_change(
        insert_function, test_rules)

    self.assertSameStructure(test_rules, successes)
    self.assertListEqual([], failures)
    self.assertListEqual([], change_errors)
def __init__(self, global_configs=None, dry_run=False, concurrent_workers=1,
             project_sema=None, max_running_operations=0):
    """Initialize.

    Args:
        global_configs (dict): Global configurations.
        dry_run (bool): If True, will simply log what action would have been
            taken without actually applying any modifications.
        concurrent_workers (int): The number of parallel enforcement threads
            to execute.
        project_sema (threading.BoundedSemaphore): An optional semaphore
            object, used to limit the number of concurrent projects getting
            written to.
        max_running_operations (int): Used to limit the number of concurrent
            write operations on a single project's firewall rules. Set to 0
            to allow unlimited in flight asynchronous operations.
    """
    self.global_configs = global_configs
    self.enforcement_log = enforcer_log_pb2.EnforcerLog()
    self._dry_run = dry_run
    self._concurrent_workers = concurrent_workers
    self._project_sema = project_sema
    self._max_running_operations = max_running_operations
    self._local = LOCAL_THREAD
def initialize_batch_enforcer(global_configs, concurrent_threads,
                              max_write_threads, max_running_operations,
                              dry_run):
    """Initialize and return a BatchFirewallEnforcer object.

    Args:
        global_configs (dict): Global configurations.
        concurrent_threads: The number of parallel enforcement threads to
            execute.
        max_write_threads: The maximum number of enforcement threads that
            can be actively updating project firewalls.
        max_running_operations: The maximum number of write operations per
            enforcement thread.
        dry_run: If True, will simply log what action would have been taken
            without actually applying any modifications.

    Returns:
        A BatchFirewallEnforcer instance.
    """
    if max_write_threads:
        project_sema = threading.BoundedSemaphore(value=max_write_threads)
    else:
        project_sema = None

    enforcer = batch_enforcer.BatchFirewallEnforcer(
        global_configs=global_configs,
        dry_run=dry_run,
        concurrent_workers=concurrent_threads,
        project_sema=project_sema,
        max_running_operations=max_running_operations)

    return enforcer
def test_various_ops(self):
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10

    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()

    threads = []

    for i in range(NUMTASKS):
        t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
        threads.append(t)
        self.assertEqual(t.ident, None)
        self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
        t.start()

    if verbose:
        print('waiting for all tasks to complete')
    for t in threads:
        t.join()
        self.assertTrue(not t.is_alive())
        self.assertNotEqual(t.ident, 0)
        self.assertFalse(t.ident is None)
        self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>', repr(t)))
    if verbose:
        print('all tasks done')
    self.assertEqual(numrunning.get(), 0)
def __init__(self, max_threads=3):
    # priority queue picks things with a lesser value first
    self._submission_queue = queue.PriorityQueue()
    self._submission_set = set()
    self._threads_semaphore = threading.BoundedSemaphore(max_threads)
    self._thread = threading.Thread(None, self.__run, 'ExecutorQueue.__run')
    self._thread.daemon = True
    self._thread.start()