The following code examples, extracted from open-source Python projects, illustrate how to use threading.enumerate().
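Before the project examples, here is a minimal sketch of the common pattern (the worker function and thread names are hypothetical, not taken from any of the projects below): threading.enumerate() returns a list of all Thread objects currently alive, which is frequently used to inspect running threads or to join every thread other than the main one.

    import threading
    import time

    def worker(n):
        # Hypothetical task: sleep briefly to simulate work.
        time.sleep(0.1 * n)

    for n in range(3):
        threading.Thread(target=worker, args=(n,), name="worker-%d" % n).start()

    main_thread = threading.current_thread()
    for t in threading.enumerate():        # every Thread object currently alive
        print(t.name, t.ident, t.daemon)
        if t is not main_thread:
            t.join()                       # wait for all non-main threads to finish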
def print_all_stacktraces():
    print("\n*** STACKTRACE - START ***\n")
    code = []
    for threadId, stack in sys._current_frames().items():
        threadName = ''
        for t in threading.enumerate():
            if t.ident == threadId:
                threadName = t.name
        code.append("\n# ThreadID: %s %s" % (threadId, threadName))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append(" %s" % (line.strip()))

    for line in code:
        print(line)
    print("\n*** STACKTRACE - END ***\n")
def worker_int(worker):
    worker.log.info("worker received INT or QUIT signal")

    ## get traceback info
    import threading, sys, traceback
    id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for threadId, stack in sys._current_frames().items():
        code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append(" %s" % (line.strip()))
    worker.log.debug("\n".join(code))
def UpDataShare():
    thread = []
    MaxThread = 3
    num = 0
    code = Tools().GetShareCode()
    for x in code:
        y = threading.Thread(target=ChildThead, args=(x,))
        thread.append(y)
    try:
        for t in tqdm(thread):
            t.start()
            while True:
                time.sleep(0.05)
                if len(threading.enumerate()) < MaxThread:
                    if len(code) - num < 13:
                        t.join()
                    num = num + 1
                    break
    except:
        print "1223"
def shutdown_system():
    worker_thread.stop_all_threads()
    _thread.interrupt_main()

    current_pid = funcs.process__get_current_pid()
    matched = False
    for hwnd in funcs.window__find_handles():
        pid = funcs.window__get_process_id(hwnd)
        if pid == current_pid:
            print("Sending PostMessage to hwnd owned by {0}".format(pid))
            funcs.window__post_message(hwnd, WM_QUIT, 0, 0)
            matched = True
            # Continue in case there are more windows we own
    if not matched:
        time.sleep(0.1)
        # print("DEBUG could not find a window to post a quit to. Forcing quit.")
        # for t in threading.enumerate():
        #     print("Running thread: {0}".format(t))
        sys.exit()
def stats_handler(self, candidate, stats, message):
    now = int(time.time())
    print '@%d' % now, message.candidate.get_member().mid.encode('hex'), json.dumps(stats)

    candidate_mid = candidate.get_member().mid
    stats = self.preprocess_stats(stats)
    stats['time'] = now
    stats_old = self.crawl_message.get(candidate_mid, None)
    self.crawl_message[candidate_mid] = stats

    if stats_old is None:
        return

    time_dif = float(stats['uptime'] - stats_old['uptime'])
    if time_dif > 0:
        for index, key in enumerate(['bytes_orig', 'bytes_exit', 'bytes_relay']):
            self.current_stats[index] = self.current_stats[index] * 0.875 + \
                (((stats[key] - stats_old[key]) / time_dif) / 1024) * 0.125
def _multi_thread_download(url, file_name, file_size, thread_count):
    import threading
    fp = open(file_name, "wb")
    fp.truncate(file_size)
    fp.close()

    part = file_size // thread_count
    for i in range(thread_count):
        start = part * i
        if i == thread_count - 1:
            end = file_size
        else:
            end = start + part

        t = threading.Thread(target=_downloader,
                             kwargs={'start': start, 'end': end, 'url': url, 'filename': file_name})
        t.setDaemon(True)
        t.start()

    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is main_thread:
            continue
        t.join()
    return file_name
def __init__(self, token, url="https://listener.logz.io:8071",
             logs_drain_timeout=5, debug=False):
    self.token = token
    self.url = "{0}/?token={1}".format(url, token)
    self.logs_drain_timeout = logs_drain_timeout
    self.debug = debug

    # Function to see if the main thread is alive
    self.is_main_thread_active = lambda: any(
        (i.name == "MainThread") and i.is_alive() for i in enumerate())

    # Create a queue to hold logs
    self.queue = queue.Queue()

    self.sending_thread = Thread(target=self._drain_queue)
    self.sending_thread.daemon = False
    self.sending_thread.name = "logzio-sending-thread"
    self.sending_thread.start()
def test_thread_leak(self):
    # The lock shouldn't leak a Thread instance when used from a foreign
    # (non-threading) thread.
    lock = self.locktype()
    def f():
        lock.acquire()
        lock.release()
    n = len(threading.enumerate())
    # We run many threads in the hope that existing threads ids won't
    # be recycled.
    Bunch(f, 15).wait_for_finished()
    if len(threading.enumerate()) != n:
        # There is a small window during which a Thread instance's
        # target function has finished running, but the Thread is still
        # alive and registered. Avoid spurious failures by waiting a
        # bit more (seen on a buildbot).
        time.sleep(0.4)
        self.assertEqual(n, len(threading.enumerate()))
def test_enumerate_after_join(self):
    # Try hard to trigger #1703448: a thread is still returned in
    # threading.enumerate() after it has been join()ed.
    enum = threading.enumerate
    newgil = hasattr(sys, 'getswitchinterval')
    if newgil:
        geti, seti = sys.getswitchinterval, sys.setswitchinterval
    else:
        geti, seti = sys.getcheckinterval, sys.setcheckinterval
    old_interval = geti()
    try:
        for i in range(1, 100):
            seti(i * 0.0002 if newgil else i // 5)
            t = threading.Thread(target=lambda: None)
            t.start()
            t.join()
            l = enum()
            self.assertNotIn(t, l,
                "#1703448 triggered after %d trials: %s" % (i, l))
    finally:
        seti(old_interval)
def scan(self):
    self.result = []
    _gate_way = '.'.join(self.current_ip.split('.')[:3])
    # gate_way = _gate_way+'.1'  # pyflakes says this is not used
    if self.alert:
        console.show_activity('Scanning.....')
    for x in range(1, 256):
        ip = '{}.{}'.format(_gate_way, x)
        self.thread_limit.acquire()
        threading.Thread(target=self.pscan,
                         args=(ip, self.port),
                         name='PortScanner').start()
    thread_list = [x for x in threading.enumerate() if x.name == 'PortScanner']
    for _ in thread_list:
        _.join()
    if self.alert:
        if self.result:
            console.hud_alert(' '.join(self.result), 'success', 1)
        else:
            console.hud_alert('Not found', 'error', 1)
        console.hide_activity()
    return self.result
def sigquit_handler(sig, frame):
    """Helps debug deadlocks by printing stacktraces when this gets a SIGQUIT
    e.g. kill -s QUIT <PID> or CTRL+\
    """
    print("Dumping stack traces for all threads in PID {}".format(os.getpid()))
    id_to_name = dict([(th.ident, th.name) for th in threading.enumerate()])
    code = []
    for thread_id, stack in sys._current_frames().items():
        code.append("\n# Thread: {}({})"
                    .format(id_to_name.get(thread_id, ""), thread_id))
        for filename, line_number, name, line in traceback.extract_stack(stack):
            code.append('File: "{}", line {}, in {}'
                        .format(filename, line_number, name))
            if line:
                code.append("  {}".format(line.strip()))
    print("\n".join(code))
def clear(args):
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dags = get_dags(args)

    if args.task_regex:
        for idx, dag in enumerate(dags):
            dags[idx] = dag.sub_dag(
                task_regex=args.task_regex,
                include_downstream=args.downstream,
                include_upstream=args.upstream)

    DAG.clear_dags(
        dags,
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.no_confirm,
        include_subdags=not args.exclude_subdags)
def setUp(self):
    super(PowerControlTestCase, self).setUp()

    PowerControlManager.check_device_availability = mock.Mock()

    self.threads_at_start = set(threading.enumerate())

    self.power_manager = PowerControlManager()
    monitor_daemon = PowerMonitorDaemon(self.power_manager)

    class MonitorDaemonThread(threading.Thread):
        def run(self):
            monitor_daemon.run()

        def stop(self):
            monitor_daemon.stop()
            monitor_daemon.join()

    self.md_thread = MonitorDaemonThread()
    self.md_thread.start()

    self.fence_type = PowerControlType.objects.create(agent='fake_agent',
                                                      default_username='fake',
                                                      default_password='fake')
def test_enumerate_after_join(self):
    # Try hard to trigger #1703448: a thread is still returned in
    # threading.enumerate() after it has been join()ed.
    enum = threading.enumerate
    old_interval = sys.getcheckinterval()
    try:
        for i in xrange(1, 100):
            # Try a couple times at each thread-switching interval
            # to get more interleavings.
            sys.setcheckinterval(i // 5)
            t = threading.Thread(target=lambda: None)
            t.start()
            t.join()
            l = enum()
            self.assertNotIn(t, l,
                "#1703448 triggered after %d trials: %s" % (i, l))
    finally:
        sys.setcheckinterval(old_interval)
def test_BaseStream_connect():
    event = threading.Event()

    def dummy_func():
        while not event.is_set():
            time.sleep(1.)

    base = BaseStream()
    n_threads_0 = threading.active_count()
    base.connect(dummy_func, "TEST")
    n_threads_1 = threading.active_count()

    # Check that a thread was started.
    assert n_threads_1 - n_threads_0 == 1, "Thread not started."

    # Check that the thread was created and named properly.
    name = [t.getName() for t in threading.enumerate()
            if t.getName() == "TEST"]
    assert name[0] == "TEST", "Thread not named properly."

    # Check that connect method only allows one connection.
    with pytest.raises(RuntimeError):
        base.connect(dummy_func, "SECOND_TEST")

    # Clean up.
    event.set()
def OnClickStopAction(self, event):
    if self.pipeline_started == True:
        if self.citer_flow[1] == 1:
            self.sequence_timer.Start(1000)
            self.ancestor.GetPage(4).data_poll_timer.Start(1000)
            if self.total_iter > 0:
                self.ancestor.GetPage(2).data_poll_timer.Start(1000)
        if self.citer_flow[1] < 2:
            self.citer_flow[1] = 2
            self.pipeline_started = False
            self.button_pause.SetBitmapLabel(getpause48Bitmap())
            self.ancestor.GetPage(1).button_pause.SetBitmapLabel(getpauseBitmap())
            self.citer_flow[3] = 0
            self.citer_flow[4] = 0
            self.citer_flow[5] = 0

            def ThreadClean(self):
                while len(enumerate()) > 2:
                    sleep(0.1)
                wx.CallAfter(self.OnClickFinal,)

            self.thread = threading.Thread(target=ThreadClean, args=(self,))
            self.thread.daemon = True
            self.thread.start()
def __quit(self):
    # End program
    logging.info("User pressed ''quit'' button - now halting threads")

    # Close threads running for signal display and processing
    self.signalDisplayInstance.closeThreads()
    logging.info("Signal display thread was closed")

    # If camera connection is active, close it
    self.cameraInstance.close_camera_thread()
    logging.info("Camera capture thread was closed")

    # Close GUI
    self.root.quit()
    logging.info("Tk mainloop() was halted")

    # Debug: Store all still running threads
    logging.debug(threading.enumerate())

    # Exit program
    if settings.determine_if_under_testing() is False:
        logging.info("Program will halt now...")
        sys.exit()
def show_threads():
    """ Log the name, ident and daemon flag of all alive threads in DEBUG level """
    if logger.isEnabledFor(logging.DEBUG):
        all_threads = threading.enumerate()
        max_name = reduce(max, map(len, [t.name for t in all_threads]))
        max_ident = reduce(max, map(int, map(math.ceil, map(math.log10,
                           [t.ident for t in all_threads if t.ident is not None]))))
        msg = ['Name' + ' ' * (max_name - 2) + 'Ident' + ' ' * (max_ident - 3) + 'Daemon',
               '=' * max_name + ' ' + '=' * max_ident + ' ======']
        fmt = '%{0}.{0}s %{1}d %d'.format(max_name, max_ident)
        for t in threading.enumerate():
            msg.append(fmt % (t.name, t.ident, t.daemon))
        logger.debug("Threads currently alive on process %d:\n%s",
                     os.getpid(), '\n'.join(msg))
def test_enumerate_after_join(self):
    # Try hard to trigger #1703448: a thread is still returned in
    # threading.enumerate() after it has been join()ed.
    enum = threading.enumerate
    old_interval = sys.getswitchinterval()
    try:
        for i in range(1, 100):
            sys.setswitchinterval(i * 0.0002)
            t = threading.Thread(target=lambda: None)
            t.start()
            t.join()
            l = enum()
            self.assertNotIn(t, l,
                "#1703448 triggered after %d trials: %s" % (i, l))
    finally:
        sys.setswitchinterval(old_interval)
def enable_tracing(self):
    """ Enable tracing if it is disabled and debugged program is running, else do nothing.
        Do this on all threads but the debugger thread.
        :return: True if tracing has been enabled, False else.
    """
    _logger.x_debug("enable_tracing()")
    #self.dump_tracing_state("before enable_tracing()")
    if not self.tracing_enabled and self.execution_started:
        # Restore or set trace function on all existing frames appart from
        # debugger
        threading.settrace(self._tracer)  # then enable on all threads to come
        for thr in threading.enumerate():
            if thr.ident != self.debugger_thread_ident:  # skip debugger thread
                a_frame = sys._current_frames()[thr.ident]
                while a_frame:
                    a_frame.f_trace = self._tracer
                    a_frame = a_frame.f_back
        iksettrace._set_trace_on(self._tracer, self.debugger_thread_ident)
        self.tracing_enabled = True
    #self.dump_tracing_state("after enable_tracing()")
    return self.tracing_enabled
def list_threads(self, txt):
    cp_threads = 0
    http_threads = 0
    for thread in threading.enumerate():
        if thread.name.find("CP Server") == 0:
            cp_threads += 1
        if thread.name.find("HTTPServer") == 0:
            http_threads += 1
    self._logger.info("list_threads: {} - Number of Threads: {} (CP Server={}, HTTPServer={}".format(
        txt, threading.activeCount(), cp_threads, http_threads))
    for thread in threading.enumerate():
        if thread.name.find("CP Server") != 0 and thread.name.find("HTTPServer") != 0:
            self._logger.info("list_threads: {} - Thread {}".format(txt, thread.name))
    return

#################################################################
# Item Methods
#################################################################
def report_status(self):
    current_thread_list = threading.enumerate()
    thread_name = []
    for thread in current_thread_list:
        if thread.name in self.extensions:
            thread_name.append(thread.name)
    LOG.info("Current plugin threads: " + " ".join(thread_name))

    # If some extensions threads exit unexpectedly, create a new thread
    # for it
    none_thread_extensions = [i for i in self.extensions
                              if i not in thread_name]
    if len(none_thread_extensions) > 0:
        LOG.info("Recreating thread(s) for extension(s): " +
                 " ".join(none_thread_extensions))
        for ext in none_thread_extensions:
            task = getattr(self.extensions[ext], 'periodic_task')
            task_name = ext
            t = threading.Thread(target=task, name=task_name)
            t.start()
def test_002_multi_thread_filter():
    import threading
    import time
    for i in range(1, 50):
        if i % 2 != 0:
            target = ls_filter
        else:
            target = org_filter
        thread = threading.Thread(name=i, target=target)
        thread.start()
    while len(threading.enumerate()) > 1:
        time.sleep(1)
    assert success
def find_free_proxies(url='http://icanhazip.com/'):
    def _free_proxies_thread_worker(proxy, url, _list):
        if test_proxy(proxy, url) is True:
            _list.append(proxy)

    logger = logging.getLogger(__name__)
    free_proxies = []
    main_thread = threading.currentThread()

    for proxy in proxy_list:
        threading.Thread(target=_free_proxies_thread_worker,
                         args=(proxy, url, free_proxies)).start()

    for thread in threading.enumerate():
        if thread is not main_thread:
            thread.join()

    if not len(free_proxies):
        logger.critical('no working proxies')
        return None

    return free_proxies
def main():
    django.setup()
    devices = NetworkDevice.objects.all()

    starttime = datetime.now()

    for dev in devices:
        my_thread = threading.Thread(target=show_version, args=(dev,))
        my_thread.start()

    main_thread = threading.currentThread()
    for thread in threading.enumerate():
        if thread != main_thread:
            print thread
            thread.join()

    totaltime = datetime.now() - starttime
    print
    print "Elapsed time " + str(totaltime)
    print
def flush(self):
    """Flush buffered output."""
    orphans = []
    self.lock.acquire()
    try:
        # Detect threads no longer existing.
        indexes = (getattr(t, 'index', None) for t in threading.enumerate())
        indexes = filter(None, indexes)
        for index in self.__output_buffers:
            if not index in indexes:
                orphans.append((index, self.__output_buffers[index][0]))
        for orphan in orphans:
            del self.__output_buffers[orphan[0]]
    finally:
        self.lock.release()

    # Don't keep the lock while writting. Will append \n when it shouldn't.
    for orphan in orphans:
        if orphan[1]:
            self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()
def multithread_engine(object, redirect, credentials):
    start_time = datetime.datetime.now()
    index = 0
    if (object == initialize.ntw_device):
        arguments = credentials
    if (object == initialize.switchport):
        arguments = credentials
    for i in object:
        my_thread = threading.Thread(target=getattr(object[index], redirect), args=(arguments,))
        my_thread.start()
        index = index + 1
    main_thread = threading.currentThread()
    for some_thread in threading.enumerate():
        if some_thread != main_thread:
            print(some_thread)
            some_thread.join()
    print("\n")
    print("TIME ELAPSED: {}\n".format(datetime.datetime.now() - start_time))
def multithread_engine(object, redirect, credentials):
    start_time = datetime.datetime.now()
    index = 0
    if (object == initialize.ntw_device):
        arguments = None
    if (object == initialize.switchport):
        arguments = credentials
    for i in object:
        my_thread = threading.Thread(target=getattr(object[index], redirect), args=(arguments,))
        my_thread.start()
        index = index + 1
    main_thread = threading.currentThread()
    for some_thread in threading.enumerate():
        if some_thread != main_thread:
            print(some_thread)
            some_thread.join()
    print("\n")
    print("TIME ELAPSED: {}\n".format(datetime.datetime.now() - start_time))
def __del__(self):
    import threading

    key = object.__getattribute__(self, '_local__key')

    try:
        threads = list(threading.enumerate())
    except:
        return

    for thread in threads:
        try:
            __dict__ = thread.__dict__
        except AttributeError:
            continue

        if key in __dict__:
            try:
                del __dict__[key]
            except KeyError:
                pass
def shutdown_components(self):
    """Execute before the reactor is shut down"""
    self.log.info('exiting-on-keyboard-interrupt')
    for component in reversed(registry.iterate()):
        yield component.stop()

    import threading
    self.log.info('THREADS:')
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is main_thread:
            continue
        if not t.isDaemon():
            continue
        self.log.info('joining thread {} {}'.format(
            t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
        t.join()
def iter_thread_frames():
    main_thread_frame = None
    for ident, frame in sys._current_frames().items():
        if IDENT_TO_UUID.get(ident) == MAIN_UUID:
            main_thread_frame = frame
            # the MainThread should be shown in it's "greenlet" version
            continue
        yield ident, frame

    for thread in threading.enumerate():
        if not getattr(thread, '_greenlet', None):
            # some inbetween state, before greenlet started or after it died?...
            pass
        elif thread._greenlet.gr_frame:
            yield thread.ident, thread._greenlet.gr_frame
        else:
            # a thread with greenlet but without gr_frame will be fetched from sys._current_frames
            # If we switch to another greenlet by the time we get there we will get inconsistent dup of threads.
            # TODO - make best-effort attempt to show coherent thread dump
            yield thread.ident, main_thread_frame
def run_thread(req_list, name=None, is_lock=True, limit_num=8):
    '''Run a batch of requests in worker threads.

    - req_list   request list; each item is a (func, args) tuple, e.g.
                 [
                     (func_0, (para_0_1, para_0_2, ...)),
                     (func_1, (para_1_1, para_1_2, ...)),
                     ...
                 ]
    - name       thread name (str), default None
    - is_lock    whether to block until all threads finish (bool), default True;
                 False returns without joining
    - limit_num  maximum number of concurrent threads (int), default 8
    '''
    queue = deque(req_list)
    while len(queue):
        if threading.active_count() <= limit_num:
            para = queue.popleft()
            now_thread = threading.Thread(
                target=para[0], args=para[1], name=name, daemon=True)
            now_thread.start()
    if is_lock:
        for now_thread in threading.enumerate():
            if now_thread is not threading.currentThread():
                now_thread.join()
def test_builtin_channels(self):
    b = wspbus.Bus()

    self.responses, expected = [], []

    for channel in b.listeners:
        for index, priority in enumerate([100, 50, 0, 51]):
            b.subscribe(channel, self.get_listener(channel, index), priority)

    for channel in b.listeners:
        b.publish(channel)
        expected.extend([msg % (i, channel, None) for i in (2, 1, 3, 0)])
        b.publish(channel, arg=79347)
        expected.extend([msg % (i, channel, 79347) for i in (2, 1, 3, 0)])

    self.assertEqual(self.responses, expected)
def test_custom_channels(self):
    b = wspbus.Bus()

    self.responses, expected = [], []

    custom_listeners = ('hugh', 'louis', 'dewey')
    for channel in custom_listeners:
        for index, priority in enumerate([None, 10, 60, 40]):
            b.subscribe(channel, self.get_listener(channel, index), priority)

    for channel in custom_listeners:
        b.publish(channel, 'ah so')
        expected.extend([msg % (i, channel, 'ah so') for i in (1, 3, 0, 2)])
        b.publish(channel)
        expected.extend([msg % (i, channel, None) for i in (1, 3, 0, 2)])

    self.assertEqual(self.responses, expected)
def watchThreads(self) -> None:
    while True:
        try:
            workingThreads: List[Thread] = threading.enumerate()
            with open(f"{self.core.config.directory.api}/{API.Thread.value}", "w") as f:
                json.dump([x.name for x in workingThreads], f, sort_keys=True, indent=4)
            # self.threads is assumed to hold (Thread, target-callable) pairs.
            for i, (thread, func) in enumerate(self.threads):
                if not thread.is_alive() or thread not in workingThreads:
                    if thread.name in [x.meta.name for x in self.core.PM.plugins.values()]:
                        self.threads[i] = self.startThread(func, name=thread.name)
                    else:
                        self.threads[i] = self.startThread(func, name=thread.name, args=[self.core])
        except:
            pass
        finally:
            time.sleep(10)
def get_full_thread_dump():
    """Returns a string containing a traceback for all threads"""
    output = io.StringIO()
    time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    thread_names = {}
    for thread in threading.enumerate():
        thread_names[thread.ident] = thread.name
    output.write("\n>>>> Begin stack trace (%s) >>>>\n" % time)
    for threadId, stack in current_frames().items():
        output.write(
            "\n# ThreadID: %s (%s)\n" %
            (threadId, thread_names.get(threadId, "unknown")))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            output.write(
                'File: "%s", line %d, in %s\n' % (filename, lineno, name))
            if line:
                output.write("  %s\n" % (line.strip()))
    output.write("\n<<<< End stack trace <<<<\n\n")
    thread_dump = output.getvalue()
    output.close()
    return thread_dump