The following 49 code examples, extracted from open-source Python projects, illustrate how to use multiprocessing.dummy.Pool().
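Before the individual examples, here is a minimal sketch of the pattern most of them share: multiprocessing.dummy.Pool exposes the same API as multiprocessing.Pool but runs tasks in threads, which suits I/O-bound work such as the network and database calls below. The fetch() helper and the placeholder URL are illustrative only and do not come from any of the projects listed.

from multiprocessing.dummy import Pool as ThreadPool
import urllib.request


def fetch(url):
    # I/O-bound task: download one page and return its size in bytes
    with urllib.request.urlopen(url) as resp:
        return len(resp.read())


if __name__ == '__main__':
    urls = ['https://example.com'] * 4   # placeholder URLs for illustration
    pool = ThreadPool(4)                 # four worker threads
    sizes = pool.map(fetch, urls)        # blocks until every task finishes
    pool.close()
    pool.join()
    print(sizes)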
def for_genre(genre, num):
    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    nums = list(range(1, num))
    results = pool.starmap(soupit, zip(nums, itertools.repeat(genre)))
    pool.close()
    pool.join()

    # build up the list of urls with the results of all the sub-processes that succeeded in a single list
    new_results = []
    for j in results:
        if j:
            for i in j:
                new_results.append(i)

    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    pool.starmap(dwnld, zip(enumerate(new_results), itertools.repeat(genre)))
    pool.close()
    pool.join()
def __init__(self, args):
    super(Slave, self).__init__()
    self._pool = Pool(args.thread_num)
    self._timeout = args.int_timeout
    self._call_method = getattr(requests, args.int_method)
    self._flags = args.int_flags.split(',,')
    if args.int_headers != "":
        self._headers = json.loads(input2json(args.int_headers))
    else:
        self._headers = {}
    if args.int_cookies != '':
        cookiesl = args.int_cookies.split(',')
        self._cookies = {x.split(':')[0]: x.split(':')[1] for x in cookiesl}
    else:
        self._cookies = {}
def del_zabbix_events():
    try:
        HOST = '172.16.4.93'
        PORT = 3306
        DB = 'zabbix'
        MYSQL = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
        cmd = "select eventid from events order by eventid limit 10000;"
        results = MYSQL.Run(cmd)
        MYSQL.Close()

        def Delete(eventid):
            MySql = Mysql.MYSQL(USER, PASSWORD, HOST, PORT, DB)
            cmd = "delete from events where eventid=%i" % int(eventid[0])
            MySql.Run(cmd)
            MySql.Close()

        pool = ThreadPool(8)
        pool.map(Delete, results)
        pool.close()
        pool.join()
        loging.write('del_last_eventid:%s' % results[-1][0])
    except Exception as e:
        loging.write(e)
def invoke_mappers(n_mappers, batches):
    mapper_outputs = []
    logger.info("# of Mappers {}".format(n_mappers))
    pool = ThreadPool(n_mappers)
    mapper_ids = [i + 1 for i in range(n_mappers)]
    invoke_lambda_partial = partial(invoke_lambda, batches, mapper_outputs, mapper_lambda_name)
    mappers_executed = 0
    while mappers_executed < n_mappers:
        nm = min(PARALLEL_LAMBDAS, n_mappers)
        results = pool.map(invoke_lambda_partial, mapper_ids[mappers_executed: mappers_executed + nm])
        mappers_executed += nm
    pool.close()
    pool.join()
    logger.info("All the mappers finished")
def convert_to_demo_ids(match_ids, threads):
    # Tell the user what is happening
    print "Converting Match IDs to Demo IDs"
    # Define the number of threads
    pool = ThreadPool(threads)
    # Calls get_demo_ids() and adds the value returned each call to an array called demo_ids
    demo_ids = pool.map(get_demo_ids, match_ids)
    pool.close()
    pool.join()
    # Create an array to add any captured errors to
    errors = []
    # Find any errors, add them to the errors array, and remove them from demo_ids
    for text in demo_ids:
        if " " in text:
            errors.append(text[1:])
            demo_ids.remove(text)
    # Print the errors (if there are any)
    print_errors(errors)
    return demo_ids
def download(demo_ids, threads):
    # Convert the DemoIDs to URLs
    urls = convert_to_urls(demo_ids)
    # Define the number of threads
    pool = ThreadPool(threads)
    # Make a folder for the event to save the files in
    directory = make_dir()
    # Calls get() and adds the filesize returned each call to an array called filesizes
    filesizes = pool.map(get, urls)
    pool.close()
    pool.join()
    # Create a float to store the filesizes in and add them together
    total_file_size = sum(filesizes)
    # Print the properly formatted filesize.
    print "Successfully transferred %s. Enjoy!" % (format_file_size(total_file_size))
    return True
def turn_page(self):
    """ Turn menu page """
    self.book_menu.set_items({}, 0, self.go_site_playback)
    books = self.get_books()
    book_list = self.set_books(self.current_page, books)
    d = self.book_menu.make_dict(book_list.items)
    self.book_menu.set_items(d, 0, self.go_site_playback)
    buttons = self.components[1].buttons
    size = len(buttons.values())
    if size == 0:
        return
    pool = Pool(size)
    pool.map(self.set_image, buttons.values())
    pool.close()
    pool.join()
    self.book_menu.select_by_index(0)
def all_or_nothing(matrix, graph, results):
    aux_res = MultiThreadedAoN()
    aux_res.prepare(graph, results)

    # catch errors
    if results.__graph_id__ is None:
        raise ValueError('The results object was not prepared. Use results.prepare(graph)')
    elif results.__graph_id__ != graph.__id__:
        raise ValueError('The results object was prepared for a different graph')
    else:
        pool = ThreadPool(results.cores)
        all_threads = {'count': 0}
        report = []
        for O in range(matrix.shape[0]):
            a = matrix[O, :]
            if np.sum(a) > 0:
                pool.apply_async(func_assig_thread, args=(O, a, graph, results, aux_res, all_threads, report))
        pool.close()
        pool.join()
        results.link_loads = np.sum(aux_res.temp_link_loads, axis=1)
        return report
def doWork(self):
    self.emit(SIGNAL("ProgressMaxValue(PyQt_PyObject)"), self.matrix.shape[0])
    self.emit(SIGNAL("ProgressValue(PyQt_PyObject)"), 0)

    # If we are going to perform All or Nothing
    if self.method['algorithm'] == 'AoN':
        pool = ThreadPool(self.results.cores)
        self.all_threads['count'] = 0
        for O in range(self.results.zones):
            a = self.matrix[O, :]
            if np.sum(a) > 0:
                pool.apply_async(self.func_assig_thread, args=(O, a))
        pool.close()
        pool.join()
        self.emit(SIGNAL("ProgressValue(PyQt_PyObject)"), self.matrix.shape[0])
        self.results.link_loads = np.sum(self.aux_res.temp_link_loads, axis=1)

    self.emit(SIGNAL("ProgressText (PyQt_PyObject)"), "Saving Outputs")
    self.emit(SIGNAL("finished_threaded_procedure( PyQt_PyObject )"), None)
def start():
    if len(sys.argv[1:]) == 0:
        config = utils.load_config()
    else:
        config = utils.load_config(sys.argv[1])
    logger = utils.get_logger()
    logger.info('????')
    room_count = len(config['ROOM_URLS'])
    if room_count == 0:
        logger.info('?????????????')
        exit(0)
    pool = ThreadPool(room_count)
    for room_url in config['ROOM_URLS']:
        m = Monitor(room_url)
        pool.apply_async(m.run)
    pool.close()
    try:
        pool.join()
    except KeyboardInterrupt:
        logger.warning('????')
        exit(1)
def _multiThreadedTest(infiles):
    arg1 = []
    arg2 = home
    for item in infiles:
        arg1.append(item)
    pool = ThreadPool(len(arg1))
    pool.starmap(_csvParse, zip(arg1, repeat(arg2)))
    print("Parsed through %d IP addresses." % (len(set(internal_ips + external_ips))))
    _blackList(hosts=set(internal_ips + external_ips))
    _geolocate(hosts)

#print(privateIP.text)
#_initialize()
#_multiThreadedTest(last30)
#res_list = [x[0] for x in compromise]
#_barChart(yValues=(DATA), xValues=sorted(TITLES), outfile="bar.png")
#text_file = open("badguys.txt", "w")
#for i in biglist:
#    text_file.write("%s\n" % (i))
#_pieChart(ports, "Top ports", 10, "topports.png")
#_folium("test.html")
def Proposal_Massive(days_list=None, proposal_days_interval=1, CPUs=4, collection=COLLECTION):
    assert isinstance(days_list, list), "Days list must be a list"
    days_count = len(days_list)
    pool = ThreadPool(CPUs)
    try:
        print "Creating proposals for days between {}-{} in {}".format(days_list[0], days_list[-1], collection)
        pool.map(Proposal_creator,
                 ((day, proposal_days_interval, idx + 1, days_count, collection)
                  for idx, day in enumerate(days_list)))
    except Exception as e:
        print "Thread error at processing '{}'".format(e)
    pool.close()
    pool.join()
def LaGouSpiderWithKeyWord(position, city):
    # ??????
    pageCount = SearchPageCount(position, city)
    if pageCount == 0:
        print('???????????????????')
        return
    totaldata = DataFrame().T
    urls = []
    for i in range(0, pageCount):
        url = 'http://www.lagou.com/jobs/positionAjax.json?'
        params = {'city': city, 'kd': position, 'pn': i + 1}
        url += parse.urlencode(params)
        urls.append(url)
    # ??work?
    pool = ThreadPool(processes=8)
    # ?????rdatas
    rdatas = pool.map(get_rdata, urls)
    for rdata in rdatas:
        totaldata = pd.concat([totaldata, rdata])
    totaldata.to_csv('lagou.csv')
def imap(requests, stream=True, pool=None, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. default is 2
    :param exception_handler: Callback function, called when an exception occurred. Params: Request, Exception
    """

    def send(r):
        return r.send(stream=stream)

    pool = pool if pool else Pool(size)

    for request in pool.imap(send, requests):
        if request.response is not None:
            yield request.response
        elif exception_handler:
            exception_handler(request, request.exception)

    if not pool:
        pool.close()
def imap_unordered(requests, stream=True, pool=None, size=2, exception_handler=None):
    """Concurrently converts a generator object of Requests to a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: If False, the content will not be downloaded immediately.
    :param size: Specifies the number of requests to make at a time. default is 2
    :param exception_handler: Callback function, called when an exception occurred. Params: Request, Exception
    """

    def send(r):
        return r.send(stream=stream)

    pool = pool if pool else Pool(size)
    with contextlib.closing(Pool(size)) as pool:
        for request in pool.imap_unordered(send, requests):
            if request.response is not None:
                yield request.response
            elif exception_handler:
                exception_handler(request, request.exception)

    if not pool:
        pool.close()
def download(self):
    logger.info('[Downloader] takes hand')
    self.mkdir(self.resources_folder)
    tasks = [
        (self.get_filepath(meta, url), url)
        for meta, urls in self.resource_bundles
        for url in urls
    ]
    with contextlib.closing(Pool(8)) as pool:
        results = pool.map(self.downloading, tasks)
    status = [ok for ok, _ in results]
    fails = [src for ok, src in results if not ok]
    logger.info('[Downloader] download %d items (Total: %d)!', sum(status), len(status))
    return sum(status), fails
def postgres_main(ipdict, threads):
    printPink("crack postgres now...")
    print "[*] start postgres %s" % time.ctime()
    starttime = time.time()
    global lock
    lock = threading.Lock()
    global result
    result = []
    pool = Pool(threads)
    for ip in ipdict['postgres']:
        pool.apply_async(func=postgreS, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
    pool.close()
    pool.join()
    print "[*] stop crack postgres %s" % time.ctime()
    print "[*] crack postgres done,it has Elapsed time:%s " % (time.time() - starttime)
    return result
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['ssl']):
        printPink("crack ssl now...")
        print "[*] start test openssl_heart %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['ssl']:
            pool.apply_async(func=self.openssl_test, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop ssl serice %s" % time.ctime()
        print "[*] crack ssl done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['ftp']):
        printPink("crack ftp now...")
        print "[*] start crack ftp %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['ftp']:
            pool.apply_async(func=self.ftp_l, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop ftp serice %s" % time.ctime()
        print "[*] crack ftp done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['mysql']):
        printPink("crack mysql now...")
        print "[*] start crack mysql %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['mysql']:
            pool.apply_async(func=self.mysq1, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop crack mysql %s" % time.ctime()
        print "[*] crack mysql done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def pop_main(ipdict, threads):
    printPink("crack pop now...")
    print "[*] start crack pop %s" % time.ctime()
    starttime = time.time()
    global lock
    lock = threading.Lock()
    global result
    result = []
    pool = Pool(threads)
    for ip in ipdict['pop3']:
        pool.apply_async(func=pop3_l, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
    pool.close()
    pool.join()
    print "[*] stop pop serice %s" % time.ctime()
    print "[*] crack pop done,it has Elapsed time:%s " % (time.time() - starttime)
    return result
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['mongodb']):
        printPink("crack mongodb now...")
        print "[*] start crack mongodb %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['mongodb']:
            pool.apply_async(func=self.mongoDB, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop mongoDB serice %s" % time.ctime()
        print "[*] crack mongoDB done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    printPink("crack snmp now...")
    print "[*] start crack snmp %s" % time.ctime()
    starttime = time.time()
    pool = Pool(threads)
    for ip in pinglist:
        pool.apply_async(func=self.snmp_l, args=(str(ip).split(':')[0], ""))
    pool.close()
    pool.join()
    print "[*] stop crack snmp %s" % time.ctime()
    print "[*] crack snmp done,it has Elapsed time:%s " % (time.time() - starttime)
    for i in xrange(len(self.result)):
        self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['rsync']):
        printPink("crack rsync now...")
        print "[*] start crack rsync %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['rsync']:
            pool.apply_async(func=self.rsync_creak, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop rsync serice %s" % time.ctime()
        print "[*] crack rsync done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['ldap']):
        printPink("crack ldap now...")
        print "[*] start ldap %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['ldap']:
            pool.apply_async(func=self.ldap_creak, args=(str(ip).split(':')[0], str(ip).split(':')[1]))
        pool.close()
        pool.join()
        print "[*] stop ldap serice %s" % time.ctime()
        print "[*] crack ldap done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['smb']):
        printPink("crack smb now...")
        print "[*] start crack smb serice %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['smb']:
            pool.apply_async(func=self.smb_l, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop smb serice %s" % time.ctime()
        print "[*] crack smb done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['vnc']):
        printPink("crack vnc now...")
        print "[*] start crack vnc %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['vnc']:
            pool.apply_async(func=self.vnc_l, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop vnc serice %s" % time.ctime()
        print "[*] crack vnc done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def run(self, ipdict, pinglist, threads, file):
    if len(ipdict['http']):
        print "[*] start test web burp at %s" % time.ctime()
        starttime = time.time()
        pool = Pool(threads)
        for ip in ipdict['http']:
            pool.apply_async(func=self.webmain, args=(str(ip).split(':')[0], int(str(ip).split(':')[1])))
        pool.close()
        pool.join()
        print "[*] stop test iip_put&&scanner web paths at %s" % time.ctime()
        print "[*] test iip_put&&scanner web paths done,it has Elapsed time:%s " % (time.time() - starttime)
        for i in xrange(len(self.result)):
            self.config.write_file(contents=self.result[i], file=file)
def main():
    fuuNum = 0
    fuuNum_list = []
    for url in open('urlid_duo.txt'):
        url = url.strip()
        fuu_list.append(url)
        # ?0?1?2...????????fuuNum_list?
        # ??????url??????url??fuu_list?
        # fuuNum_list???????????0???fuu_list???????????url??????url
        # ????fuuNum_list??????????0??fuu_list?????????
        # ???????????????????????????~~
        fuuNum_list.append(fuuNum)
        fuuNum += 1
    pool = ThreadPool(totalThread)
    pool.map(getFuck, fuuNum_list)  # map???fuuNum_list???????????getFuck????
    pool.close()
    pool.join()
def main():
    # ???
    global totalThread
    # getAvailableIp()  # ????ip
    keywordNum = 0
    keywordNum_list = []
    for kw in open('kw.txt'):  # ?????
        kw = kw.strip()
        keyword_list.append(kw)
        keywordNum_list.append(keywordNum)
        keywordNum += 1
    pool = ThreadPool(totalThread)
    pool.map(getKeyword, keywordNum_list)
    pool.close()
    pool.join()
    '''
    gap = keywordNum/totalThread
    thread_list = []
    for line in range(0, keywordNum, gap):  # 10,5
        t = threading.Thread(target=getRange, args=(line, line+gap))
        t.start()  # ???
        thread_list.append(t)
    for tt in thread_list:  # ????
        tt.join()
    '''
def cut_Dataset(data_set, parrel=False, nomial=False):
    """
    :param data_set: bunch of Dataset
    :param parrel: if True, cut the dataset in parallel (not available on Windows)
    :param nomial: if True, only noun-like words will remain
    :return: data_set after cutting
    """
    from tqdm import tqdm
    data_cut = []
    start = time.time()
    print('cutting dataset......')
    if parrel:
        p = ThreadPool(9)
        p.map(cut_Text, data_set.data)
        p.close()
        p.join()
    else:
        n = 0
        for doc_content in tqdm(data_set.data):
            data_cut.append(cut_Text(doc_content, nomial))
    end = time.time()
    print('cutting runs %0.2f seconds.' % (end - start))
    data_set.data = data_cut
def reprojectToThisThreaded(self, sourceProjection, numThreads):
    uvList = []
    fx = float(self.imsize[0])
    fy = float(self.imsize[1])

    angleList = [self.angular_position((float(i) / fx, float(j) / fy))
                 for i in range(self.imsize[0])
                 for j in range(self.imsize[1])]

    poolAngles = ThreadPool(numThreads)
    image = poolAngles.map(sourceProjection.pixel_value, angleList)
    poolAngles.close()
    poolAngles.join()

    idx = 0
    for x in range(self.imsize[0]):
        for y in range(self.imsize[1]):
            pixel = image[idx]
            if pixel is None:
                print x, y
            else:
                self.image[y, x] = pixel
            idx = idx + 1
def pick_proxies(proxy_list, test_url, timeout):
    '''
    ????????????????????????
    '''
    proxy_num = len(proxy_list)  # ????
    pool = Pool(16)  # ?????
    kwargs = [{'test_url': test_url, 'proxy': proxy, 'timeout': timeout} for proxy in proxy_list]  # ????
    response_time_list = pool.map(multi_test_wrapper, kwargs)  # ????
    # ????????
    map_list = []  # (????, ????)?????????????????????
    for i in xrange(proxy_num):
        if response_time_list[i] < timeout:
            map_list.append((i, response_time_list[i]))
    # ???????
    # map_list = sorted(map_list, key=lambda d: d[1])
    # ?????????
    new_proxy_list = []
    for map_ in map_list:
        new_proxy_list.append(proxy_list[map_[0]])
        # print proxies_list[map_[0]], map_[1], '?'
    return new_proxy_list
def getRandomArticlesFromCategory(self, categoryName, sampleSize=50, articleNameCompare=None, multi=False):
    cacheName = "randomArticleCacheNames/" + categoryName
    articles = self.getArticles(categoryName)
    articles = [art for art in articles if not art == articleNameCompare]
    subSize = min(sampleSize, len(articles))
    subNames = random.sample(articles, subSize)
    if multi:
        _bound_instance_method_alias = functools.partial(_instance_method_alias, self)
        pool = ThreadPool(self.cpuCount)
        subText = pool.map(_bound_instance_method_alias, subNames)
        #subText = pool.map(_bound_instance_method_alias, range(len(subNames)))
    else:
        subText = [self.getArticleContent(art) for art in subNames]
    catlen = len(articles)
    '''
    with open(cacheName, "w") as f:
        for i in subNames:
            f.write(i + "\n")
    for i, t in enumerate(subText):
        with open("randomArticleCache/" + subNames[i], "w") as f:
            #print subText[i]
            f.write("\n".join(subText[i]))
    '''
    return catlen, len(subText), subNames, subText
def get_offline_user_data():
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'get_offline_user_data')

    if r_session.exists('api_error_info'):
        return

    # if datetime.now().minute < 50: return

    offline_users = []
    for b_user in r_session.mget(*['user:%s' % name.decode('utf-8')
                                   for name in r_session.sdiff('users', *r_session.smembers('global:online.users'))]):
        user_info = json.loads(b_user.decode('utf-8'))
        username = user_info.get('username')
        if not user_info.get('active'):
            continue
        every_hour_key = 'user:%s:cron_queued' % username
        if r_session.exists(every_hour_key):
            continue
        offline_users.append(username)

    pool = ThreadPool(processes=5)
    pool.map(get_data, offline_users)
    pool.close()
    pool.join()

# ??????????????
def get_offline_user_data():
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'get_offline_user_data')

    if r_session.exists('api_error_info'):
        return

    if datetime.now().minute < 50:
        return

    offline_users = []
    for b_user in r_session.mget(*['user:%s' % name.decode('utf-8')
                                   for name in r_session.sdiff('users', *r_session.smembers('global:online.users'))]):
        user_info = json.loads(b_user.decode('utf-8'))
        username = user_info.get('username')
        if not user_info.get('active'):
            continue
        every_hour_key = 'user:%s:cron_queued' % username
        if r_session.exists(every_hour_key):
            continue
        offline_users.append(username)

    pool = ThreadPool(processes=5)
    pool.map(get_data, offline_users)
    pool.close()
    pool.join()

# ??????????????
def migrate():
    """migrate data from database to json"""
    conn = sqlite3.connect("CUMTB.db")
    res = conn.execute("SELECT * FROM CUMTB")
    data = res.fetchall()
    json_data = list()
    pool = Pool(cpu_count() * 2)

    def read_conf(lst):
        res = os.popen("./encrypt {0} {1}".format(*lst))
        _data = json.loads(res.readline())
        json_data.append(_data)

    pool.map(read_conf, data)
    pool.close()
    pool.join()
    with open("there_is_nothing.json", 'w') as f:
        json.dump(json_data, f, indent=2)
def get_pages_in_category_tree(source, category, count):
    pages = set()
    seen_categories = set()
    current_categories = {category}
    while len(pages) < count:
        log.debug(len(pages))
        if not current_categories:
            break
        next_categories = set()
        with multiprocessing.Pool(processes=len(current_categories)) as pool:
            results = pool.map(lambda category: get_category_members(source, category), current_categories)
        for result in results:
            next_categories.update(result['subcats'])
            pages.update(result['pages'])
        seen_categories.update(current_categories)
        current_categories = next_categories - seen_categories
    log.debug(len(pages))
    return list(pages)