The following 50 code examples, extracted from Python open-source projects, illustrate how to use time.time().
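As a quick orientation before the extracted snippets, here is a minimal sketch of the pattern most of them rely on: calling time.time() before and after a piece of work to measure elapsed wall-clock seconds. The workload and variable names below are illustrative placeholders, not taken from any of the listed projects.

import time

start = time.time()                       # current time in seconds since the Unix epoch (float)
total = sum(i * i for i in range(10**6))  # placeholder workload
elapsed = time.time() - start             # elapsed wall-clock seconds
print('computed %d in %.3f seconds' % (total, elapsed))
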
def test_worker_alarm(manager):
    called = []

    def handler(signal, frame):
        called.append(True)

    signal.signal(signal.SIGALRM, handler)

    @manager.task
    def foo(sleep):
        time.sleep(sleep)

    w = Worker(manager, task_timeout=1)
    w.process_one(make_task('foo', args=(0.1,)))
    assert not called

    w.process_one(make_task('foo', args=(1.1,)))
    assert called

def get_captcha():
    import time
    t = str(int(time.time() * 1000))
    captcha_url = "https://www.zhihu.com/captcha.gif?r={0}&type=login".format(t)
    t = session.get(captcha_url, headers=header)
    with open("captcha.jpg", "wb") as f:
        f.write(t.content)
        f.close()

    from PIL import Image
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except:
        pass

    captcha = input("Please enter the captcha\n> ")
    return captcha

def run_async_at(self, where, gen, *args, **kwargs):
    """Must be used with 'yield' as
    'rtask = yield computation.run_async_at(where, gen, ...)'

    Run given generator function 'gen' with arguments 'args' and 'kwargs' at
    remote server 'where'.  If the request is successful, 'rtask' will be a
    (remote) task; check result with 'isinstance(rtask, pycos.Task)'. The
    generator is supposed to be (mostly) I/O bound and not consume CPU time.
    Unlike other 'run' variants, tasks created with 'async' are not "tracked"
    by scheduler (see online documentation for more details).

    If 'where' is a string, it is assumed to be IP address of a node, in
    which case the task is scheduled at that node on a server at that node.
    If 'where' is a Location instance, it is assumed to be server location
    in which case the task is scheduled at that server.

    'gen' must be generator function, as it is used to run task at remote
    location.
    """
    yield self._run_request('run_async', where, 0, gen, *args, **kwargs)

def _pulse_proc(self, task=None):
    """For internal use only.
    """
    task.set_daemon()
    last_pulse = time.time()
    timeout = 2 * self._pulse_interval
    while 1:
        msg = yield task.receive(timeout=timeout)
        if msg == 'pulse':
            last_pulse = time.time()
        elif msg == 'quit':
            break
        elif msg is None and (time.time() - last_pulse) > (10 * self._pulse_interval):
            logger.warning('scheduler may have gone away!')
        else:
            logger.debug('ignoring invalid pulse message')
    self._pulse_task = None

def __init__(self, name, addr):
    self.name = name
    self.addr = addr
    self.cpus_used = 0
    self.cpus = 0
    self.platform = None
    self.avail_info = None
    self.servers = {}
    self.disabled_servers = {}
    self.load = 0.0
    self.status = Scheduler.NodeClosed
    self.task = None
    self.last_pulse = time.time()
    self.lock = pycos.Lock()
    self.avail = pycos.Event()
    self.avail.clear()

def shutdown(self, wait=True):
    """This method should be called by user program to close the
    http server. If 'wait' is True the server waits for poll_sec
    so the http client gets all the updates before server is closed.
    """
    if wait:
        pycos.logger.info('HTTP server waiting for %s seconds for client updates '
                          'before quitting', self._poll_sec)
        if pycos.Pycos().cur_task():
            def _shutdown(task=None):
                yield task.sleep(self._poll_sec + 0.5)
                self._server.shutdown()
                self._server.server_close()
            pycos.Task(_shutdown)
        else:
            time.sleep(self._poll_sec + 0.5)
            self._server.shutdown()
            self._server.server_close()
    else:
        self._server.shutdown()
        self._server.server_close()

def create_vector_dictionary(self):
    """
    Extracts the current word vectors from TensorFlow embeddings and (if
    print_simlex=True) prints their SimLex scores.
    """
    log_time = time.time()

    [current_vectors] = self.sess.run([self.W_dynamic])
    self.word_vectors = {}
    for idx in range(0, self.vocabulary_size):
        self.word_vectors[self.inverted_index[idx]] = normalise_vector(current_vectors[idx, :])

    if self.log_scores_over_time or self.print_simlex:
        (score_simlex, score_wordsim) = simlex_scores(self.word_vectors, self.print_simlex)
        return (score_simlex, score_wordsim)

    return (1.0, 1.0)

def service_restarted(self, sentry_unit, service, filename,
                      pgrep_full=None, sleep_time=20):
    """Check if service was restarted.

    Compare a service's start time vs a file's last modification time
    (such as a config file for that service) to determine if the service
    has been restarted.
    """
    # /!\ DEPRECATION WARNING (beisner):
    # This method is prone to races in that no before-time is known.
    # Use validate_service_config_changed instead.

    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep.  pgrep_full is still passed through to ensure
    # deprecation WARNS.  lp1474030
    self.log.warn('DEPRECATION WARNING:  use '
                  'validate_service_config_changed instead of '
                  'service_restarted due to known races.')

    time.sleep(sleep_time)
    if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
            self._get_file_mtime(sentry_unit, filename)):
        return True
    else:
        return False

def add_to_redis(content, through, keys):
    """Add content to a list of Redis ordered sets.

    :param content: Content object to add
    :param through: Content through object. For example on shares, this is the linked share content object
    :param keys: List of keys to add to
    """
    if not keys:
        return
    r = get_redis_connection()
    for key in keys:
        # Only add if not in the set already
        # This stops shares popping up more than once, for example
        if not r.zrank(key, content.id):
            r.zadd(key, int(time.time()), content.id)
            r.hset(BaseStream.get_throughs_key(key), content.id, through.id)

def start(self):
    while True:
        try:
            self.connect()
            while True:
                #time.sleep(0.01)  # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.client.check_msg()
                #time.sleep(0.01)  # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.push()
                time.sleep(0.01)
        except OSError as e:
            Util.log(self, "failed to connect, retrying....", e)
            time.sleep(self.config["wait_to_reconnect"])

    self.client.disconnect()

def _test_should_sleep(self, seconds_left, slept):
    attempt = 5
    timeout = 20
    interval = 3
    randint = 2
    deadline = self.now + seconds_left
    retry = h_retry.Retry(mock.Mock(), timeout=timeout, interval=interval)

    with mock.patch('random.randint') as m_randint, \
            mock.patch('time.sleep') as m_sleep:
        m_randint.return_value = randint

        ret = retry._sleep(deadline, attempt, _EX2())

    self.assertEqual(slept, ret)
    m_randint.assert_called_once_with(1, 2 ** attempt - 1)
    m_sleep.assert_called_once_with(slept)

def _populate_pool(self, pool_key, pod, subnets):
    # REVISIT(ltomasbo): Drop the subnets parameter and get the information
    # from the pool_key, which will be required when multi-network is
    # supported
    now = time.time()
    if (now - oslo_cfg.CONF.vif_pool.ports_pool_update_frequency <
            self._last_update.get(pool_key, 0)):
        LOG.info("Not enough time since the last pool update")
        return
    self._last_update[pool_key] = now

    pool_size = self._get_pool_size(pool_key)
    if pool_size < oslo_cfg.CONF.vif_pool.ports_pool_min:
        num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
                        oslo_cfg.CONF.vif_pool.ports_pool_min - pool_size)
        vifs = self._drv_vif.request_vifs(
            pod=pod,
            project_id=pool_key[1],
            subnets=subnets,
            security_groups=list(pool_key[2]),
            num_ports=num_ports)
        for vif in vifs:
            self._existing_vifs[vif.id] = vif
            self._available_ports_pools.setdefault(pool_key, []).append(vif.id)

def start(self):
    self.client.start()
    #~ if not self.disable_auto_login:
    #~     while self.client.status == 'offline':
    #~         time.sleep(1)
    #~         logger.info('Client: %s' % self.client.status)
    if self.server_id:
        already_added = False
        for f in self.client.get_friend_list():
            if self.client.friend_get_public_key(f) in self.server_id:
                already_added = True
                logger.info('Server already in added')
                break
        if not already_added:
            self.client.friend_add_with_request(self.server_id, self.password)
            logger.info('Started Friend request to Server')
    else:
        logger.info('No Server ID given')

def __request(self, methodname, args):
    logger.info('Execute: %s%s' % (methodname, repr(args)))
    data = xmlrpclib.dumps(args, methodname, allow_none=True)
    self.exec_lock.acquire()
    if not self.client.data_send(0, data, self.timeout):
        logger.warning('Raising Error, Timeout reached')
        self.exec_lock.release()
        raise IOError, 'Timeout'
    recdata = None
    time_to_wait = int(time.time()) + self.timeout
    while not recdata:
        timenow = int(time.time())
        if timenow > time_to_wait:
            logger.warning('Raising Error, Timeout reached')
            self.exec_lock.release()
            raise IOError, 'Timeout'
        recdata = self.client.data_recv()
        time.sleep(0.1)
    self.exec_lock.release()
    returndata = xmlrpclib.loads(recdata['data'], use_datetime=True)
    logger.info('got %s' % str(returndata))
    return returndata[0][0]

def test(args, env, agent):
    if args.record:
        if 'env' in vars(args):
            env = wrappers.Monitor(env, './videos/' + args.env + str(time()) + '/')
        else:
            env = wrappers.Monitor(env, './videos/' + str(time()) + '/')
    test_rewards = []
    test_start = time()
    test_steps = 0
    for iteration in range(1, 1 + args.n_test_iter):
        state = env.reset()
        iter_rewards = 0.0
        done = False
        while not done:
            test_steps += 1
            action, _ = agent.forward(state)
            state, reward, done, _ = env.step(action)
            iter_rewards += reward
        test_rewards.append(iter_rewards)
    print_stats('Test', test_rewards, args.n_test_iter, time() - test_start,
                test_steps, 0, agent)
    return test_rewards

def eval_pred(dr_model, ub):
    '''
    evaluate dream model for predicting next basket
    on all training users in batches
    '''
    item_embedding = dr_model.encode.weight
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_model.config.batch_size)
    start_time = time()
    id_u, score_u = [], []  # user's id, user's score
    num_batchs = ceil(len(ub) / dr_model.config.batch_size)
    for i, x in enumerate(batchify(ub, dr_model.config.batch_size)):
        print(i)
        baskets, lens, uids = x
        _, dynamic_user, _ = dr_model(baskets, lens, dr_hidden)  # shape: batch_size, max_len, embedding_size
        dr_hidden = repackage_hidden(dr_hidden)
        for i, l, du in zip(uids, lens, dynamic_user):
            du_latest = du[l - 1].unsqueeze(0)  # shape: 1, embedding_size
            score_up = torch.mm(du_latest, item_embedding.t())  # shape: 1, num_item
            score_u.append(score_up.cpu().data.numpy())
            id_u.append(i)
    elapsed = time() - start_time
    print('[Predicting] Elapsed: {:02.2f}'.format(elapsed))  # format spec fixed: '{02.2f}' is not valid
    return score_u, id_u  # original returned undefined 'score_ub'; 'score_u' is the collected list

def evaluate_dream():
    dr_model.eval()
    dr_hidden = dr_model.init_hidden(dr_config.batch_size)

    total_loss = 0
    start_time = time()
    num_batchs = ceil(len(test_ub) / dr_config.batch_size)
    for i, x in enumerate(batchify(test_ub, dr_config.batch_size)):
        baskets, lens, _ = x
        dynamic_user, _ = dr_model(baskets, lens, dr_hidden)
        loss = bpr_loss(baskets, dynamic_user, dr_model.encode.weight, dr_config)
        dr_hidden = repackage_hidden(dr_hidden)
        total_loss += loss.data

    # Logging
    elapsed = (time() - start_time) * 1000 / num_batchs
    total_loss = total_loss[0] / num_batchs
    print('[Evaluation]| Epochs {:3d} | Elapsed {:02.2f} | Loss {:05.2f} |'.format(epoch, elapsed, total_loss))
    return total_loss

def check_fd(self):
    '''Check fds in the "read" state: if a connection has been waiting to
    read for longer than its read idle timeout, mark it for closing.
    '''
    while True:
        for fd in self.conn_state.keys():
            sock_state = self.conn_state[fd]
            # fd is in "read" state and has a recorded read start time;
            # if it has been idle longer than read_itime, it has timed out
            if sock_state.state == "read" and sock_state.read_stime \
                    and (time.time() - sock_state.read_stime) >= sock_state.read_itime:
                # close the timed-out fd
                sock_state.state = "closing"
                self.state_machine(fd)
        # check again after a while
        time.sleep(60)
#}}}

#{{{fork_processes

def uptime(self):
    with open('/proc/uptime', 'r') as f:
        uptime, idletime = f.readline().split()

    up_seconds = int(float(uptime))
    idle_seconds = int(float(idletime))
    # on some machines, such as Linode VPS, idle time may be bigger than up time
    if idle_seconds > up_seconds:
        cpu_count = multiprocessing.cpu_count()
        idle_seconds = idle_seconds / cpu_count
        # on some VPS this value may still be bigger than up time;
        # maybe the domain 0 machine has more cores,
        # so we calculate it approximately
        if idle_seconds > up_seconds:
            for n in range(2, 10):
                if idle_seconds / n < up_seconds:
                    idle_seconds = idle_seconds / n
                    break

    fmt = '{days} days {hours} hours {minutes} minutes {seconds} seconds'
    uptime_string = strfdelta(datetime.timedelta(seconds=up_seconds), fmt)
    idletime_string = strfdelta(datetime.timedelta(seconds=idle_seconds), fmt)

    return {
        'up': uptime_string,
        'idle': idletime_string,
        'idle_rate': div_percent(idle_seconds, up_seconds),
    }

def setup_pubsub(self):
    freq_params = diagnostic_updater.FrequencyStatusParam({'min': self.diag_update_freq,
                                                           'max': self.diag_update_freq},
                                                          self.diag_freq_tolerance,
                                                          self.diag_window_size)
    time_params = diagnostic_updater.TimeStampStatusParam(self.diag_min_delay, self.diag_max_delay)

    self.pub_fix = rospy.Publisher("~fix", NavSatFix, queue_size=1000)

    self.pub_spp_fix = diagnostic_updater.DiagnosedPublisher(rospy.Publisher("~spp_fix", NavSatFix, queue_size=1000),
                                                             self.diag_updater, freq_params, time_params)

    self.pub_rtk_fix = diagnostic_updater.DiagnosedPublisher(rospy.Publisher("~rtk_fix", NavSatFix, queue_size=1000),
                                                             self.diag_updater, freq_params, time_params)

    #self.pub_rtk = diagnostic_updater.DiagnosedPublisher(rospy.Publisher("~rtk_odom", Odometry, queue_size=1000), self.diag_updater, freq_params, time_params)

    self.pub_odom = diagnostic_updater.DiagnosedPublisher(rospy.Publisher("~odom", Odometry, queue_size=1000),
                                                          self.diag_updater, freq_params, time_params)

    self.pub_time = diagnostic_updater.DiagnosedPublisher(rospy.Publisher("~time", TimeReference, queue_size=1000),
                                                          self.diag_updater, freq_params, time_params)

    if self.publish_utm_rtk_tf or self.publish_rtk_child_tf:
        self.tf_br = tf2_ros.TransformBroadcaster()

    if self.publish_ephemeris:
        self.pub_eph = rospy.Publisher("~ephemeris", Ephemeris, queue_size=1000)

    if self.publish_observations:
        self.pub_obs = rospy.Publisher('~observations', Observations, queue_size=1000)

def append(self, argument, typehint=None):
    """Appends data to the bundle, creating an OSCMessage to encapsulate
    the provided argument unless this is already an OSCMessage.
    Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.

    If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
    Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
      - if 'addr' appears in the dict, its value overrides the OSCBundle's address
      - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
    """
    if isinstance(argument, OSCMessage):
        binary = OSCBlob(argument.getBinary())
    else:
        msg = OSCMessage(self.address)
        if type(argument) == dict:
            if 'addr' in argument:
                msg.setAddress(argument['addr'])
            if 'args' in argument:
                msg.append(argument['args'], typehint)
        else:
            msg.append(argument, typehint)
        binary = OSCBlob(msg.getBinary())

    self.message += binary
    self.typetags += 'b'

def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation
    """
    if time > 0:
        fract, secs = math.modf(time)
        secs = secs - NTP_epoch
        binary = struct.pack('>LL', int(secs), int(fract * NTP_units_per_second))
    else:
        binary = struct.pack('>LL', 0, 1)

    return binary

######
#
# OSCMessage decoding functions
#
######

def append(self, argument, typehint=None):
    """Appends data to the bundle, creating an OSCMessage to encapsulate
    the provided argument unless this is already an OSCMessage.
    Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.

    If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
    Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
      - if 'addr' appears in the dict, its value overrides the OSCBundle's address
      - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
    """
    if isinstance(argument, OSCMessage):
        binary = OSCBlob(argument.getBinary())
    else:
        msg = OSCMessage(self.address)
        if type(argument) == types.DictType:
            if 'addr' in argument:
                msg.setAddress(argument['addr'])
            if 'args' in argument:
                msg.append(argument['args'], typehint)
        else:
            msg.append(argument, typehint)
        binary = OSCBlob(msg.getBinary())

    self.message += binary
    self.typetags += 'b'

def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation
    """
    if time > 0:
        fract, secs = math.modf(time)
        secs = secs - NTP_epoch
        binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
    else:
        binary = struct.pack('>LL', 0L, 1L)

    return binary

######
#
# OSCMessage decoding functions
#
######

def forwarder(tasks, interval, batch_size, source, dest):
    '''Forward items from one storage to another.'''
    from .utils import RunFlag, load_manager, redis_client
    from .store import QueueStore
    log = logging.getLogger('dsq.forwarder')

    if not tasks and not source:
        print('--tasks or --source must be provided')
        sys.exit(1)

    s = QueueStore(redis_client(source)) if source else load_manager(tasks).queue
    d = QueueStore(redis_client(dest))
    run = RunFlag()
    while run:
        batch = s.take_many(batch_size)
        if batch['schedule'] or batch['queues']:
            try:
                d.put_many(batch)
            except Exception:
                s.put_many(batch)
                log.exception('Forward error')
                raise
        else:
            time.sleep(interval)

def process(self, queue_list, burst=False):  # pragma: no cover
    signal.signal(signal.SIGALRM, self.alarm_handler)
    run = RunFlag()
    start = time()
    while run:
        task = self.manager.pop(queue_list, 1)
        if task:
            try:
                self.process_one(task)
            except StopWorker:
                break
        elif burst:
            break

        if self.lifetime and time() - start > self.lifetime:
            break

    self.manager.close()

def reschedule(self, now=None):
    now = now or time()
    items, _, size = (self.client.pipeline()
                      .zrangebyscore(SCHEDULE_KEY, '-inf', now)
                      .zremrangebyscore(SCHEDULE_KEY, '-inf', now)
                      .zcard(SCHEDULE_KEY)
                      .execute())

    for chunk in iter_chunks(items, 5000):
        pipe = self.client.pipeline(False)
        for r in chunk:
            queue, _, task = r.partition(b':')
            pipe.rpush(rqname(queue), task)
        pipe.execute()

    return size

def make(self):
    """make the tree"""
    start_time = time()

    if __logging__:
        print('** Generating Tree **')

    # create parent object
    self.tree_obj = bpy.data.objects.new('Tree', None)
    bpy.context.scene.objects.link(self.tree_obj)
    bpy.context.scene.objects.active = self.tree_obj

    # create branches
    self.create_branches()

    # create leaf mesh if needed
    self.create_leaf_mesh()

    g_time = time() - start_time
    if __logging__:
        print('Tree generated in %f seconds' % g_time)

def __init__(self, *args, **kwargs):
    # The Windows terminal does not support the hide/show cursor ANSI codes
    # even with colorama. So we'll ensure that hide_cursor is False on
    # Windows.
    # This call needs to go before the super() call, so that hide_cursor
    # is set in time. The base progress bar class writes the "hide cursor"
    # code to the terminal in its init, so if we don't set this soon
    # enough, we get a "hide" with no corresponding "show"...
    if WINDOWS and self.hide_cursor:
        self.hide_cursor = False

    super(WindowsMixin, self).__init__(*args, **kwargs)

    # Check if we are running on Windows and we have the colorama module,
    # if we do then wrap our file with it.
    if WINDOWS and colorama:
        self.file = colorama.AnsiToWin32(self.file)

        # The progress code expects to be able to call self.file.isatty()
        # but the colorama.AnsiToWin32() object doesn't have that, so we'll
        # add it.
        self.file.isatty = lambda: self.file.wrapped.isatty()

        # The progress code expects to be able to call self.file.flush()
        # but the colorama.AnsiToWin32() object doesn't have that, so we'll
        # add it.
        self.file.flush = lambda: self.file.wrapped.flush()

def _test_generator(n, func, args):
    import time
    print(n, 'times', func.__name__)
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print(round(t1-t0, 3), 'sec,', end=' ')
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print('avg %g, stddev %g, min %g, max %g\n' % \
          (avg, stddev, smallest, largest))

def _test(N=2000):
    _test_generator(N, random, ())
    _test_generator(N, normalvariate, (0.0, 1.0))
    _test_generator(N, lognormvariate, (0.0, 1.0))
    _test_generator(N, vonmisesvariate, (0.0, 1.0))
    _test_generator(N, gammavariate, (0.01, 1.0))
    _test_generator(N, gammavariate, (0.1, 1.0))
    _test_generator(N, gammavariate, (0.1, 2.0))
    _test_generator(N, gammavariate, (0.5, 1.0))
    _test_generator(N, gammavariate, (0.9, 1.0))
    _test_generator(N, gammavariate, (1.0, 1.0))
    _test_generator(N, gammavariate, (2.0, 1.0))
    _test_generator(N, gammavariate, (20.0, 1.0))
    _test_generator(N, gammavariate, (200.0, 1.0))
    _test_generator(N, gauss, (0.0, 1.0))
    _test_generator(N, betavariate, (3.0, 3.0))
    _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))

# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.

def do_full_login(account):
    lock_network.acquire()
    time.sleep(locktime)
    lock_network.release()
    if account['type'] == 'ptc':
        login_ptc(account)
    elif account['type'] == 'google':
        login_google(account)
        new_session(account)
    else:
        lprint('[{}] Error: Login type should be either ptc or google.'.format(account['num']))
        sys.exit()

    cursor_accs = db_accs.cursor()
    while True:
        try:
            cursor_accs.execute("INSERT OR REPLACE INTO accounts VALUES(?,?,?,?,?,?,?)",
                                [account['user'], account['access_token'],
                                 account['access_expire_timestamp'], account['api_url'], 0, '0', '0'])
            db_accs.commit()
            return
        except sqlite3.OperationalError as e:
            lprint('[-] Sqlite operational error: {}, account: {} Retrying...'.format(e, account['user']))
        except sqlite3.InterfaceError as e:
            lprint('[-] Sqlite interface error: {}, account: {} Retrying...'.format(e, account['user']))

def update_data():
    timenow = int(round(time.time(), 0))
    cursor_data = db_data.cursor()
    for l in range(0, len(data_buffer)):
        [pokeid, spawnid, latitude, longitude, expiretime, addinfo] = data_buffer.pop()
        db_repeat = True
        while db_repeat:
            try:
                cursor_data.execute("INSERT OR REPLACE INTO spawns VALUES(?,?,?,?,?,?,?,?)",
                                    [spawnid, round(latitude, 5), round(longitude, 5), addinfo,
                                     pokeid, expiretime, timenow, wID])
                db_repeat = False
            except sqlite3.OperationalError as e:
                lprint('[-] Sqlite operational error: {} Retrying...'.format(e))
    while True:
        try:
            db_data.commit()
            return
        except sqlite3.OperationalError as e:
            lprint('[-] Sqlite operational error: {} Retrying...'.format(e))

def _get_cache(ttl, cache_path):
    '''
    If url contains valid cache, returns it, else returns empty list.
    '''
    # Check if we have a valid cached version.
    try:
        cached_time = os.path.getmtime(cache_path)
    except OSError:
        return []

    if current_time() - cached_time < ttl:
        log.debug('%s is less than ttl', cache_path)
        try:
            with open(cache_path) as json_file:
                loaded_json = json.load(json_file)
                return loaded_json
        except IOError:
            return []
        except ValueError:
            log.error('%s was not json formatted', cache_path)
            return []
    else:
        log.debug('%s was older than ttl', cache_path)
        return []

def run_demo(updated_services):
    # build both graphs from the edge descriptions
    graph_a = Graph()
    build_dummy_graph(GRAPH_A, graph_a)
    graph_b = Graph()
    build_dummy_graph(GRAPH_B, graph_b)

    # print them as .dot files for reference
    graph_a.print_graph("graph-a", True, True)
    graph_b.print_graph("graph-b", False, True)

    # generate the differences by considering an update on service 'a'
    start_time = time.time()
    diff_graph = graph_a.get_diff(graph_b, updated_services)
    print("--- graph differences calculated in %s seconds --- " % (time.time() - start_time))
    diff_graph.print_graph("graph-diff", False, True)

def login(self, response):
    response_text = response.text
    match_obj = re.match('.*name="_xsrf" value="(.*?)"', response_text, re.DOTALL)
    xsrf = ''
    if match_obj:
        xsrf = (match_obj.group(1))

    if xsrf:
        post_url = "https://www.zhihu.com/login/phone_num"
        post_data = {
            "_xsrf": xsrf,
            "phone_num": "18487255487",
            "password": "ty158917",
            "captcha": ""
        }

        import time
        t = str(int(time.time() * 1000))
        captcha_url = "https://www.zhihu.com/captcha.gif?r={0}&type=login".format(t)
        yield scrapy.Request(captcha_url, headers=self.headers,
                             meta={"post_data": post_data},
                             callback=self.login_after_captcha)

def pycos_proc(n, task=None):
    s = random.uniform(0.5, 3)
    print('%f: process %d sleeping for %f seconds' % (time.time(), n, s))
    yield task.sleep(s)
    print('%f: process %d terminating' % (time.time(), n))

# create 10 clients

def _run_request(self, request, where, cpu, gen, *args, **kwargs):
    """Internal use only.
    """
    if isinstance(gen, str):
        name = gen
    else:
        name = gen.func_name

    if name in self._xfer_funcs:
        code = None
    else:
        # if not inspect.isgeneratorfunction(gen):
        #     logger.warning('"%s" is not a valid generator function', name)
        #     raise StopIteration([])
        code = inspect.getsource(gen).lstrip()

    def _run_req(task=None):
        msg = {'req': 'job', 'auth': self._auth,
               'job': _DispycosJob_(request, task, name, where, cpu, code, args, kwargs)}
        if (yield self.scheduler.deliver(msg, timeout=MsgTimeout)) == 1:
            reply = yield task.receive()
            if isinstance(reply, Task):
                if self.status_task:
                    msg = DispycosTaskInfo(reply, args, kwargs, time.time())
                    self.status_task.send(DispycosStatus(Scheduler.TaskCreated, msg))
                if not request.endswith('async'):
                    reply = yield task.receive()
        else:
            reply = None
        raise StopIteration(reply)

    yield Task(_run_req).finish()

def __init__(self, name, addr):
    self.name = name
    self.addr = addr
    self.status = None
    self.servers = {}
    self.update_time = time.time()
    self.tasks_submitted = 0
    self.tasks_done = 0
    self.avail_info = None

def _add_timeout(self, fd):
    if fd._timeout:
        self._lock.acquire()
        fd._timeout_id = _time() + fd._timeout + 0.0001
        i = bisect_left(self._timeouts, (fd._timeout_id, fd))
        self._timeouts.insert(i, (fd._timeout_id, fd))
        if self._polling:
            self.interrupt()
        self._lock.release()
    else:
        fd._timeout_id = None

def _add_timeout(self, fd):
    if fd._timeout:
        fd._timeout_id = _time() + fd._timeout + 0.0001
        i = bisect_left(self._timeouts, (fd._timeout_id, fd))
        self._timeouts.insert(i, (fd._timeout_id, fd))
    else:
        fd._timeout_id = None

def wait(self, timeout=None):
    """Must be used with 'yield' as 'yield cv.wait()'.
    """
    task = Pycos.cur_task(self._scheduler)
    if self._owner != task:
        raise RuntimeError('"%s"/%s: invalid lock release - owned by "%s"/%s' %
                           (task._name, task._id, self._owner._name, self._owner._id))
    assert self._depth > 0
    depth = self._depth
    self._depth = 0
    self._owner = None
    if self._waitlist:
        wake = self._waitlist.pop(0)
        wake._proceed_(True)
    self._notifylist.append(task)
    start = _time()
    if (yield task._await_(timeout)) is None:
        try:
            self._notifylist.remove(task)
        except ValueError:
            pass
        raise StopIteration(False)
    while self._owner is not None:
        self._waitlist.insert(0, task)
        if timeout is not None:
            timeout -= (_time() - start)
            if timeout <= 0:
                raise StopIteration(False)
            start = _time()
        if (yield task._await_(timeout)) is None:
            try:
                self._waitlist.remove(task)
            except ValueError:
                pass
            raise StopIteration(False)
    assert self._depth == 0
    self._owner = task
    self._depth = depth
    raise StopIteration(True)

def receive(self, category=None, timeout=None, alarm_value=None):
    """Similar to 'receive' of Task, except it retrieves (waiting, if
    necessary) messages in given 'category'.
    """
    # assert Pycos.cur_task() == self._task
    c = self._categories.get(category, None)
    if c:
        msg = c.popleft()
        raise StopIteration(msg)
    if timeout:
        start = _time()
    while 1:
        msg = yield self._task.receive(timeout=timeout, alarm_value=alarm_value)
        if msg == alarm_value:
            raise StopIteration(msg)
        for categorize in self._categorize:
            c = categorize(msg)
            if c == category:
                raise StopIteration(msg)
            if c is not None:
                bucket = self._categories.get(c, None)
                if not bucket:
                    bucket = self._categories[c] = collections.deque()
                bucket.append(msg)
                break
        else:
            self._categories[None].append(msg)
        if timeout:
            now = _time()
            timeout -= now - start
            start = now

def _suspend(self, task, timeout, alarm_value, state):
    """Internal use only. See sleep/suspend in Task.
    """
    self._lock.acquire()
    if self.__cur_task != task:
        self._lock.release()
        logger.warning('invalid "suspend" - "%s" != "%s"', task, self.__cur_task)
        return -1
    tid = task._id
    if state == Pycos._AwaitMsg_ and task._msgs:
        s, update = task._msgs[0]
        if s == state:
            task._msgs.popleft()
            self._lock.release()
            return update
    if timeout is None:
        task._timeout = None
    else:
        if not isinstance(timeout, (float, int)):
            logger.warning('invalid timeout %s', timeout)
            self._lock.release()
            return -1
        if timeout <= 0:
            self._lock.release()
            return alarm_value
        else:
            task._timeout = _time() + timeout + 0.0001
            heappush(self._timeouts, (task._timeout, tid, alarm_value))
    self._scheduled.discard(tid)
    self._suspended.add(tid)
    task._state = state
    self._lock.release()
    return 0

def _run_request(self, request, where, cpu, gen, *args, **kwargs):
    """Internal use only.
    """
    if isinstance(gen, str):
        name = gen
    else:
        name = gen.__name__

    if name in self._xfer_funcs:
        code = None
    else:
        # if not inspect.isgeneratorfunction(gen):
        #     logger.warning('"%s" is not a valid generator function', name)
        #     raise StopIteration([])
        code = inspect.getsource(gen).lstrip()

    def _run_req(task=None):
        msg = {'req': 'job', 'auth': self._auth,
               'job': _DispycosJob_(request, task, name, where, cpu, code, args, kwargs)}
        if (yield self.scheduler.deliver(msg, timeout=MsgTimeout)) == 1:
            reply = yield task.receive()
            if isinstance(reply, Task):
                if self.status_task:
                    msg = DispycosTaskInfo(reply, args, kwargs, time.time())
                    self.status_task.send(DispycosStatus(Scheduler.TaskCreated, msg))
                if not request.endswith('async'):
                    reply = yield task.receive()
        else:
            reply = None
        raise StopIteration(reply)

    yield Task(_run_req).finish()