我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用time.sleep()。 (The following 50 code examples, extracted from open-source Python projects, illustrate how to use time.sleep().)
def process_request(self, request, spider):
    """Scrapy downloader middleware hook: render "jobbole" requests in a
    real browser (Selenium) so JavaScript-generated content is present.

    Returns an HtmlResponse built from the browser's rendered page,
    short-circuiting Scrapy's normal download; other spiders fall through
    (implicit None) to the default downloader.
    """
    if spider.name == "jobbole":
        self.browser.get(request.url)
        import time
        time.sleep(3)  # crude wait for dynamic content to finish loading
        print ("??:{0}".format(request.url))
        # Hand Scrapy the browser-rendered HTML instead of downloading.
        return HtmlResponse(url=self.browser.current_url,
                            body=self.browser.page_source,
                            encoding="utf-8", request=request)
    # Linux headless setup, kept for reference:
    # from pyvirtualdisplay import Display
    # display = Display(visible=0, size=(800, 600))
    # display.start()
    #
    # browser = webdriver.Chrome()
    # browser.get()
def test_worker_alarm(manager):
    # Verify that Worker uses SIGALRM to interrupt a task only when it
    # runs longer than task_timeout.
    called = []

    def handler(signal, frame):
        called.append(True)

    signal.signal(signal.SIGALRM, handler)

    @manager.task
    def foo(sleep):
        time.sleep(sleep)

    w = Worker(manager, task_timeout=1)
    # Task finishes within the 1s timeout: the alarm must not fire.
    w.process_one(make_task('foo', args=(0.1,)))
    assert not called
    # Task exceeds the timeout: the alarm handler must have run.
    w.process_one(make_task('foo', args=(1.1,)))
    assert called
def ShowOnePage(self, now_page_items, page):
    # (Python 2 code.) Download every item of the current page; when the
    # final page is reached, clean duplicate files and hard-exit the
    # process, otherwise pause briefly before returning to the caller.
    for idx, item in enumerate(now_page_items):
        print "\ndownload " + item[1]
        self.saveFile(item[0], page, idx)
    # print '========one page done.================='
    print '========Please hit the Enter.================='
    if self.unload_page_num == page:
        print '========all pages done. clean the repeated files.=========='
        self.CleanRepeatImage()  # at last, deal with the repeated images.
        print 'Nothing left. Now close this application.'
        # self.enable = False  # let the main thread know it's time to quit
        os._exit(0)  # hard exit also terminates the main thread
    # throttle between pages
    time.sleep(1)
    print 'take a snap for 1s.'
    # myInput = raw_input()
    # if myInput == ":q":
    #     self.CleanRepeatImage()  # if break manually, must clean work dir.
    #     self.enable = False  # deal with the repeated image
def Start(self):
    # (Python 2 code.) Main loop: spawn the page-loading thread, then
    # consume pages queued in self.pages one at a time until disabled.
    self.enable = True
    page = self.page
    print u'????????......'
    # start the background thread that fetches pages into self.pages
    thread.start_new_thread(self.LoadPage, ())
    time.sleep(2)  # wait the sub thread to be done.
    # ----------- consume loaded pages -----------
    while self.enable:
        # any pages queued by the loader thread yet?
        if len(self.pages) > 0:
            now_page_items = self.pages[0]
            # del now page items
            del self.pages[0]
            print '---main thred --', page
            self.ShowOnePage(now_page_items, page)
            page += 1
        # NOTE(review): placement of this print reconstructed from
        # collapsed source; it may originally have been outside the loop.
        print self.enable
    # ----------- end of main loop -----------
def blinking():
    """Blink the status LED; re-arms itself every 10 seconds while alive."""
    if keepalive:
        # Check every 10s if we are building; if not or done get latest status
        threading.Timer(10.0, blinking).start()
    # Only blink when we are actually building (or an error occurred).
    if not (building or error):
        return
    # Errors blink red; normal builds blink yellow.
    color = "red" if error else "yellow"
    alloff()
    pin = getcode(color)
    GPIO.output(pin, True)
    time.sleep(3)
    GPIO.output(pin, False)
def compute_task(task=None):
    # Remote (pycos) task: first message is the client task to reply to;
    # subsequent messages are numbers to accumulate. A None message marks
    # end-of-input, at which point the running sum is sent back.
    import time
    client = yield task.receive()  # first message is client task
    result = 0
    while True:
        n = yield task.receive()
        if n is None:  # end of requests
            client.send(result)
            break
        # long-running computation (without 'yield') is simulated with
        # 'time.sleep'; during this time client may send messages to this task
        # (which will be received and put in this task's message queue) or this
        # task can send messages to client
        time.sleep(n)
        result += n

# client (local) task runs computations
def __discover_node(self, msg, task=None):
    # (Python 2 variant — uses dict.itervalues().)
    # Handle a node (re)discovery message: locate the node's control task
    # (retrying up to 10 times), report any servers/tasks of a previous
    # incarnation as abandoned to the computation's status task, then
    # register the node as disabled until its info has been fetched.
    for _ in range(10):
        node_task = yield Task.locate('dispycos_node', location=msg.location,
                                      timeout=MsgTimeout)
        if not isinstance(node_task, Task):
            yield task.sleep(0.1)  # node task not up yet; retry
            continue
        self._disabled_nodes.pop(msg.location.addr, None)
        node = self._nodes.pop(msg.location.addr, None)
        if node:
            logger.warning('Rediscovered dispycosnode at %s; discarding previous incarnation!',
                           msg.location.addr)
            self._disabled_nodes.pop(node.addr, None)
            if self._cur_computation:
                status_task = self._cur_computation.status_task
            else:
                status_task = None
            if status_task:
                # every remote task of the old incarnation is abandoned
                for server in node.servers.itervalues():
                    for rtask, job in server.rtasks.itervalues():
                        status = pycos.MonitorException(rtask, (Scheduler.TaskAbandoned, None))
                        status_task.send(status)
                    status_task.send(DispycosStatus(Scheduler.ServerAbandoned,
                                                    server.task.location))
                info = DispycosNodeInfo(node.name, node.addr, node.cpus, node.platform,
                                        node.avail_info)
                status_task.send(DispycosStatus(Scheduler.NodeAbandoned, info))
        node = self._disabled_nodes.get(msg.location.addr, None)
        if not node:
            node = Scheduler._Node(msg.name, msg.location.addr)
            self._disabled_nodes[msg.location.addr] = node
        node.task = node_task
        yield self.__get_node_info(node, task=task)
        # done after first successful discovery
        raise StopIteration
def restart_pg():
    '''
    Stops and Starts PLUMgrid service after flushing iptables.

    Raises ValueError if plumgrid (or its libvirt-bin dependency)
    cannot be brought up.
    '''
    stop_pg()
    service_start('plumgrid')
    time.sleep(3)  # give the service a moment before probing its status
    if not service_running('plumgrid'):
        if service_running('libvirt-bin'):
            # libvirt is up, so plumgrid itself failed
            raise ValueError("plumgrid service couldn't be started")
        else:
            # try bringing the libvirt dependency up first, then retry
            if service_start('libvirt-bin'):
                time.sleep(8)
                if not service_running('plumgrid') \
                        and not service_start('plumgrid'):
                    raise ValueError("plumgrid service couldn't be started")
            else:
                raise ValueError("libvirt-bin service couldn't be started")
    status_set('active', 'Unit is ready')
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    """Make a new filesystem on the specified block device.

    Waits up to ``timeout`` seconds (polling once per second) for the
    device node to appear, then formats it with ``mkfs``.

    :param blk_device: path of the block device to format
    :param fstype: filesystem type to create (default 'ext4')
    :param timeout: seconds to wait for the device node to appear
    :raises IOError: (ENOENT) if the device never appears in time
    """
    # Fix: `os.errno` is not a real attribute of `os` (the accidental
    # aliasing was removed in Python 3.6+); use the errno module directly.
    import errno
    count = 0
    e_noent = errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('Gave up waiting on block device %s' % blk_device, level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('Waiting for block device %s to appear' % blk_device, level=DEBUG)
        count += 1
        time.sleep(1)
    # device exists: format it (equivalent to the original while/else)
    log('Formatting block device %s as filesystem %s.' % (blk_device, fstype), level=INFO)
    check_call(['mkfs', '-t', fstype, blk_device])
def create_instance(self, nova, image_name, instance_name, flavor):
    """Create the specified instance and wait for it to become ACTIVE.

    Polls every 3 seconds, up to 59 polls (~3 minutes).

    :returns: the nova server object on success, or None on timeout
    """
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.images.find(name=image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)

    count = 1
    status = instance.status
    while status != 'ACTIVE' and count < 60:
        time.sleep(3)  # poll interval
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        count += 1

    if status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None

    return instance
def delete_resource(self, resource, resource_id, msg="resource", max_wait=120):
    """Delete one openstack resource, such as one instance, keypair,
    image, volume, stack, etc., and confirm deletion within max wait time.

    :param resource: pointer to os resource type, ex:glance_client.images
    :param resource_id: unique name or id for the openstack resource
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, otherwise False
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)

    tries = 0
    num_after = len(list(resource.list()))
    # deletion is confirmed by the list count dropping by exactly one;
    # poll every 4 seconds until then or until max_wait is exhausted
    while num_after != (num_before - 1) and tries < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        tries += 1

    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))

    if num_after == (num_before - 1):
        return True
    else:
        self.log.error('{} delete timed out'.format(msg))
        return False
def service_restarted(self, sentry_unit, service, filename, pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ # /!\ DEPRECATION WARNING (beisner): # This method is prone to races in that no before-time is known. # Use validate_service_config_changed instead. # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now # used instead of pgrep. pgrep_full is still passed through to ensure # deprecation WARNS. lp1474030 self.log.warn('DEPRECATION WARNING: use ' 'validate_service_config_changed instead of ' 'service_restarted due to known races.') time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): return True else: return False
def get_iam_credential_report(self):
    # (Python 2 code — uses e.message and StringIO.)
    # Fetch the IAM credential report, triggering its generation first if
    # it does not exist yet, and return the CSV rows as a list of dicts.
    report = None
    while report == None:
        try:
            report = self.iam_client.get_credential_report()
        except botocore.exceptions.ClientError as e:
            if 'ReportNotPresent' in e.message:
                # report not generated yet; kick off generation and retry
                self.iam_client.generate_credential_report()
            else:
                raise e
            # NOTE(review): sleep placement reconstructed from collapsed
            # source; intent is to pause between retries while the report
            # is being generated.
            time.sleep(5)
    document = StringIO.StringIO(report['Content'])
    reader = csv.DictReader(document)
    report_rows = []
    for row in reader:
        report_rows.append(row)
    return report_rows
def serial_clicks_by_control_file(tag):
    # Replay a scripted sequence of tap actions (read from the splash
    # control file) on the device identified by `tag`, pacing every step
    # with sleeps so the device UI can keep up.
    log('launch click process of ' + tag)
    p_open(Res.adb_start_launcher(tag))
    sleep(Res.pkg_init_delay)  # init taking time
    lines = get_usable_lines_from_file(Res.splash_file)
    for i in range(len(lines)):
        items = lines[i].split(Res.split_mark)
        if i == Res.action_cm_launcher_index:
            send_key_home(tag)
            sleep(1)  # invoke default home takes time
        # items[1]/items[2] are the tap coordinates for this step
        p_open(Res.adb_tap_with_tag(tag, items[1], items[2]))
        if len(items) > Res.delay_index_one_point:
            # optional per-action delay supplied by the control file
            sleep(items[Res.delay_index_one_point])
        sleep(3 if i < Res.action_cm_launcher_index else Res.default_delay_in_click)
        if i >= Res.action_always_index:
            send_key_home(tag)
def start(self):
    # Main MQTT loop: connect, then pump incoming messages and outgoing
    # pushes forever; on a socket error, wait and reconnect.
    while True:
        try:
            self.connect()
            while True:
                #time.sleep(0.01) # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.client.check_msg()
                #time.sleep(0.01) # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.push()
                time.sleep(0.01)
        except OSError as e:
            Util.log(self, "failed to connect, retrying....", e)
            time.sleep(self.config["wait_to_reconnect"])
            # NOTE(review): disconnect placement reconstructed from
            # collapsed source; dropping the old session before the outer
            # loop reconnects.
            self.client.disconnect()
def test_transform_service_heartbeat(self, coordinator):
    # Verify TransformService sends heartbeats through the (mocked)
    # Kazoo coordinator driver.
    fake_kazoo_driver = MagicMock(name="MagicKazooDriver",
                                  spec=KazooDriver)
    coordinator.return_value = fake_kazoo_driver

    # option1: run the service in a daemon thread and give it 2s to beat
    serv_thread = transform_service.TransformService()
    serv_thread.daemon = True
    serv_thread.start()
    time.sleep(2)

    # option2
    # mocks dont seem to work when spawning a service
    # pid = _spawn_transform_service()
    # time.sleep()
    # os.kill(pid, signal.SIGNAL_SIGTERM)

    fake_kazoo_driver.heartbeat.assert_called_with()
def start(self): self.client.start() #~ if not self.disable_auto_login: #~ while self.client.status == 'offline': #~ time.sleep(1) #~ logger.info('Client: %s'%self.client.status) if self.server_id: already_added = False for f in self.client.get_friend_list(): if self.client.friend_get_public_key(f) in self.server_id: already_added = True logger.info('Server already in added') break if not already_added: self.client.friend_add_with_request(self.server_id,self.password) logger.info('Started Friend request to Server') else: logger.info('No Server ID given')
def wait_for_publish(seconds):
    """
    Wait for all publishing tasks to terminate. Search string is:
    label = Actions::Katello::ContentView::Publish and state = running

    Polls the task API every `seconds` seconds until no tasks remain.
    (Python 2 code.)
    """
    count = 0
    print "Waiting for publish tasks to finish..."
    # Make sure that publish tasks get the chance to appear before looking
    # for them
    time.sleep(2)
    while get_json(URL + publish_tasks)["total"] != 0:
        time.sleep(seconds)
        count += 1
    print "Finished waiting after " + str(seconds * count) + " seconds"
def _run_scan(self, scanner):
    # Launch the scan, then poll the scan list every 2 seconds until our
    # scan (matched by uuid) is no longer running or pending.
    scanner.action(
        action="scans/" + str(scanner.scan_id) + "/launch", method="POST")
    scan_uuid = scanner.res["scan_uuid"]
    running = True
    counter = 0
    while running:
        scanner.action(
            action="scans?folder_id=" + str(scanner.tag_id), method="GET")
        for scan in scanner.res["scans"]:
            if (scan["uuid"] == scan_uuid
                    and (scan['status'] == "running" or scan['status'] == "pending")):
                time.sleep(2)
                counter += 2  # seconds elapsed while waiting
            if (scan["uuid"] == scan_uuid
                    and scan['status'] != "running"
                    and scan['status'] != "pending"):
                running = False
def WaitForFileServerToStart(port):
    """
    Wait for the Flask file server to start up. Test it by trying the
    PyUpdater update URL, e.g. http://127.0.0.1:12345. If we receive a
    ConnectionError, we continue waiting, but if we receive an HTTP
    response code (404), we return True.

    Returns True once the server responds, False after ~10 failed
    attempts. (The original fell off the function with an implicit None
    on timeout; False is explicit and keeps truthiness checks working.)

    For a frozen app, e.g. a Mac .app bundle, the location of the updates
    must be supplied by an environment variable, whereas when running from
    the source repo, the location of the updates is likely to be
    ./pyu-data/deploy/
    """
    url = 'http://%s:%s/fileserver-is-ready' % (LOCALHOST, port)
    attempts = 0
    while True:
        try:
            attempts += 1
            requests.get(url, timeout=1)
            return True
        except requests.exceptions.ConnectionError:
            time.sleep(0.25)  # server not up yet; brief backoff
            if attempts > 10:
                logger.warning("WaitForFileServerToStart: timeout")
                return False
def login():
    # Try to login or sleep/wait until logged in, or exit if user/pass wrong
    NotLoggedIn = True
    while NotLoggedIn:
        try:
            reddit = praw.Reddit(
                user_agent=credsUserAgent,
                client_id=credsClientID,
                client_secret=credsClientSecret,
                username=credsUserName,
                password=credsPassword)
            print_and_log("Logged in")
            NotLoggedIn = False
        except praw.errors.InvalidUserPass:
            # bad credentials are fatal: no point retrying
            print_and_log("Wrong username or password", error=True)
            exit(1)
        except Exception as err:
            # transient failure (e.g. network/API); wait and retry
            print_and_log(str(err), error=True)
            time.sleep(5)
    return reddit
def check_fd(self):
    '''Periodically scan all tracked connections and close those whose
    "read" state has been idle longer than its allowed read timeout.

    (Original docstring/comments were mis-encoded Chinese; intent
    reconstructed from the code.)
    '''
    while True:
        for fd in self.conn_state.keys():
            sock_state = self.conn_state[fd]
            # if this fd has sat in "read" state past its idle timeout,
            # mark it for closing and drive the state machine
            if sock_state.state == "read" and sock_state.read_stime \
                    and (time.time() - sock_state.read_stime) >= sock_state.read_itime:
                sock_state.state = "closing"
                self.state_machine(fd)
        # scan once per minute
        time.sleep(60)
#}}}
#{{{fork_processes
def _test(stdscr):
    # Render a labelled swatch for every registered curses color pair so
    # they can be inspected visually; wait for a keypress before exiting.
    import time
    colors_init()
    label_width = max([len(k) for k in CURSES_COLORPAIRS.keys()])
    cols = 4
    for idx, k in enumerate(CURSES_COLORPAIRS.keys()):
        label = "{{:<{}}}".format(label_width).format(k)
        x = (idx % cols) * (label_width + 1)
        y = idx // cols
        pair = curses.color_pair(CURSES_COLORPAIRS[k])
        stdscr.addstr(y, x, label, pair)
    # NOTE(review): sleep placement reconstructed from collapsed source
    time.sleep(0.1)
    stdscr.refresh()
    stdscr.getch()
def forwarder(tasks, interval, batch_size, source, dest):
    '''Forward items from one storage to another.

    Repeatedly takes batches from the source queue and puts them into the
    destination; on a put failure the batch is returned to the source so
    nothing is lost. Sleeps `interval` seconds when the source is empty.
    '''
    from .utils import RunFlag, load_manager, redis_client
    from .store import QueueStore
    log = logging.getLogger('dsq.forwarder')
    if not tasks and not source:
        print('--tasks or --source must be provided')
        sys.exit(1)

    s = QueueStore(redis_client(source)) if source else load_manager(tasks).queue
    d = QueueStore(redis_client(dest))
    run = RunFlag()
    while run:
        batch = s.take_many(batch_size)
        if batch['schedule'] or batch['queues']:
            try:
                d.put_many(batch)
            except Exception:
                # put the batch back so nothing is lost, then propagate
                s.put_many(batch)
                log.exception('Forward error')
                raise
        else:
            # nothing to forward; idle before polling again
            time.sleep(interval)
def acquire(self, waitflag=None, timeout=-1):
    """Dummy implementation of acquire().

    Blocking calls (``waitflag`` None or truthy) always "succeed": the
    lock is simply marked held and True is returned. Non-blocking calls
    succeed only when the lock is currently free; otherwise they sleep
    for ``timeout`` seconds (if positive) and report failure. This exists
    purely so that threading.Condition's assert statements are satisfied.
    """
    blocking = waitflag is None or bool(waitflag)
    if blocking:
        self.locked_status = True
        return True
    if not self.locked_status:
        # free and non-blocking: take it
        self.locked_status = True
        return True
    # already held and non-blocking: optionally wait, then fail
    if timeout > 0:
        import time
        time.sleep(timeout)
    return False
def _talk_to_chief(self, path, data=None, default=None):
    """Contact the coordinator at `path`, retrying up to
    FLAGS.coord_retries times with a 10s pause between rounds.

    :param path: URL path on the coordinator
    :param data: optional request body (makes the request a POST)
    :param default: value returned when every attempt fails
    :returns: the response body on HTTP 200, otherwise `default`
    """
    tries = 0
    while tries < FLAGS.coord_retries:
        tries += 1
        try:
            url = 'http://%s:%d%s' % (FLAGS.coord_host, FLAGS.coord_port, path)
            log_traffic('Contacting coordinator - url: %s, tries: %d ...' % (url, tries - 1))
            res = urllib.request.urlopen(urllib.request.Request(url, data, {'content-type': 'text/plain'}))
            # Fix: the original bound the body to `str`, shadowing the builtin.
            body = res.read()
            status = res.getcode()
            log_traffic('Coordinator responded - url: %s, status: %s' % (url, status))
            if status == 200:
                return body
            log_traffic('Problem reaching coordinator - url: %s, status: %d' % (url, status))
        except Exception as ex:
            log_traffic('Problem reaching coordinator - url: %s, exception: %r' % (url, ex))
        time.sleep(10)  # back off before the next round
    return default
def set_pwm_freq(self, freq_hz):
    """Set the PWM frequency to the provided value in hertz."""
    # prescale ~= round(25MHz / (4096 * freq_hz)) - 1
    prescaleval = 25000000.0    # 25MHz
    prescaleval /= 4096.0       # 12-bit
    prescaleval /= float(freq_hz)
    prescaleval -= 1.0
    logger.debug('Setting PWM frequency to {0} Hz'.format(freq_hz))
    logger.debug('Estimated pre-scale: {0}'.format(prescaleval))
    prescale = int(math.floor(prescaleval + 0.5))  # round to nearest
    logger.debug('Final pre-scale: {0}'.format(prescale))
    # the prescaler register can only be written while the chip sleeps
    oldmode = self._device.readU8(MODE1);
    newmode = (oldmode & 0x7F) | 0x10    # sleep
    self._device.write8(MODE1, newmode)  # go to sleep
    self._device.write8(PRESCALE, prescale)
    self._device.write8(MODE1, oldmode)  # wake up
    time.sleep(0.005)  # brief pause before re-enabling output
    # set bit 7 (0x80) — presumably the chip's restart bit; confirm
    # against the device datasheet
    self._device.write8(MODE1, oldmode | 0x80)
def test_uint_multi_port(self, x_series_device, seed):
    # Loopback test: write random unsigned values spanning two digital
    # output ports and verify reading them back yields the same values.
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    do_ports = random.sample(
        [d for d in x_series_device.do_ports if d.do_port_width <= 16], 2)

    total_port_width = sum([d.do_port_width for d in do_ports])

    with nidaqmx.Task() as task:
        task.do_channels.add_do_chan(
            flatten_channel_string([d.name for d in do_ports]),
            line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

        # Generate random values to test.
        values_to_test = [int(random.getrandbits(total_port_width))
                          for _ in range(10)]

        values_read = []
        for value_to_test in values_to_test:
            task.write(value_to_test)
            time.sleep(0.001)  # let the output settle before reading back
            values_read.append(task.read())

        assert values_read == values_to_test
def test_one_sample_one_line(self, x_series_device, seed):
    # Loopback test: write random booleans to one digital line and verify
    # reading them back yields the same values.
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    do_line = random.choice(x_series_device.do_lines).name

    with nidaqmx.Task() as task:
        task.do_channels.add_do_chan(
            do_line, line_grouping=LineGrouping.CHAN_PER_LINE)

        writer = DigitalSingleChannelWriter(task.out_stream)
        reader = DigitalSingleChannelReader(task.in_stream)

        # Generate random values to test.
        values_to_test = [bool(random.getrandbits(1)) for _ in range(10)]

        values_read = []
        for value_to_test in values_to_test:
            writer.write_one_sample_one_line(value_to_test)
            time.sleep(0.001)  # let the output settle before reading back
            values_read.append(reader.read_one_sample_one_line())

        numpy.testing.assert_array_equal(values_read, values_to_test)
def test_one_sample_port_byte(self, x_series_device, seed):
    # Loopback test: write random byte-wide values to a digital port
    # (width <= 8) and verify reading them back yields the same values.
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    do_port = random.choice(
        [d for d in x_series_device.do_ports if d.do_port_width <= 8])

    with nidaqmx.Task() as task:
        task.do_channels.add_do_chan(
            do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

        # Generate random values to test.
        values_to_test = [int(random.getrandbits(do_port.do_port_width))
                          for _ in range(10)]

        writer = DigitalSingleChannelWriter(task.out_stream)
        reader = DigitalSingleChannelReader(task.in_stream)

        values_read = []
        for value_to_test in values_to_test:
            writer.write_one_sample_port_byte(value_to_test)
            time.sleep(0.001)  # let the output settle before reading back
            values_read.append(reader.read_one_sample_port_byte())

        numpy.testing.assert_array_equal(values_read, values_to_test)
def test_one_sample_port_uint16(self, x_series_device, seed):
    # Loopback test: write random 16-bit values to a digital port
    # (width <= 16) and verify reading them back yields the same values.
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    do_port = random.choice(
        [do for do in x_series_device.do_ports if do.do_port_width <= 16])

    with nidaqmx.Task() as task:
        task.do_channels.add_do_chan(
            do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

        # Generate random values to test.
        values_to_test = [int(random.getrandbits(do_port.do_port_width))
                          for _ in range(10)]

        writer = DigitalSingleChannelWriter(task.out_stream)
        reader = DigitalSingleChannelReader(task.in_stream)

        values_read = []
        for value_to_test in values_to_test:
            writer.write_one_sample_port_uint16(value_to_test)
            time.sleep(0.001)  # let the output settle before reading back
            values_read.append(reader.read_one_sample_port_uint16())

        numpy.testing.assert_array_equal(values_read, values_to_test)
def Start(self):
    # (Python 2 code; near-duplicate of the other Start() example.)
    # Spawn the page-loading thread, then consume queued pages until
    # disabled.
    self.enable = True
    page = self.page
    print u'????????......'
    # start the background thread that fetches pages into self.pages
    thread.start_new_thread(self.LoadPage, ())
    time.sleep(2)  # wait the sub thread to be done.
    # ----------- consume loaded pages -----------
    while self.enable:
        # any pages queued by the loader thread yet?
        if len(self.pages) > 0:
            now_page_items = self.pages[0]
            # del now page items
            del self.pages[0]
            self.ShowOnePage(now_page_items, page)
            page += 1
        # NOTE(review): placement of this print reconstructed from
        # collapsed source; it may originally have been outside the loop.
        print self.enable
    # ----------- end of main loop -----------
def do_full_login(account):
    # Log one account in (PTC or Google) and persist its fresh session to
    # the accounts sqlite DB, retrying on transient sqlite errors.
    # Serialize logins across threads: hold the network lock briefly so
    # parallel logins are spaced `locktime` apart.
    lock_network.acquire()
    time.sleep(locktime)
    lock_network.release()
    if account['type'] == 'ptc':
        login_ptc(account)
    elif account['type'] == 'google':
        login_google(account)
        # NOTE(review): new_session placement reconstructed from collapsed
        # source (google branch only); confirm against original project.
        new_session(account)
    else:
        lprint('[{}] Error: Login type should be either ptc or google.'.format(account['num']))
        sys.exit()
    cursor_accs = db_accs.cursor()
    while True:
        try:
            cursor_accs.execute("INSERT OR REPLACE INTO accounts VALUES(?,?,?,?,?,?,?)",
                                [account['user'], account['access_token'],
                                 account['access_expire_timestamp'],
                                 account['api_url'], 0, '0', '0'])
            db_accs.commit()
            return
        except sqlite3.OperationalError as e:
            lprint('[-] Sqlite operational error: {}, account: {} Retrying...'.format(e, account['user']))
        except sqlite3.InterfaceError as e:
            lprint('[-] Sqlite interface error: {}, account: {} Retrying...'.format(e, account['user']))
def _loop_messages(self):
    '''Slack message loop.

    Polls the RTM API while running, dispatching bot-relevant events and
    reconnecting when the websocket drops.
    '''
    if not self._slack.rtm_connect():
        raise Exception("Could not connect to Slack RTM API.\nBot token might be invalid.")
    while self.is_running:
        try:
            events = self._slack.rtm_read()
        except websocket._exceptions.WebSocketConnectionClosedException:
            # connection dropped; re-establish and poll again
            self._slack.rtm_connect()
            continue
        bot_events = self._get_bot_events(events)
        if bot_events:
            for event in bot_events:
                self._send_typing(event['channel'])
                self._process_event(event)
        time.sleep(self._reaction_interval)  # throttle polling
def query_forever(cb, interval, udp):
    # (Python 2 code.) Poll the Carbon Black server every `interval`
    # seconds and emit a JSON summary per sensor (optionally over UDP).
    while True:
        try:
            sensors = cb.sensors()
            for sensor in sensors:
                summary = {}
                summary['computer_name'] = sensor['computer_name'].strip()
                summary['id'] = sensor['id']
                summary['computer_sid'] = sensor['computer_sid'].strip()
                summary['num_storefiles_bytes'] = sensor['num_storefiles_bytes']
                summary['num_eventlog_bytes'] = sensor['num_eventlog_bytes']
                output(json.dumps(summary), udp)
        except Exception, e:
            # best-effort loop: report the error and keep polling
            print e
            pass
        time.sleep(float(interval))
    return
def start(self):
    # Start the device handler, then run the transport loop. UDP mode:
    # local UDP server plus a heartbeat timer. SSL mode: keep a TLS
    # connection to the server, sending payloads every sslIntervalSeconds
    # and reconnecting after a 10s pause on any error.
    self.deviceHandler.start()
    if self.protocol == "udp":
        self.loadState()
        self.logger.debug("udpHeartbeatSeconds = {0}".format(self.udpHeartbeatSeconds))
        self.logger.debug("udpDataPacketInterval = {0}".format(self.udpDataPacketInterval))
        self.udpServer = SocketServer.UDPServer(('0.0.0.0', 0), IotUDPHandler)
        self.udpServer.service = self
        self.udpServer.role = IotUDPHandler.CLIENT
        self.logger.info("starting UDP client at {0}:{1} connecting to {2}, state at {3}".format(self.udpServer.server_address[0], self.udpServer.server_address[1], self.serverAddr, self.stateFile))
        # heartbeat/repeat timer runs alongside the blocking UDP server
        timer = threading.Timer(0.5, self.repeat)
        timer.daemon = True
        timer.start()
        self.udpServer.serve_forever()
    elif self.protocol == "ssl":
        while True:
            self.logger.info("Connecting by SSL to server at {0}".format(self.serverAddr))
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.logger.debug("using caCertFile={0}, deviceCertFile={1}, deviceKeyFile={2}".format(self.caCertFile, self.deviceCertFile, self.deviceKeyFile))
                # mutual-auth TLS: server cert validated against our CA,
                # client cert/key presented to the server
                sslSocket = ssl.wrap_socket(sock, ca_certs=self.caCertFile, cert_reqs=ssl.CERT_REQUIRED, certfile=self.deviceCertFile, keyfile=self.deviceKeyFile, ssl_version=ssl.PROTOCOL_TLSv1)
                sslSocket.connect((self.serverAddr.split(':')[0], int(self.serverAddr.split(':')[1])))
                servercert = sslSocket.getpeercert()
                subject = dict(x[0] for x in servercert['subject'])
                self.logger.info("Connected to server with valid certificate, CN={0}".format(subject['commonName']))
                self.sslSocket = sslSocket
                # background thread handles incoming messages
                sslThread = threading.Thread(target=self.sslListen, args=(self.sslSocket,))
                sslThread.daemon = True
                sslThread.start()
                # send loop: one payload per interval
                while True:
                    payload = self.deviceHandler.getMessagePayload()
                    self.logger.debug("Sending payload to {0} by SSL: {1}".format(self.serverAddr, payload))
                    iotcommon.sendMessage(self.sslSocket, payload)
                    time.sleep(self.sslIntervalSeconds)
            except Exception as e:
                self.logger.exception(e)
                time.sleep(10)  # back off before reconnecting
def displaySensor1(self, number, description, trend):
    # Show the sensor-1 reading (formatted with a ℃ suffix) in a color
    # mapped from the value, flash the small activity dot for half a
    # second via a helper thread, and show a trend arrow.
    self.canvas.itemconfigure(self.txtSensor1, text="{0:.1f}".format(number) + u'\u2103')
    self.sensor1ts = datetime.datetime.now()  # freshness stamp for expiry
    color = self.mapColor(number)
    if description is not None:
        self.canvas.itemconfigure(self.txtSensor1Desc, text=description)
    self.canvas.itemconfigure(self.txtSensor1, fill=color)
    self.canvas.itemconfigure(self.txtSensor1BigIcon, fill=color)
    self.canvas.itemconfigure(self.txtSensor1SmallIcon, text=u'\u2022')

    def hide():
        # clear the activity dot after 0.5s
        time.sleep(0.5)
        self.canvas.itemconfigure(self.txtSensor1SmallIcon, text="")
    threading.Thread(target=hide).start()

    if trend == -1:
        self.canvas.itemconfigure(self.txtSensor1BigIcon, text=u'\u2198')
    elif trend == 1:
        self.canvas.itemconfigure(self.txtSensor1BigIcon, text=u'\u2197')
    else:
        self.canvas.itemconfigure(self.txtSensor1BigIcon, text="")
def repeat(self):
    # Once-a-second UI refresh loop: update the clock and blank out any
    # sensor display not refreshed within expirationSeconds.
    while True:
        try:
            self.displayTime()
            if (datetime.datetime.now() - self.sensor1ts).total_seconds() > self.expirationSeconds:
                self.canvas.itemconfigure(self.txtSensor1, text="")
                self.canvas.itemconfigure(self.txtSensor1BigIcon, text="")
            if (datetime.datetime.now() - self.sensor2ts).total_seconds() > self.expirationSeconds:
                self.canvas.itemconfigure(self.txtSensor2, text="")
                self.canvas.itemconfigure(self.txtSensor2BigIcon, text="")
            # t = random.random()*60-20
            # self.displaySensor1(t, "test")
        except Exception as e:
            self.logger.exception(e)
        except:
            # NOTE(review): bare except also swallows SystemExit /
            # KeyboardInterrupt; kept to preserve behavior, but worth
            # narrowing.
            pass
        time.sleep(1)
def client_proc(computation, njobs, task=None):
    # Drive njobs remote computations: schedule the computation, start one
    # remote compute_task per job, and feed each via a local feeder task.
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # send 5 requests to remote process (compute_task)
    def send_requests(rtask, task=None):
        # first send this local task (to whom rtask sends result)
        rtask.send(task)
        for i in range(5):
            # even if recipient doesn't use "yield" (such as executing long-run
            # computation, or thread-blocking function such as 'time.sleep' as
            # in this case), the message is accepted by another scheduler
            # (netpycos.Pycos) at the receiver and put in recipient's message
            # queue
            rtask.send(random.uniform(10, 20))
            # assume delay in input availability
            yield task.sleep(random.uniform(2, 5))
        # end of input is indicated with None
        rtask.send(None)
        result = yield task.receive()  # get result
        print(' %s computed result: %.4f' % (rtask.location, result))

    for i in range(njobs):
        rtask = yield computation.run(compute_task)
        if isinstance(rtask, pycos.Task):
            print(' job %d processed by %s' % (i, rtask.location))
            pycos.Task(send_requests, rtask)

    yield computation.close()
def _suspend(self, task, timeout, alarm_value, state):
    """Internal use only. See sleep/suspend in Task.

    Moves `task` from the scheduled to the suspended set, optionally
    arming a timeout after which `alarm_value` is delivered. Returns 0 on
    success, -1 on invalid use, `alarm_value` for a non-positive timeout,
    or a pending message update when one is already queued.
    """
    self._lock.acquire()
    if self.__cur_task != task:
        # only the currently running task may suspend itself
        self._lock.release()
        logger.warning('invalid "suspend" - "%s" != "%s"', task, self.__cur_task)
        return -1
    tid = task._id
    if state == Pycos._AwaitMsg_ and task._msgs:
        # a message is already waiting; deliver it without suspending
        s, update = task._msgs[0]
        if s == state:
            task._msgs.popleft()
            self._lock.release()
            return update
    if timeout is None:
        task._timeout = None
    else:
        if not isinstance(timeout, (float, int)):
            logger.warning('invalid timeout %s', timeout)
            self._lock.release()
            return -1
        if timeout <= 0:
            # zero/negative timeout "expires" immediately
            self._lock.release()
            return alarm_value
        else:
            # small epsilon — presumably guards against early wakeup from
            # timer granularity; confirm against scheduler loop
            task._timeout = _time() + timeout + 0.0001
            heappush(self._timeouts, (task._timeout, tid, alarm_value))
    self._scheduled.discard(tid)
    self._suspended.add(tid)
    task._state = state
    self._lock.release()
    return 0
def __discover_node(self, msg, task=None):
    # (Python 3 variant of the earlier example — uses dict.values().)
    # Handle a node (re)discovery message: locate the node's control task
    # (retrying up to 10 times), report any servers/tasks of a previous
    # incarnation as abandoned to the computation's status task, then
    # register the node as disabled until its info has been fetched.
    for _ in range(10):
        node_task = yield Task.locate('dispycos_node', location=msg.location,
                                      timeout=MsgTimeout)
        if not isinstance(node_task, Task):
            yield task.sleep(0.1)  # node task not up yet; retry
            continue
        self._disabled_nodes.pop(msg.location.addr, None)
        node = self._nodes.pop(msg.location.addr, None)
        if node:
            logger.warning('Rediscovered dispycosnode at %s; discarding previous incarnation!',
                           msg.location.addr)
            self._disabled_nodes.pop(node.addr, None)
            if self._cur_computation:
                status_task = self._cur_computation.status_task
            else:
                status_task = None
            if status_task:
                # every remote task of the old incarnation is abandoned
                for server in node.servers.values():
                    for rtask, job in server.rtasks.values():
                        status = pycos.MonitorException(rtask, (Scheduler.TaskAbandoned, None))
                        status_task.send(status)
                    status_task.send(DispycosStatus(Scheduler.ServerAbandoned,
                                                    server.task.location))
                info = DispycosNodeInfo(node.name, node.addr, node.cpus, node.platform,
                                        node.avail_info)
                status_task.send(DispycosStatus(Scheduler.NodeAbandoned, info))
        node = self._disabled_nodes.get(msg.location.addr, None)
        if not node:
            node = Scheduler._Node(msg.name, msg.location.addr)
            self._disabled_nodes[msg.location.addr] = node
        node.task = node_task
        yield self.__get_node_info(node, task=task)
        # done after first successful discovery
        raise StopIteration
def stop_pg():
    '''
    Stops PLUMgrid service.
    '''
    service_stop('plumgrid')
    time.sleep(2)  # brief pause to let the service shut down cleanly
def remove_iovisor():
    '''
    Removes iovisor kernel module.
    '''
    _exec_cmd(cmd=['rmmod', 'iovisor'],
              error_msg='Error Removing IOVisor Kernel Module')
    time.sleep(1)  # allow the module unload to settle