The following 50 code examples, extracted from open-source Python projects, illustrate how to use sched.scheduler().
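Before the project examples, here is a minimal self-contained sketch of the core API (the names greet/world are illustrative, not from any project below): construct a scheduler from a time function and a delay function, queue callbacks with enter() (relative delay) or enterabs() (absolute timestamp), then call run().

import sched
import time

# A scheduler needs a clock and a blocking delay function.
scheduler = sched.scheduler(time.time, time.sleep)

def greet(name):
    print("hello,", name)

# enter(delay, priority, action, argument): fire 1 second from now.
scheduler.enter(1, 1, greet, ("world",))
# enterabs(time, priority, action, argument): fire at an absolute time.
scheduler.enterabs(time.time() + 2, 1, greet, ("again",))

scheduler.run()  # blocks until the event queue is empty
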
def detect_inactive_hosts(scan_hosts):
    """
    Scans the network to find whether scan_hosts are live or dead.
    scan_hosts can be like 10.0.2.2-4 to cover a range.
    See Scapy docs for specifying targets.
    """
    global scheduler
    scheduler.enter(RUN_FREQUENCY, 1, detect_inactive_hosts, (scan_hosts,))
    inactive_hosts = []
    try:
        ans, unans = sr(IP(dst=scan_hosts)/ICMP(), retry=0, timeout=1)
        ans.summary(lambda r: r.sprintf("%IP.src% is alive"))
        for inactive in unans:
            print("%s is inactive" % inactive.dst)
            inactive_hosts.append(inactive.dst)
        print("Total %d hosts are inactive" % (len(inactive_hosts)))
    except KeyboardInterrupt:
        exit(0)

def upload(pv, inverters, scheduler, timestamp, boundary):
    """Retrieves and uploads inverter data, and schedules the next upload."""
    values = [inverter.request_values() for inverter in inverters]
    # Filter systems with normal operating mode
    values = [val for val in values if val['operating_mode'] == 'normal']
    if values:
        data = {
            'd': time.strftime('%Y%m%d'),
            't': time.strftime('%H:%M'),
            'v1': round(sum(value['energy_today'] for value in values) * 1000),
            'v2': sum(value['output_power'] for value in values),
            'v5': sum(value['internal_temp'] for value in values) / len(values),
            'v6': sum(value['grid_voltage'] for value in values) / len(values)
        }
        logger.info('Uploading: %s', data)
        pv.add_status(data)
    else:
        logger.info('Not uploading, no inverter has operating mode normal')
    sched_args = (pv, inverters, scheduler, timestamp + boundary, boundary)
    scheduler.enterabs(timestamp + boundary, 1, upload, sched_args)

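The upload example reschedules itself with enterabs() against an advancing timestamp rather than enter() with a fixed delay, so a slow upload does not push back every subsequent run. A minimal sketch of that drift-free pattern, with illustrative names (tick, PERIOD) not taken from the project:

import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)
PERIOD = 5  # seconds between runs (illustrative)

def tick(deadline, remaining):
    print("tick at {:.2f}".format(time.time()))
    time.sleep(1)  # simulate slow work; the next deadline is unaffected
    if remaining > 1:
        next_deadline = deadline + PERIOD
        scheduler.enterabs(next_deadline, 1, tick, (next_deadline, remaining - 1))

first = time.time() + PERIOD
scheduler.enterabs(first, 1, tick, (first, 3))
scheduler.run()
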
def run_update(args):
    logging.info('Getting updates...')
    db = init_db(args.db)
    cur = db.cursor()
    cfg = next(yaml.safe_load_all(open(args.chores, 'r', encoding='utf-8')))
    chores_avail = []
    tasks = sched.scheduler(time.time)
    for name, config in cfg.items():
        result = cur.execute(
            'SELECT updated, last_result FROM chore_status WHERE name = ?',
            (name,)).fetchone()
        if result:
            result = chores.ChoreStatus(*result)
        chorename = config.pop('chore')
        chore = chores.CHORE_HANDLERS[chorename](name, status=result, **config)
        chores_avail.append((chorename, chore))
    try:
        while 1:
            for chorename, chore in chores_avail:
                tasks.enterabs(
                    chore.status.updated + args.keep * 60,
                    chores.CHORE_PRIO[chorename],
                    wrap_fetch, (chore, cur)
                )
            tasks.run()
            db.commit()
            if args.keep:
                logging.info('A round of updating completed.')
            else:
                break
    except KeyboardInterrupt:
        logging.warning('Interrupted.')
    finally:
        db.commit()

def __init__(self, collection=None, **kwargs):
    self.logger = logging.getLogger('executor')
    self.logger.setLevel(logging.INFO)
    self.sche = sched.scheduler(time.time, time.sleep)
    self.sessions_queue = {}  # {ID: {delay: cmd, delay2: cmd2, ....}}
    self.setCollection(collection)
    self.reset_prof = kwargs.get('reset_profiling', False)
    self.profile_size = int(kwargs.get('profile_size', 1))  # 1 MB by default
    self.drop_coll = kwargs.get('drop_collection', False)
    self.creat_coll = kwargs.get('create_collection', True)
    self.bins = int(kwargs.get('bins', 20))
    self.time_scale_factor = float(kwargs.get('time_scale_factor', 1.0))
    self.histtype = kwargs.get('histtype', 'step')
    self.exec_time_cache = {}  # for display execution result
    self.type_cache = {  # caching for display
        'find': [],  # [ID(str), ...]
        'insert': [],
        'update': [],
        'delete': []
    }

def _send(self):
    """
    Send a batch of randomly selected packets from the sending pool, then
    ensure the sending pool gets refilled if necessary. The packets are
    encapsulated in an Ethernet frame of type 0xcafe and removed from the
    sending pool, and finally broadcast in a batch. This function
    reschedules itself to occur every sending_freq seconds.
    """
    self._scheduler.enter(self._sending_freq, 1, self._send)
    log_debug("Sending scheduler queue length: {}".format(len(self._scheduler.queue)))
    if self._sending:
        batch = []
        s = sample(self._sending_pool, self._batch_size)
        for pkt in s:
            batch.append(Ether(dst="ff:ff:ff:ff:ff:ff", src=self._mac_address, type=ETHERTYPE) / pkt)
            self._sending_pool.remove(pkt)
        t_before = time()
        _gen_send_repeatable(self._sending_socket, batch, iface=self._wireless_interface, verbose=False)
        t_after = time()
        with open(self._stats_file_name, 'a') as stats_file:
            stats_file.write('{},{},{}\n'.format(t_before, t_after, len(batch)))
        self._sent_pkt_counter += len(batch)
        log_network("snt {} in {}s".format(len(batch), t_after - t_before))
        self._prepare_sending_pool()

def start(self):
    """
    Run the aDTN network functionality in two threads, one for sending and
    the other for receiving. Received Ethernet frames are filtered for
    ethertype and processed if they match the 0xcafe type. The sending
    thread runs a scheduler for periodic sending of aDTN packets.
    """
    self._start_time = time()
    self._sent_pkt_counter = 0
    self._received_pkt_counter = 0
    self._decrypted_pkt_counter = 0
    self._prepare_sending_pool()
    self._scheduler.enter(self._sending_freq, 1, self._send)
    self._sniffing = True
    self._thread_receive = Thread(target=self._sniff, name="ReceivingThread")
    self._sending = True
    self._thread_send = Thread(target=self._scheduler.run, name="SendingThread", kwargs={"blocking": True})
    log_network("start-{}-{}".format(self._batch_size, self._sending_freq))
    self._thread_receive.start()
    sleep(5)
    self._thread_send.start()

def stop(self):
    """
    Stop aDTN. Make sure the two threads created at start are finished properly.
    """
    self._sending = False
    try:
        while not self._scheduler.empty():
            event = self._scheduler.queue.pop()
            self._scheduler.cancel(event)
        # By now the scheduler has run empty, so join the thread:
        self._thread_send.join()
        sleep(5)
        # Now we just have to join the receiving thread to stop aDTN completely:
        self._sniffing = False
        self._thread_receive.join()
        log_network("stop")
    except ValueError:  # In case the popped event started running in the meantime...
        log_debug("Scheduler is not empty, retry stopping.")
        self.stop()  # ...call the stop function once more.

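stop() retries recursively because cancel() raises ValueError when the popped event started running between the empty() check and the cancellation. A sketch of the same drain idea written as a loop, under the assumption that sched's queue property returns a sorted snapshot of pending events (which it does in CPython):

def drain(scheduler):
    # Cancel every pending event; tolerate events that fire mid-drain.
    while not scheduler.empty():
        for event in scheduler.queue:  # .queue is a sorted snapshot
            try:
                scheduler.cancel(event)
            except ValueError:
                pass  # event already ran or is running; nothing to cancel
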
def autoBlock(config):
    stdLog(u'Auto-block started', 'info')
    stdLog(u'Logging in...', 'info')
    if adminLogin(config['user'], config['configFilename'][:-5] + '.co'):
        stdLog(u'Login successful', 'success')
        while True:
            s = sched.scheduler(time.time, time.sleep)
            tomorrow = datetime.datetime.replace(
                datetime.datetime.now() + datetime.timedelta(days=1),
                hour=0, minute=0, second=0, microsecond=0)
            s.enter((tomorrow - datetime.datetime.now()).seconds, 1, _block, (config,))
            s.run()
    else:
        stdLog(u'Login failed', 'error')
        sys.exit(1)

def run(self, debug=False):
    try:
        self.conn = self.__lirc_conn()
        while True:
            if self.active and self.__find_sync():
                schedule = sched.scheduler(time.time, time.sleep)
                for _, p in self.players.items():
                    if p.moving():
                        schedule.enter(Race.DELAY * p.nth, 1, self.__send, (p,))
                schedule.run()
            # Apply state changes as per requests from TCP server.
            while not self.q.empty():
                self.__handle_message(self.q.get(False))
            if debug:
                break
    except KeyboardInterrupt:
        logging.warn("Terminating Race")
    finally:
        if self.conn:
            self.conn.close()

def __init__(self, num, pool_size):
    self.pool_size = pool_size
    res = self.db().find()
    for one in res:
        self.db().remove(one)
    if num == 0:
        self.run_num = 100000
    else:
        self.run_num = num * 2
    self.passed = []
    self.hot = []
    self.cold = []
    self.pending = []
    self.count = 0
    self.s2 = sched.scheduler(time.time, time.sleep)
    self.s2.enter(0, 1, self.get_freeproxy_in_xdaili)
    self.s2.enter(3, 1, self.grasp_proxy)
    self.s2.run()
    self.s1 = sched.scheduler(time.time, time.sleep)
    self.s1.enter(3600, 2, self.regular.clean)
    self.s1.run()

def notify_new_loans(sleep_time):
    global loans_provided
    try:
        new_provided = api.return_active_loans()['provided']
        if loans_provided:
            # lambda to return a set of ids from the api result
            get_id_set = lambda loans: set([x['id'] for x in loans])
            loans_amount = {}
            loans_info = {}
            for loan_id in get_id_set(new_provided) - get_id_set(loans_provided):
                loan = [x for x in new_provided if x['id'] == loan_id][0]
                # combine loans with the same rate
                k = 'c' + loan['currency'] + 'r' + loan['rate'] + 'd' + str(loan['duration'])
                loans_amount[k] = float(loan['amount']) + (loans_amount[k] if k in loans_amount else 0)
                loans_info[k] = loan
            # send notifications with the grouped info
            for k, amount in loans_amount.iteritems():
                loan = loans_info[k]
                t = "{0} {1} loan filled for {2} days at a rate of {3:.4f}%"
                text = t.format(amount, loan['currency'], loan['duration'], float(loan['rate']) * 100)
                log.notify(text, notify_conf)
        loans_provided = new_provided
    except Exception as ex:
        ex.message = ex.message if ex.message else str(ex)
        print("Error during new loans notification: {0}".format(ex.message))
    scheduler.enter(sleep_time, 1, notify_new_loans, (sleep_time,))

def __init__(self):
    super().__init__()
    self.id_it = ID_NOT_SET
    self.scheduler = sched.scheduler(time.time, time.sleep)

    vbox = QtWidgets.QVBoxLayout()
    self.setLayout(vbox)
    vbox.setAlignment(QtCore.Qt.AlignTop)

    # ..for details
    ### self.details_ll = QtWidgets.QLabel("-----")
    ### self.details_ll.setWordWrap(True)

    self.question_ll = QtWidgets.QLabel("<h4>Question</h4>")
    vbox.addWidget(self.question_ll)
    self.question_le = QtWidgets.QLineEdit()
    self.question_le.textChanged.connect(self.on_question_text_changed)
    vbox.addWidget(self.question_le)

def handle_gathering_event(cls, gather_obj):
    '''
    This is the method called by the scheduler when an event expires.
    It takes the gatherer instance given as an argument to the scheduler
    and calls its measurement function. To avoid timing issues while
    measuring, a new event for the next call is created before the
    measurement call.
    '''
    delay_in_sec = ms_to_sec(gather_obj.delayms)
    new_gather_event = cls.scheduler.enter(delay_in_sec,
                                           GATHERING_EVENT_PRIORITY,
                                           cls.handle_gathering_event,
                                           kwargs={"gather_obj": gather_obj})
    gather_obj.set_event(new_gather_event)
    gather_obj.measure()

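Several examples here (detect_inactive_hosts, _send, handle_gathering_event) use the same schedule-first idiom: queue the next occurrence before doing the work, so the start of each cycle is not delayed by the work itself. A stripped-down sketch of the idiom, with illustrative names (poll, PERIOD):

import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)
PERIOD = 2  # seconds (illustrative)

def poll():
    # Re-arm first: even if the measurement below is slow or raises,
    # the next event is already queued (run() can simply be called again).
    scheduler.enter(PERIOD, 1, poll)
    print("measuring at {:.2f}".format(time.time()))

scheduler.enter(PERIOD, 1, poll)
scheduler.run()  # runs until interrupted
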
def _iterations_until(self, completed, update_state, argument):
    """
    Poll for a state change using the given callables.

    :param completed: A callable accepting argument, returning true if the
        state change has successfully completed.
    :param update_state: The action to execute in order to poll for a state change
    :param argument: The arguments on which to execute both the check and the
        action. Probably a tuple.
    :return: The number of iterations taken
    :rtype: int
    """
    if completed(*argument):
        return 0
    s = scheduler(time.time, time.sleep)
    i = 0
    started_at = time.time()
    while not completed(*argument) and not self._has_timed_out(started_at):
        delta = max(0, min(self._poll, self._timeout - (time.time() - started_at)))
        s.enter(delta, 0, update_state, argument)
        s.run()
        i += 1
    return i

def detect_inactive_hosts(scan_hosts):
    """
    Scans the network to find whether scan_hosts are live or dead.
    scan_hosts can be like 10.0.2.2-4 to cover a range.
    See Scapy docs for specifying targets.
    """
    global scheduler
    scheduler.enter(RUN_FREQUENCY, 1, detect_inactive_hosts, (scan_hosts,))
    inactive_hosts = []
    try:
        ans, unans = sr(IP(dst=scan_hosts)/ICMP(), retry=0, timeout=1)
        ans.summary(lambda (s, r): r.sprintf("%IP.src% is alive"))
        for inactive in unans:
            print "%s is inactive" % inactive.dst
            inactive_hosts.append(inactive.dst)
        print "Total %d hosts are inactive" % (len(inactive_hosts))
    except KeyboardInterrupt:
        exit(0)

def run(self):
    s = sched.scheduler(time.time, time.sleep)
    logger.info('Hard cron daemon started')
    while not _cron_stopping:
        now = time.time()
        s.enter(60 - now % 60, 1, self.launch, ())
        s.run()

def broadlink_rm_temperature_timer(scheduler, delay, device):
    scheduler.enter(delay, 1, broadlink_rm_temperature_timer, [scheduler, delay, device])
    try:
        temperature = str(device.check_temperature())
        topic = topic_prefix + "temperature"
        logging.debug("Sending RM temperature " + temperature + " to topic " + topic)
        mqttc.publish(topic, temperature, qos=qos, retain=retain)
    except:
        logging.exception("Error")

def broadlink_sp_energy_timer(scheduler, delay, device):
    scheduler.enter(delay, 1, broadlink_sp_energy_timer, [scheduler, delay, device])
    try:
        energy = str(device.get_energy())
        topic = topic_prefix + "energy"
        logging.debug("Sending SP energy " + energy + " to topic " + topic)
        mqttc.publish(topic, energy, qos=qos, retain=retain)
    except:
        logging.exception("Error")

def __init__(self, scheduler):
    Thread.__init__(self)
    self.scheduler = scheduler

def run(self):
    try:
        self.scheduler.run()
    except:
        logging.exception("Error")

def schedule(timer, priority, command, conn):
    s = sched.scheduler(time.time, time.sleep)
    s.enter(timer, priority, execute, (command, conn))
    s.run()

# from datetime import datetime, timedelta

# def db_init(db):
#     db.execute("create table if not exists scheduler(id primary key, time, action)")
#     db.commit()

# split = inp.split(' ')
# timer = int(inp[0])
# action = " ".join(inp[1:])
# command = 'MODE {} -b {}'.format('#uguubot', action)
# run_at = now + timedelta(hours=3)
# delay = (run_at - now).total_seconds()

# now = datetime.now()
# print now
# change = timedelta(weeks=0, days=0, hours=0, minutes=1, seconds=0)
# print change
# future = now + change
# print future

# now = datetime.now()
# run_at = now + timedelta(minutes=1)
# delay = (run_at - now).total_seconds()
# threading.Timer(delay, action('test')).start()

# command = 'PRIVMSG {} :{}'.format('#uguubot', inp)

def _refresh_flows_thread_inner(self):
    def refresh_flows(scheduler):
        self._register_flows(self.flow_definitions, True)
        scheduler.enter(60, 1, refresh_flows, (scheduler,))

    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(60, 1, refresh_flows, (scheduler,))
    scheduler.run()

def addSession(self, ID, time_table, priority=1):
    """Add a session into the executor.

    Args:
        ID (str): session ID
        time_table::

            {
                time(float): cmd(SON),
                time(float): cmd(SON),
                ...
            }

        priority (int): the priority of execution of this session

    Note:
        "time" in time_table represents the delay of execution after the
        executor begins. When the duration of a certain operation is too
        long, the whole execution will be delayed.
        Read more about the scheduler in:
        https://docs.python.org/2/library/sched.html
    """
    if ID in self.sessions_queue:
        # raise KeyError('ID [%s] already exist!' % ID)
        logger.warning('ID [%s] already exists in executor\'s session queue!' % ID)
        logger.warning('New operation will overwrite old one')
    time_table = {t * self.time_scale_factor: time_table[t] for t in time_table}
    self.exec_time_cache[ID] = []
    self.sessions_queue[ID] = time_table
    cmd_type = time_table.values()[0].keys()[0]
    if cmd_type in self.type_cache:
        self.type_cache[cmd_type].append(ID)
    for t in time_table:
        self.sche.enter(t + 3, priority, self.runCommand, [ID, time_table[t]])

def test_enter(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
        z = scheduler.enter(x, 1, fun, (x,))
    scheduler.run()
    self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])

def test_enterabs(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
        z = scheduler.enterabs(x, 1, fun, (x,))
    scheduler.run()
    self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])

def test_priority(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    for priority in [1, 2, 3, 4, 5]:
        z = scheduler.enterabs(0.01, priority, fun, (priority,))
    scheduler.run()
    self.assertEqual(l, [1, 2, 3, 4, 5])

def test_cancel(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    now = time.time()
    event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
    event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
    event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
    event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
    event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
    scheduler.cancel(event1)
    scheduler.cancel(event5)
    scheduler.run()
    self.assertEqual(l, [0.02, 0.03, 0.04])

def test_empty(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    self.assertTrue(scheduler.empty())
    for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
        z = scheduler.enterabs(x, 1, fun, (x,))
    self.assertFalse(scheduler.empty())
    scheduler.run()
    self.assertTrue(scheduler.empty())

def __init__(self, timef=time.time, delayf=time.sleep):
    # Declaration
    self.__sched_obj = None
    # Initialization
    self.__sched_obj = sched.scheduler(timef, delayf)

def main():
    try:
        client = CloudFeedClient(CONF.rackspace.feed_url)
        sc = sched.scheduler(time.time, time.sleep)
        # Method signature: enter(delay, priority, action, argument)
        sc.enter(0, 1, start, (sc, client))
        sc.run()
    except RuntimeError as e:
        raise SystemExit(e.message)

def __init__(self, batch_size, sending_interval, wireless_interface, data_store):
    """
    Initialize an aDTN instance and its respective key manager and message
    store, as well as a sending message pool from which the next sending
    batch gets generated.

    Define aDTNInnerPacket to be the payload of aDTNPacket. Define
    aDTNPacket to be the payload of Ethernet frames of type 0xcafe. Set up
    a scheduler to handle message sending. Define a thread to handle
    received messages. The wireless interface should be previously set to
    ad-hoc mode and its ESSID should be the same in other devices running
    aDTN.

    :param batch_size: number of packets to transmit at each sending operation
    :param sending_interval: number of seconds between two sending operations
    :param wireless_interface: wireless interface to send and receive packets
    """
    self._batch_size = batch_size
    self._sending_freq = sending_interval
    self._wireless_interface = wireless_interface
    self._km = KeyManager()
    self.data_store = DataStore(data_store)
    self._sending_pool = []
    self._scheduler = sched.scheduler(time, sleep)
    self._sending = None
    self._sniffing = None
    self._thread_send = None
    self._thread_receive = None
    self._sent_pkt_counter = None
    self._received_pkt_counter = None
    self._decrypted_pkt_counter = None
    self._start_time = None
    self._mac_address = macget(getcard(wireless_interface))
    self._sending_socket = L2Socket(iface=self._wireless_interface)
    bind_layers(aDTNPacket, aDTNInnerPacket)
    bind_layers(Ether, aDTNPacket, type=ETHERTYPE)
    log_debug("MAC address in use: {}".format(self._mac_address))
    self._stats_file_name = '{}_{}.stats'.format(batch_size, sending_interval)

def __init__(self, config, onUpdateConfig=None):
    # disable debug after testing is finished
    # setDebug()
    Thread.__init__(self, name='updater')
    self.setDaemon(True)
    self.appSettings = config
    self.onUpdateConfig = onUpdateConfig
    self.env = self.appSettings.get('Agent', 'Environment', fallback='live')
    global SETUP_URL
    global UPDATE_URL
    global TIME_TO_CHECK
    if self.env == "live":
        SETUP_URL = SETUP_URL + SETUP_NAME
    else:
        SETUP_URL = SETUP_URL + self.env + "_" + SETUP_NAME
    UPDATE_URL = UPDATE_URL + self.env
    if 'UpdateUrl' in config.cloudConfig:
        UPDATE_URL = config.cloudConfig.UpdateUrl
    if 'UpdateCheckRate' in config.cloudConfig:
        interval = int(config.cloudConfig.UpdateCheckRate)
        TIME_TO_CHECK = interval + random.randint(0, interval * 10)
    if 'SetupUrl' in config.cloudConfig:
        SETUP_URL = config.cloudConfig.SetupUrl
    self.scheduler = scheduler(time, sleep)
    self.Continue = True
    self.currentVersion = ''
    self.newVersion = ''
    self.downloadUrl = ''
    self.UpdateCleanup()
    self.startTime = datetime.now() - timedelta(days=1)

def run(self):
    debug('UpdaterThread started')
    while self.Continue:
        sleep(TIME_TO_SLEEP)
        self.SetupUpdater()
        self.scheduler.run()
    debug('UpdaterThread finished')

def SetupUpdater(self):
    self.scheduler.enter(TIME_TO_CHECK, 1, self.CheckUpdate, ())

def recognize_speech(self):
    """
    send recognize speech event and process the response

    :param speech: file-like containing speech for request
    :param mic_stop_event: threading.Event when speech is an infinite stream,
        to monitor for signal from downchannel stream to end the recognize request.
    """
    if self.speech_profile not in SPEECH_CLOUD_ENDPOINTING_PROFILES:
        if self.expect_speech_timeout_event:
            self.scheduler.cancel(self.expect_speech_timeout_event)
    self._audio_input_device.start_recording()
    self.handle_parts(self.send_event_parse_response(
        self._generate_recognize_payload(self._audio_input_device)))
    logger.debug("Recognize dialog ID: {}".format(self._current_dialog_request_id))

def send_ping(self):
    """
    self-scheduling task to send http2 PING every _PING_RATE seconds
    """
    logger.debug("PINGING AVS")
    self._connection.ping(b'\x00' * 8)
    logger.info("PINGED AVS")
    self.scheduler.enter(_PING_RATE, 1, self.send_ping)

def run(self):
    """
    main loop for AVS client

    1. checks for any expired scheduled tasks that need to run
    2. handles outstanding directives
    3. runs one iteration of audio player state-machine loop

    :return:
    """
    self.scheduler.run(blocking=False)
    self._handle_directives()
    self.player.run()

def test_enter_concurrent(self):
    q = queue.Queue()
    fun = q.put
    timer = Timer()
    scheduler = sched.scheduler(timer.time, timer.sleep)
    scheduler.enter(1, 1, fun, (1,))
    scheduler.enter(3, 1, fun, (3,))
    t = threading.Thread(target=scheduler.run)
    t.start()
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 1)
    self.assertTrue(q.empty())
    for x in [4, 5, 2]:
        z = scheduler.enter(x - 1, 1, fun, (x,))
    timer.advance(2)
    self.assertEqual(q.get(timeout=TIMEOUT), 2)
    self.assertEqual(q.get(timeout=TIMEOUT), 3)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 4)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 5)
    self.assertTrue(q.empty())
    timer.advance(1000)
    t.join(timeout=TIMEOUT)
    self.assertFalse(t.is_alive())
    self.assertTrue(q.empty())
    self.assertEqual(timer.time(), 5)

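The Timer used above is a test double, not part of sched: sched.scheduler accepts any (timefunc, delayfunc) pair, so a test can drive the clock by hand and make timing deterministic. Below is a minimal sketch of such a fake clock, assuming only the time(), sleep(), and advance() methods the test calls; note it advances the clock unconditionally, whereas the final assertEqual(timer.time(), 5) suggests the suite's real Timer advances only as far as the next scheduled wake-up.

import threading

class Timer:
    """Manually advanced clock, usable as sched's timefunc/delayfunc."""

    def __init__(self):
        self._time = 0
        self._cond = threading.Condition()

    def time(self):
        with self._cond:
            return self._time

    def sleep(self, seconds):
        # Block until advance() has moved the clock past the deadline.
        with self._cond:
            deadline = self._time + seconds
            while self._time < deadline:
                self._cond.wait()

    def advance(self, seconds):
        with self._cond:
            self._time += seconds
            self._cond.notify_all()
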
def test_queue(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    now = time.time()
    e5 = scheduler.enterabs(now + 0.05, 1, fun)
    e1 = scheduler.enterabs(now + 0.01, 1, fun)
    e2 = scheduler.enterabs(now + 0.02, 1, fun)
    e4 = scheduler.enterabs(now + 0.04, 1, fun)
    e3 = scheduler.enterabs(now + 0.03, 1, fun)
    # queue property is supposed to return an ordered list of
    # upcoming events
    self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])

def test_args_kwargs(self):
    flag = []

    def fun(*a, **b):
        flag.append(None)
        self.assertEqual(a, (1, 2, 3))
        self.assertEqual(b, {"foo": 1})

    scheduler = sched.scheduler(time.time, time.sleep)
    z = scheduler.enterabs(0.01, 1, fun, argument=(1, 2, 3), kwargs={"foo": 1})
    scheduler.run()
    self.assertEqual(flag, [None])

def test_run_non_blocking(self):
    l = []
    fun = lambda x: l.append(x)
    scheduler = sched.scheduler(time.time, time.sleep)
    for x in [10, 9, 8, 7, 6]:
        scheduler.enter(x, 1, fun, (x,))
    scheduler.run(blocking=False)
    self.assertEqual(l, [])

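run(blocking=False) executes only the events already due and returns the time remaining until the next one (None when the queue is empty), which is what lets the AVS client's run() above interleave the scheduler with other work. A minimal polling-loop sketch:

import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)
scheduler.enter(2, 1, print, ("fired",))

while not scheduler.empty():
    next_in = scheduler.run(blocking=False)  # run due events, don't wait
    # ... interleave other work here ...
    if next_in is not None:
        time.sleep(min(next_in, 0.1))
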
def fun():
    scheduler.enter(10, 2, print_event, ('10', start))
    scheduler.enter(20, 1, print_event, ('20', start))