The following code examples, extracted from open source Python projects, illustrate how to use schedule.run_pending().
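Nearly all of the examples share one pattern: register jobs with schedule.every(...).do(...), then call schedule.run_pending() inside a polling loop whose sleep interval sets the scheduling resolution. A minimal, self-contained sketch of that pattern (the job body and the 10-second interval are illustrative, not taken from any example below):

import time
import schedule

def job():
    print("job ran")  # placeholder work

# run_pending() executes any jobs whose next scheduled time has passed;
# the sleep below controls how often we poll for due jobs.
schedule.every(10).seconds.do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
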
def handle(self, *args, **options): logger.info("Getting ready to trawl Poloniex...") schedule.every(1).minutes.do(pull_poloniex_data) # @Alex # run resampling for all periods and calculate indicator values for hor_period in PERIODS_LIST: schedule.every(hor_period / time_speed).minutes.do(_resample_then_metrics, {'period': hor_period}) keep_going = True while keep_going: try: schedule.run_pending() time.sleep(1) except Exception as e: logger.debug(str(e)) logger.info("Poloniex Trawl shut down.") keep_going = False
def mine(self, query, minePeriod, requestFrequency, analyzeFrequency,
         requestAmount=50, similarityCutoff=90):
    try:
        self.query = query
        self.cutoff = similarityCutoff
        self.amount = requestAmount
        startStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        schedule.every(requestFrequency).seconds.do(self.requestTweets)
        schedule.every(analyzeFrequency).seconds.do(self.analyzeGroup)
        end = time() + minePeriod
        while time() <= end:
            schedule.run_pending()
        endStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        print("Mine complete from\n" + startStr + " - " + endStr + "\n")
    except Exception as e:
        print(e)

def handle(self, *args, **kwargs):
    logger.info("%s - starting jobs schedule" % (__name__))
    try:
        '''
        schedule.every().hour.do(job_update_entities)
        schedule.every().hour.do(job_update_clients)
        schedule.every().hour.do(job_update_checks)
        schedule.every().hour.do(job_update_trends)
        #schedule.every(10).minutes.do(job_update_events)
        '''
        schedule.every(settings.CACHE_ENTITY_TTL).seconds.do(job_update_entities)
        schedule.every(settings.CACHE_CLIENT_TTL).seconds.do(job_update_clients)
        schedule.every(settings.CACHE_TRENDS_TTL).seconds.do(job_update_trends)

        while True:
            schedule.run_pending()
            sleep(1)
    except KeyboardInterrupt:
        logger.info("%s - user signal exit!" % (__name__))
        exit(0)

def schedule_thread_worker(schedule, logger):
    ''' schedule thread, takes care of running processes in the future '''
    global CTRL_C
    logLine = 'starting thread_worker'
    logger.debug(logLine)
    while not CTRL_C['STOP']:
        #print('looping', CTRL_C)
        sys.stdout.flush()
        schedule.run_pending()
        logLine = 'scheduler woke {0}'.format(
            threading.current_thread().getName())
        time.sleep(1)
        logger.debug(logLine)
    logger.debug('Threading stop:{0}'.format(
        threading.current_thread().getName()))
    sys.exit()

def madness(self):

    def fast():
        self.pyEfi.redisDb.incr('pyefi:eye:fast')

    def pollRateTest():
        self.pollRateRedis('pyefi:eye:fast')

    schedule.every(1).seconds.do(pollRateTest)
    schedule.every(0.2).seconds.do(fast)

    while 1:
        schedule.run_pending()
        # This is the scheduler tick rate,
        # nothing will poll faster than this.
        sleep(0.005)

def main(greeting=True):
    bot = Fanbot.create_from_modules(secrets, compliments)
    if greeting:
        bot.hello_world()
    try:
        # Initially hardcoding a schedule. Could be swapped out later
        # PLACE YOUR CUSTOM SCHEDULES HERE. SEE SCHEDULE MODULE DOCUMENTATION.
        schedule.every(30).minutes.do(bot.respond_to_tweets)
        schedule.every().day.at("10:30").do(bot.post_compliment)
        while True:
            schedule.run_pending()
            # You can tune the sleep length too. Should be roughly the same
            # as the most frequently scheduled job above
            time.sleep(30*60)  # seconds
    except KeyboardInterrupt:
        if greeting:
            bot.goodbye()

def polling():
    for m in sys.modules['thunderbolt100k.widgets'].modules:
        if m.endswith('__init__.py'):
            continue
        widget_name = os.path.basename(m).replace('.py', '')
        widget = getattr(sys.modules['thunderbolt100k.widgets'], widget_name)
        if constants.CONFIG.get('{0}_INTERVAL'.format(widget.__name__.upper())):
            interval = int(constants.CONFIG.get('{0}_INTERVAL'.format(widget_name.upper())))
        else:
            interval = 5  # Default is 5 min if not specified
        schedule.every(interval).minutes.do(widget_main, widget)
        widget_main(widget)  # Run the function instantly and then schedule

    while True:
        schedule.run_pending()
        time.sleep(60)

def run_schedule(interval=1):
    """
    Continuously run scheduled jobs.
    Taken from https://github.com/mrhwick/schedule/blob/8e1d5f806d34d9ecde3c068490c8d1513ed774c3/schedule/__init__.py#L63
    """
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        def __init__(self, app):
            super().__init__()
            self.app = app

        def run(self):
            with self.app.app_context():
                while not cease_continuous_run.is_set():
                    schedule.run_pending()
                    time.sleep(interval)

    continuous_thread = ScheduleThread(app)
    continuous_thread.start()
    return cease_continuous_run

def check_forever(checkers):
    global reload_conf_pending, interrupted, open_backdoor
    schedule_checks(checkers)
    logger.info("Starting infinite loop")
    while not reload_conf_pending:
        if interrupted:
            break
        if open_backdoor:
            open_backdoor = False
            code.interact(
                banner="Kibitzr debug shell",
                local=locals(),
            )
        schedule.run_pending()
        if interrupted:
            break
        time.sleep(1)

def dynamically_scrape_data(filename, link_list, interval, num_retries=10):
    """
    Repeatedly runs the scraper every time the specified interval has
    passed and continuously appends the data to a file.
    """
    def job():
        scrape_all_data_from_all_featured_products(filename, link_list, num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def clean_links_and_dynamically_scrape(filename, interval, num_retries=10):
    """
    Repeatedly updates the link list and runs the scraper every time the
    specified interval has passed, continuously appending the data to a file.
    """
    def job():
        clean_links_and_scrape(filename, num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def main():
    try:
        smart_module = SmartModule()
        smart_module.asset.load_asset_info()
        smart_module.load_site_data()
        smart_module.discover()
        smart_module.load_influx_settings()
    except Exception as excpt:
        Log.exception("Error initializing Smart Module. %s.", excpt)

    while 1:
        try:
            time.sleep(0.5)
            schedule.run_pending()
        except Exception as excpt:
            Log.exception("Error in Smart Module main loop. %s.", excpt)
            break

def run(slack_client):
    ''' Main event loop. '''
    if slack_client.rtm_connect():
        while True:
            events = slack_client.rtm_read()
            for event in events:
                try:
                    process_event(slack_client, event)
                except ApiCallException as api_call_exception:
                    logging.warning(api_call_exception)
            schedule.run_pending()
            time.sleep(SLEEP_TIME)
    else:
        raise ConnectionError()

def start(self):
    try:
        self.modem.check()
        self.set_os_time()
        Thread(target=self.track_detectors).start()
        Thread(target=self.arming_alarm).start()
        Thread(target=self.start_photos_scheduler).start()
        while True:
            schedule.run_pending()
            time.sleep(1)
        # Tests
        # self.sms.send('519585106', 'test')
    except Exception as ex:
        self.logger.error('Raptor: {0}'.format(ex))
        pass

def main():
    config.process_args("HEALTH", default_config_path=cfg.DEFAULT_CONF_PATH,
                        defaults=cfg.DEFAULT,
                        validation_schema=cfg.SCHEMA)

    # Init Elastic index in backend
    for src in CONF["sources"]:
        es.ensure_index_exists(CONF["backend"]["elastic"], src["region"])

    # Setup periodic job that does aggregation magic
    run_every_min = CONF["config"]["run_every_minutes"]
    schedule.every(run_every_min).minutes.do(job)

    job()

    while True:
        schedule.run_pending()
        time.sleep(1)

def enable_housekeeping(run_interval=3600):
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        @staticmethod
        def run():
            while not cease_continuous_run.is_set():
                schedule.run_pending()
                time.sleep(run_interval)

    continuous_thread = ScheduleThread()
    continuous_thread.start()

    schedule.every(6).hours.do(LoggingNight.garbage_collect_cache)

def worker():
    while True:
        schedule.run_pending()
        time.sleep(schedule.idle_seconds())

def run_schedule():
    while True:
        sleep_time = schedule.next_run() - datetime.now()
        print('Next job to run at {}, which is {} from now'
              .format(str(schedule.next_run()), str(sleep_time)))
        # Sleep an extra second to make up for microseconds
        time.sleep(max(1, sleep_time.seconds + 1))
        schedule.run_pending()

def validate_loop():
    """ Schedule loop, needed for the schedule module """
    while True:
        schedule.run_pending()
        time.sleep(1)

def __start_schedule_deamon():
    def schedule_run():
        while True:
            schedule.run_pending()
            time.sleep(1)

    t = threading.Thread(target=schedule_run)
    t.setDaemon(True)
    t.start()

def run_scheduler():
    global scheduler_running
    global scheduler_paused
    global scheduler_done
    while scheduler_running:
        if not scheduler_paused:
            schedule.run_pending()
        sleep(1)

def deferred(self):
    """Called from the thread, schedules pending tasks."""
    schedule.every(10).minutes.do(self._add_rssfeedparsertask)
    schedule.every().minute.do(self._add_episodedownloadtask)

    # Add the RSSFeedParserTask immediately so we don't waste 10 minutes
    self._add_rssfeedparsertask()

    while not self._stop_event.is_set():
        schedule.run_pending()
        sleep(1)

def main(period=None):
    """Constantly check services availability.

    This runs infinite availability check with given period.

    :param period: period in seconds
    """
    if not config.get_config().get("regions"):
        LOG.error("No regions configured. Quitting.")
        return 1

    period = period or config.get_config().get("period", 60)
    if SERVICE_CONN_TIMEOUT + SERVICE_READ_TIMEOUT > period:
        LOG.error("Period can not be lesser than timeout, "
                  "otherwise threads could crowd round.")
        return 1

    backend = config.get_config().get("backend")
    if backend["type"] != "elastic":
        LOG.error("Unexpected backend: %(type)s" % backend)
        return 1

    if not storage.get_elasticsearch(check_availability=True):
        LOG.error("Failed to set up Elasticsearch")
        return 1

    LOG.info("Start watching with period %s seconds" % period)
    schedule.every(period).seconds.do(watch_services)

    while True:
        time.sleep(1)
        schedule.run_pending()

async def _run_motd(self):
    now = datetime.datetime.now()
    await asyncio.sleep(60 - now.second)
    while self.run_timer:
        schedule.run_pending()
        await asyncio.sleep(60)

def main():
    '''with open('log.csv', 'r+') as logfile:
        get_filing_list(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5],
                        sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)'''
    # run the scraping job every 6 hours
    # TODO: move scraping interval to environment var
    schedule.every(6).hours.do(get_filing_list, sys.argv[1], sys.argv[2],
                               sys.argv[3], sys.argv[4], sys.argv[5],
                               sys.argv[7].lower() == 'true',
                               sys.argv[7].lower() == 'true', None)
    while True:
        schedule.run_pending()
        time.sleep(1)

def refresher_run(url=None, fetcher=None, database=None, checker=None):
    refresher = Refresher(url_prefix=url, fetcher=fetcher, database=database, checker=checker)
    schedule.every(5).minutes.do(refresher.main)
    while True:
        schedule.run_pending()
        time.sleep(5)

def start(self):
    self.agent.start()
    self.logger.info('Starting monitor agent...')
    while not self.stop:
        schedule.run_pending()
        time.sleep(self.configuration.data['frequency'])

def backgroundThread():
    schedule.every().day.at("12:00").do(update)
    #schedule.every(5).minutes.do(update)

    kill_update = threading.Event()

    class SearchUpdateThread(threading.Thread):
        def run(self):
            while not kill_update.is_set():
                schedule.run_pending()
                time.sleep(1*60*60)  # Every hour.

    searchThread = SearchUpdateThread()
    searchThread.setDaemon(True)
    searchThread.start()

async def send_home_forecast(self) -> None:
    def __periodically_send_morning_forecast__() -> None:
        __send_home_forecast__("sending periodic morning weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date == datetime.date.today(), weathers)))

    def __periodically_send_evening_forecast__() -> None:
        __send_home_forecast__("sending periodic evening weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date != datetime.date.today(), weathers)))

    def __send_home_forecast__(logging_header: str, weathers_predicate) -> None:
        logging.debug(logging_header)
        forecast = self._weather_service.get_forecast(self._home_settings.full_name, self._measurement_system)
        forecast.weathers = weathers_predicate(forecast.weathers)
        for channel in self._discord_client.get_all_channels():
            if self.__should_send_forecast__(channel):
                try:
                    asyncio.get_event_loop().run_until_complete(
                        SendForecastDiscordCommand(self._discord_client, channel, forecast).execute())
                except discord.HTTPException:
                    msg_template = "failed to send home weather forecast (@{}) to channel {}.{}"
                    msg = msg_template.format(self._home_settings.full_name, channel.server.name, channel.name)
                    logging.exception(msg)

    if self._home_settings.morning_forecast_time is not None:
        schedule.every().day.at(self._home_settings.morning_forecast_time)\
            .do(__periodically_send_morning_forecast__)

    if self._home_settings.evening_forecast_time is not None:
        schedule.every().day.at(self._home_settings.evening_forecast_time)\
            .do(__periodically_send_evening_forecast__)

    while True:
        schedule.run_pending()
        await asyncio.sleep(1)

def job_monitor():
    while 1:
        #print('checking jobs')
        schedule.run_pending()
        time.sleep(5)

def schedule_match():
    try:
        schedule.every(30).seconds.do(refresh_Scorecard).tag('score_updates', 'task')
        while 1:
            schedule.run_pending()
            time.sleep(1)
    except KeyboardInterrupt:
        quit()

# method used to cancel the schedule match

def startScheduler():
    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT, "Started" )
    while True:
        if holdControl.state == "normal":
            with scheduleLock:
                log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT, "Running pending" )
                schedule.run_pending()
        time.sleep( 10 )

def with_heartbeat(fn):
    # timed task wrapper
    def call_func(*args):
        loop = 0
        fn(*args)
        schedule.every(HEARTBEAT).seconds.do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func

def with_heartbeat_30s(fn):
    # timed task wrapper
    def call_func(*args):
        HEARTBEAT = 30
        loop = 0
        fn(*args)
        schedule.every(HEARTBEAT).seconds.do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func

def with_heartbeat_1d(fn):
    # timed task wrapper
    def call_func(*args):
        HEARTBEAT = 30
        loop = 0
        fn(*args)
        schedule.every().day.at("10:30").do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func

def handle(self, *args, **options):
    schedule.every(1).hours.do(tasks.kill_obsolete_timers)
    schedule.every(5).seconds.do(tasks.send_reminders)
    self.stdout.write(self.style.SUCCESS('Starting job runner...'))
    while True:
        time.sleep(1)
        try:
            schedule.run_pending()
        except:
            traceback.print_exc()

def spawn():
    while True:
        schedule.run_pending()
        time.sleep(1)

def schedule_actions():
    # Example: nohup python MyScheduledProgram.py &> ~/Desktop/output.log
    # ps auxw to see running ones
    print(datetime.datetime.now())
    print("Starting to run")

    times = ['6:07', '06:24']

    # Buy today's positions
    for set_time in times:
        schedule.every().monday.at(set_time).do(action)
        schedule.every().tuesday.at(set_time).do(action)
        schedule.every().wednesday.at(set_time).do(action)
        schedule.every().thursday.at(set_time).do(action)
        schedule.every().friday.at(set_time).do(action)

    # Sell yesterday's positions
    set_time = '06:01'
    schedule.every().monday.at(set_time).do(sell_scheduled)
    schedule.every().tuesday.at(set_time).do(sell_scheduled)
    schedule.every().wednesday.at(set_time).do(sell_scheduled)
    schedule.every().thursday.at(set_time).do(sell_scheduled)
    schedule.every().friday.at(set_time).do(sell_scheduled)

    while True:
        schedule.run_pending()
        sys.stdout.flush()
        time.sleep(1)  # Check every 1 second

def run():
    while True:
        schedule.run_pending()
        time.sleep(1)

def run_schedule():
    while 1:
        schedule.run_pending()
        time.sleep(1)

def run(self):
    schedule.every(1).seconds.do(self.run_threaded, self.run_generator_dispatch)
    schedule.every(1).seconds.do(self.run_threaded, self.run_processor_dispatch)
    schedule.every(1).seconds.do(self.run_threaded, self.run_query_project_status)
    while True:
        schedule.run_pending()
        time.sleep(1)

def run(self):
    def delete_job():
        try:
            Mail.delete_one_day_ago()
        except:
            logger.exception("Exception raised in MailDeleteBatch#run()#delete_job()")
            raise

    schedule.every().hour.do(delete_job)
    while True:
        schedule.run_pending()
        time.sleep(1)

def dynamically_scrape_and_append_sales_data(filename, interval, num_retries=10):
    """
    Dynamically scrapes sales data and appends the data to a file by
    generating a list of links, checking it against an old list and only
    keeping new links, and scraping those links for sales data.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)
        scrape_and_append_sales_data_from_featured_links(filename, clean_links, num_retries)
        old_list = new_list  # rebinds the local name only; the outer old_list is unchanged

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def dynamically_scrape_combined_data(data_filename, sales_filename, interval, num_retries=10):
    """
    Dynamically scrapes a continuously updated list of unique clean links
    and appends the data to their respective files.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)
        scrape_combined_data_from_all_featured_products(data_filename, sales_filename,
                                                        clean_links, num_retries)
        old_list = new_list  # rebinds the local name only; the outer old_list is unchanged

    job(old_list)
    schedule.every(interval).hours.do(job, old_list)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def main():
    if not _config.is_registered():
        register()
    refreshinstalled()
    system_notify()
    schedule.every(2).hours.do(refreshinstalled)
    schedule.every(10).minutes.do(system_notify)
    schedule.every(30).seconds.do(do_update)
    while True:
        schedule.run_pending()
        sleep(5)

def run_sched():
    """Loop through the schedule to check for new tasks."""
    global running
    # schedule.every().hour.do(job)
    # schedule.every().day.at("10:30").do(job)
    while running:
        schedule.run_pending()
        sleep(1)

def run_sched(self):
    """Loop through the schedule to check for new tasks."""
    cg.send('> Started Thread w/ self._c = {}'.format(self._checkSchedule))
    while self._checkSchedule:
        schedule.run_pending()
        sleep(1)
    cg.send('> Ended thread w/ self._c = {}'.format(self._checkSchedule))

def main():
    schedule.every(10).minutes.do(fck_afd)
    schedule.every().day.at("12:00").do(tweet_time_of_day)

    while True:
        schedule.run_pending()
        time.sleep(60)

def delete_cache(first_time='0:05', second_time='1:05'):
    try:
        # Delete folders from the 1am-11pm range, at 00:05
        schedule.every().day.at(first_time).do(delete_unnecessary_folders23)
        # Delete folders created during the 24th hour, at 01:05
        schedule.every().day.at(second_time).do(delete_unnecessary_folders24)
        while 1:
            schedule.run_pending()
            sleep(50)
    except Exception as e:
        print("{0} Error: {1}".format(strftime("%Y-%m-%d %H:%M:%S", gmtime()), e))
        schedule.clear()
        print('Notification: Timer is reset')
        delete_cache()