We extracted the following 50 code examples from open-source Python projects to illustrate how to use schedule.every().
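Before the examples, here is a minimal sketch of the pattern that recurs throughout them: register a job with schedule.every(...), then run a loop that calls schedule.run_pending(). The job function and the intervals below are placeholders chosen for illustration, not taken from any of the projects.

import time
import schedule

def job():
    # placeholder task; replace with real work
    print("job ran")

# Register the job on two example triggers: every 10 minutes,
# and every day at 10:30.
schedule.every(10).minutes.do(job)
schedule.every().day.at("10:30").do(job)

# schedule only fires jobs when run_pending() is called,
# so a polling loop (or a background thread) is required.
while True:
    schedule.run_pending()
    time.sleep(1)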
def handle(self, *args, **options): logger.info("Getting ready to trawl Poloniex...") schedule.every(1).minutes.do(pull_poloniex_data) # @Alex # run resampling for all periods and calculate indicator values for hor_period in PERIODS_LIST: schedule.every(hor_period / time_speed).minutes.do(_resample_then_metrics, {'period': hor_period}) keep_going = True while keep_going: try: schedule.run_pending() time.sleep(1) except Exception as e: logger.debug(str(e)) logger.info("Poloniex Trawl shut down.") keep_going = False
def mine(self, query, minePeriod, requestFrequency, analyzeFrequency, requestAmount=50, similarityCutoff=90):
    try:
        self.query = query
        self.cutoff = similarityCutoff
        self.amount = requestAmount
        startStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        schedule.every(requestFrequency).seconds.do(self.requestTweets)
        schedule.every(analyzeFrequency).seconds.do(self.analyzeGroup)
        end = time() + minePeriod
        while time() <= end:
            schedule.run_pending()
        endStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        print("Mine complete from\n" + startStr + " - " + endStr + "\n")
    except Exception as e:
        print(e)
def activate(self):
    self.run_timer = True
    try:
        with open(self.plugin_config.motd_file, "r", encoding="utf8") as f:
            self.motds = json.load(f)
            schedule.every().day.at("00:00").do(self._display_motd)
            asyncio.ensure_future(self._run_motd())
    except FileNotFoundError:
        with open(self.plugin_config.motd_file, "w", encoding="utf8") as f:
            self.motds = {}
            f.write("{}")
    except json.decoder.JSONDecodeError:
        self.logger.exception(f"Could not decode {self.plugin_config.motd_file}! ", exc_info=True)
    # This is stupid
    self.valid_months = {
        "January", "February", "March", "April", "May", "June", "July",
        "August", "September", "October", "November", "December", "Any"
    }
    self.valid_days = {
        "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", "Any",
        "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
        "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"
    }
def handle(self, *args, **kwargs):
    logger.info("%s - starting jobs schedule" % (__name__))
    try:
        '''
        schedule.every().hour.do(job_update_entities)
        schedule.every().hour.do(job_update_clients)
        schedule.every().hour.do(job_update_checks)
        schedule.every().hour.do(job_update_trends)
        #schedule.every(10).minutes.do(job_update_events)
        '''
        schedule.every(settings.CACHE_ENTITY_TTL).seconds.do(job_update_entities)
        schedule.every(settings.CACHE_CLIENT_TTL).seconds.do(job_update_clients)
        schedule.every(settings.CACHE_TRENDS_TTL).seconds.do(job_update_trends)

        while True:
            schedule.run_pending()
            sleep(1)
    except KeyboardInterrupt:
        logger.info("%s - user signal exit!" % (__name__))
        exit(0)
def auto_delete_delivery():
    def delete_job():
        from xiaodi.api.mysql import session_scope, Task
        now = datetime.utcnow()
        for deadline, id_ in TaskDepot.scan_delete_task():
            deadline, id_ = tranform_utctime(deadline), int(id_)
            if now < deadline:
                break
            with session_scope() as session:
                delivery = session.query(Task).filter(Task.id == id_).first()
                user = gen_temp_object(id=delivery.puser_id)
                DeliveryManager.delete(user, delivery, session=session)
        LOG.info('auto delete task finished')

    schedule.every(5).minutes.do(delete_job)
def auto_finish_delivery():
    def finish_job():
        from xiaodi.api.mysql import session_scope, Task
        now = datetime.utcnow()
        for id_, deadline in TaskDepot.scan_finish_task():
            deadline, id_ = tranform_utctime(deadline), int(id_)
            if now < deadline:
                break
            with session_scope() as session:
                delivery = session.query(Task).filter(Task.id == id_).first()
                user = gen_temp_object(id=delivery.puser_id)
                DeliveryManager.finish(user, delivery, session=session)
        LOG.info('auto finish task finished')

    schedule.every(30).minutes.do(finish_job)
def madness(self):
    def fast():
        self.pyEfi.redisDb.incr('pyefi:eye:fast')

    def pollRateTest():
        self.pollRateRedis('pyefi:eye:fast')

    schedule.every(1).seconds.do(pollRateTest)
    schedule.every(0.2).seconds.do(fast)
    while 1:
        schedule.run_pending()
        # This is the scheduler tick rate,
        # nothing will poll faster than this.
        sleep(0.005)
def main(greeting=True):
    bot = Fanbot.create_from_modules(secrets, compliments)
    if greeting:
        bot.hello_world()
    try:
        # Initially hardcoding a schedule. Could be swapped out later
        # PLACE YOUR CUSTOM SCHEDULES HERE. SEE SCHEDULE MODULE DOCUMENTATION.
        schedule.every(30).minutes.do(bot.respond_to_tweets)
        schedule.every().day.at("10:30").do(bot.post_compliment)
        while True:
            schedule.run_pending()
            # You can tune the sleep length too. Should be roughly the same
            # as the most frequently scheduled job above
            time.sleep(30*60)  # seconds
    except KeyboardInterrupt:
        if greeting:
            bot.goodbye()
def polling():
    for m in sys.modules['thunderbolt100k.widgets'].modules:
        if m.endswith('__init__.py'):
            continue
        widget_name = os.path.basename(m).replace('.py', '')
        widget = getattr(sys.modules['thunderbolt100k.widgets'], widget_name)
        if constants.CONFIG.get('{0}_INTERVAL'.format(widget.__name__.upper())):
            interval = int(constants.CONFIG.get('{0}_INTERVAL'.format(widget_name.upper())))
        else:
            interval = 5  # Default is 5 min if not specified
        schedule.every(interval).minutes.do(widget_main, widget)
        widget_main(widget)  # Run the function instantly and then schedule
    while True:
        schedule.run_pending()
        time.sleep(60)
def dynamically_scrape_data(filename, link_list, interval, num_retries=10):
    """
    Repeatedly runs the scraper every time the specified interval has passed
    and continuously appends the data to a file.
    """
    def job():
        scrape_all_data_from_all_featured_products(filename, link_list, num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
def clean_links_and_dynamically_scrape(filename, interval, num_retries=10):
    """
    Repeatedly updates the link list and runs the scraper every time the
    specified interval has passed and continuously appends the data to a file.
    """
    def job():
        clean_links_and_scrape(filename, num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
def prepare_jobs(self, jobs):
    suffixed_names = {
        'week': 'weekly',
        'day': 'daily',
        'hour': 'hourly',
        'minute': 'minutes',
        'second': 'seconds',
    }
    for job in jobs:
        if not job.enabled:
            continue

        interval_name = job.time_unit.lower()

        if job.interval > 0:  # There can't be a job less than 0 (0 minutes? 0 seconds?)
            plural_interval_name = interval_name + 's'
            d = getattr(schedule.every(job.interval), plural_interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s.", suffixed_names[interval_name], job.name)
        elif interval_name == 'day':
            schedule.every().day.at(job.at_time).do(self.run_job, job)
            Log.info(" Loading time-based job: " + job.name)
        else:
            d = getattr(schedule.every(), interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s", interval_name, job.name)
def display_weather(self):
    if not self._scheduled:
        # Start a fresh thread for weather updates
        cg.send('Starting update_weather()')
        self.update_weather()
        self._checkSchedule = True
        self._scheduled = True
        schedule.every(5).minutes.do(self.update_weather)
        cg.thread(self.run_sched)  # Start a separate thread
    elif self._checkSchedule:
        cg.send('Error: update_weather() is already running')
    elif not self._checkSchedule:
        # Allow the weather updates to continue
        cg.send('Toggling weather updates back on')
        self._checkSchedule = True
        cg.thread(self.run_sched)  # Start a separate thread
    else:
        cg.send('No appropriate display_weather() action...')
def main(): config.process_args("HEALTH", default_config_path=cfg.DEFAULT_CONF_PATH, defaults=cfg.DEFAULT, validation_schema=cfg.SCHEMA) # Init Elastic index in backend for src in CONF["sources"]: es.ensure_index_exists(CONF["backend"]["elastic"], src["region"]) # Setup periodic job that does aggregation magic run_every_min = CONF["config"]["run_every_minutes"] schedule.every(run_every_min).minutes.do(job) job() while True: schedule.run_pending() time.sleep(1)
def enable_housekeeping(run_interval=3600):
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        @staticmethod
        def run():
            while not cease_continuous_run.is_set():
                schedule.run_pending()
                time.sleep(run_interval)

    continuous_thread = ScheduleThread()
    continuous_thread.start()

    schedule.every(6).hours.do(LoggingNight.garbage_collect_cache)
def start(job):
    schedule.every(30).minutes.do(job)
    threading.Thread(target=worker).start()
def build_schedule():
    print('Starting sqlcron. Current time: {}'.format(str(datetime.now())))
    interval = int(os.getenv('INTERVAL', '1'))
    unit = os.getenv('UNIT', 'day')
    time_of_day = os.getenv('TIME')
    evaluation_string = 'schedule.every(interval).' + unit
    if time_of_day:
        evaluation_string += '.at(time_of_day)'
    evaluation_string += '.do(run)'
    eval(evaluation_string)
def download_elections():
    call_command('download_elections')

## check if there's an Election today; if so, start checking every minute whether to set live and start import
def start_update_engagement():
    def update_job():
        with DBSession() as session:
            for engagement in session.query(Engagement).all():
                en = json.loads(engagement.engagement)
                en.append(0)
                engagement.engagement = json.dumps(en)
                session.add(engagement)
                session.commit()

    schedule.every().day.at('00:00').do(update_job)
def deferred(self):
    """Called from the thread, schedules pending tasks."""
    schedule.every(10).minutes.do(self._add_rssfeedparsertask)
    schedule.every().minute.do(self._add_episodedownloadtask)
    # Add the RSSFeedParserTask immediately so we don't waste 10 minutes
    self._add_rssfeedparsertask()
    while not self._stop_event.is_set():
        schedule.run_pending()
        sleep(1)
def main(period=None):
    """Constantly check services availability.

    This runs infinite availability check with given period.
    :param period: period in seconds
    """
    if not config.get_config().get("regions"):
        LOG.error("No regions configured. Quitting.")
        return 1

    period = period or config.get_config().get("period", 60)

    if SERVICE_CONN_TIMEOUT + SERVICE_READ_TIMEOUT > period:
        LOG.error("Period can not be lesser than timeout, "
                  "otherwise threads could crowd round.")
        return 1

    backend = config.get_config().get("backend")
    if backend["type"] != "elastic":
        LOG.error("Unexpected backend: %(type)s" % backend)
        return 1

    if not storage.get_elasticsearch(check_availability=True):
        LOG.error("Failed to set up Elasticsearch")
        return 1

    LOG.info("Start watching with period %s seconds" % period)
    schedule.every(period).seconds.do(watch_services)

    while True:
        time.sleep(1)
        schedule.run_pending()
def main():
    '''with open('log.csv', 'r+') as logfile:
        get_filing_list(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5],
                        sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)'''
    # run the scraping job every 6 hours
    # TODO: move scraping interval to environment var
    schedule.every(6).hours.do(get_filing_list, sys.argv[1], sys.argv[2], sys.argv[3],
                               sys.argv[4], sys.argv[5],
                               sys.argv[7].lower() == 'true', sys.argv[7].lower() == 'true', None)
    while True:
        schedule.run_pending()
        time.sleep(1)
def schedule_jobs(api, api_twitter):
    schedule.every(settings.PULL_REQUEST_POLLING_INTERVAL_SECONDS).seconds.do(
        lambda: poll_pull_requests(api, api_twitter))
    schedule.every(settings.ISSUE_COMMENT_POLLING_INTERVAL_SECONDS).seconds.do(
        lambda: poll_read_issue_comments(api))
    schedule.every(settings.ISSUE_CLOSE_STALE_INTERVAL_SECONDS).seconds.do(
        lambda: poll_issue_close_stale(api))

    # Call manually the first time, so that we are guaranteed this will run
    # at least once in the interval...
    poll_issue_close_stale(api)
def init_schedule(self):
    # set the schedule
    schedule.every().day.at(self.start_time).do(self.on)
    schedule.every().day.at(self.stop_time).do(self.off)
def refresher_run(url=None, fetcher=None, database=None, checker=None):
    refresher = Refresher(url_prefix=url, fetcher=fetcher, database=database, checker=checker)
    schedule.every(5).minutes.do(refresher.main)
    while True:
        schedule.run_pending()
        time.sleep(5)
def job():
    trello = TrelloApi('API-KEY-HERE')
    # get cards from trello
    cards = trello.lists.get_card('LIST-ID-HERE')
    # open premade card_names main file in append mode (a+) to keep track of card names
    card_names = open('./card_names.txt', 'a+')
    # open premade tasks file in append mode (a+) to use for printer
    tasks = open('./tasks.txt', 'a+')
    # iterate over each card check that card name is NOT in the card_names file
    for obj in cards:
        # if name is in the card_names file do nothing
        if obj['id'] in open('card_names.txt').read():
            print 'already in the master list'
            pass
        else:
            # add name into task file and the card_names file
            card_names.write(obj['id'] + '\n')
            tasks.write('\n\n\n\n\n' + obj['name'] + '\n\n\n\n\n:)')
            print "yo i'm printing"
    # print task file if there's content
    if os.stat('tasks.txt').st_size > 0:
        call(['lpr', '-o', 'fit-to-page', 'tasks.txt'])
        # clear task file
        f = open('tasks.txt', 'r+')
        f.truncate()

# schedule job to run the code every 2 seconds
def start(self):
    """Sets up the schedule based on the configuration file."""
    schedule.every(self.configuration.data['frequency']).seconds.do(self.execute)
def backgroundThread():
    schedule.every().day.at("12:00").do(update)
    #schedule.every(5).minutes.do(update)
    kill_update = threading.Event()

    class SearchUpdateThread(threading.Thread):
        def run(self):
            while not kill_update.is_set():
                schedule.run_pending()
                time.sleep(1*60*60)  # Every hour.

    searchThread = SearchUpdateThread()
    searchThread.setDaemon(True)
    searchThread.start()
async def send_home_forecast(self) -> None:
    def __periodically_send_morning_forecast__() -> None:
        __send_home_forecast__("sending periodic morning weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date == datetime.date.today(), weathers)))

    def __periodically_send_evening_forecast__() -> None:
        __send_home_forecast__("sending periodic evening weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date != datetime.date.today(), weathers)))

    def __send_home_forecast__(logging_header: str, weathers_predicate) -> None:
        logging.debug(logging_header)
        forecast = self._weather_service.get_forecast(self._home_settings.full_name, self._measurement_system)
        forecast.weathers = weathers_predicate(forecast.weathers)
        for channel in self._discord_client.get_all_channels():
            if self.__should_send_forecast__(channel):
                try:
                    asyncio.get_event_loop().run_until_complete(
                        SendForecastDiscordCommand(self._discord_client, channel, forecast).execute())
                except discord.HTTPException:
                    msg_template = "failed to send home weather forecast (@{}) to channel {}.{}"
                    msg = msg_template.format(self._home_settings.full_name, channel.server.name, channel.name)
                    logging.exception(msg)

    if self._home_settings.morning_forecast_time is not None:
        schedule.every().day.at(self._home_settings.morning_forecast_time)\
            .do(__periodically_send_morning_forecast__)

    if self._home_settings.evening_forecast_time is not None:
        schedule.every().day.at(self._home_settings.evening_forecast_time)\
            .do(__periodically_send_evening_forecast__)

    while True:
        schedule.run_pending()
        await asyncio.sleep(1)
def update():
    global bike_info
    try:
        bike_info = update_data(to_update)
    except Exception as e:
        print('something bad happened: ' + str(e))

# schedule execution of update every minute
def schedule_match():
    try:
        schedule.every(30).seconds.do(refresh_Scorecard).tag('score_updates', 'task')
        while 1:
            schedule.run_pending()
            time.sleep(1)
    except KeyboardInterrupt:
        quit()

# method used to cancel the schedule match
def reloadSchedule():
    with scheduleLock:
        schedule.clear()

        activeSched = None

        with thermostatLock:
            thermoSched = JsonStore( "thermostat_schedule.json" )

            if holdControl != "down":
                if heatControl.state == "down":
                    activeSched = thermoSched[ "heat" ]
                    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "heat" )

            if useTestSchedule:
                activeSched = getTestSchedule()
                log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "test" )
                print "Using Test Schedule!!!"

        if activeSched != None:
            for day, entries in activeSched.iteritems():
                for i, entry in enumerate( entries ):
                    getattr( schedule.every(), day ).at( entry[ 0 ] ).do( setScheduledTemp, entry[ 1 ] )
                    log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT,
                         "Set " + day + ", at: " + entry[ 0 ] + " = " + str( entry[ 1 ] ) + scaleUnits )


##############################################################################
#                                                                            #
#                           Web Server Interface                             #
#                                                                            #
##############################################################################

##############################################################################
# encoding: UTF-8                                                            #
# Form based authentication for CherryPy. Requires the                       #
# Session tool to be loaded.                                                 #
##############################################################################
def with_heartbeat(fn):
    # timedtask wrapper
    def call_func(*args):
        loop = 0
        fn(*args)
        schedule.every(HEARTBEAT).seconds.do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func
def with_heartbeat_30s(fn):
    # timedtask wrapper
    def call_func(*args):
        HEARTBEAT = 30
        loop = 0
        fn(*args)
        schedule.every(HEARTBEAT).seconds.do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func
def with_heartbeat_1d(fn):
    # timedtask wrapper
    def call_func(*args):
        HEARTBEAT = 30
        loop = 0
        fn(*args)
        schedule.every().day.at("10:30").do(fn, *args)
        while 1:
            print("#" * 15, "loop:%s" % loop, "#" * 15)
            schedule.run_pending()
            sleep(HEARTBEAT)
            loop += 1
    return call_func
def handle(self, *args, **options):
    schedule.every(1).hours.do(tasks.kill_obsolete_timers)
    schedule.every(5).seconds.do(tasks.send_reminders)
    self.stdout.write(self.style.SUCCESS('Starting job runner...'))
    while True:
        time.sleep(1)
        try:
            schedule.run_pending()
        except:
            traceback.print_exc()
def reloadSchedule():
    with scheduleLock:
        schedule.clear()

        activeSched = None

        with thermostatLock:
            thermoSched = JsonStore( "thermostat_schedule.json" )

            if holdControl.state != "down":
                if heatControl.state == "down":
                    activeSched = thermoSched[ "heat" ]
                    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "heat" )
                elif coolControl.state == "down":
                    activeSched = thermoSched[ "cool" ]
                    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "cool" )

            if useTestSchedule:
                activeSched = getTestSchedule()
                log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "test" )
                print "Using Test Schedule!!!"

        if activeSched != None:
            for day, entries in activeSched.iteritems():
                for i, entry in enumerate( entries ):
                    getattr( schedule.every(), day ).at( entry[ 0 ] ).do( setScheduledTemp, entry[ 1 ] )
                    log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT,
                         "Set " + day + ", at: " + entry[ 0 ] + " = " + str( entry[ 1 ] ) + scaleUnits )


##############################################################################
#                                                                            #
#                           Web Server Interface                             #
#                                                                            #
##############################################################################
def schedule_actions():
    # Example: nohup python MyScheduledProgram.py &> ~/Desktop/output.log
    # ps auxw to see running ones
    print(datetime.datetime.now())
    print("Starting to run")
    times = ['6:07', '06:24']

    # Buy today's positions
    for set_time in times:
        schedule.every().monday.at(set_time).do(action)
        schedule.every().tuesday.at(set_time).do(action)
        schedule.every().wednesday.at(set_time).do(action)
        schedule.every().thursday.at(set_time).do(action)
        schedule.every().friday.at(set_time).do(action)

    # Sell yesterday's positions
    set_time = '06:01'
    schedule.every().monday.at(set_time).do(sell_scheduled)
    schedule.every().tuesday.at(set_time).do(sell_scheduled)
    schedule.every().wednesday.at(set_time).do(sell_scheduled)
    schedule.every().thursday.at(set_time).do(sell_scheduled)
    schedule.every().friday.at(set_time).do(sell_scheduled)

    while True:
        schedule.run_pending()
        sys.stdout.flush()
        time.sleep(1)  # Check every 1 second
def run(self):
    schedule.every(1).seconds.do(self.run_threaded, self.run_generator_dispatch)
    schedule.every(1).seconds.do(self.run_threaded, self.run_processor_dispatch)
    schedule.every(1).seconds.do(self.run_threaded, self.run_query_project_status)
    while True:
        schedule.run_pending()
        time.sleep(1)
def job():
    db.session.query(UserAlbum).delete()

# schedule.every(1).minutes.do(job)
def schedule_function(self, func, time):
    print("Scheduled function " + func.__name__ + " at " + time)
    schedule.every().day.at(time).do(self.run_on_trading_day, func)
def schedule_checks(checkers):
    schedule.clear()
    for checker in checkers:
        conf = checker.conf
        period = conf["period"]
        logger.info(
            "Scheduling checks for %r every %r seconds",
            conf["name"], period,
        )
        schedule.every(period).seconds.do(checker.check)
def run(self):
    def delete_job():
        try:
            Mail.delete_one_day_ago()
        except:
            logger.exception("Exception raised in MailDeleteBatch#run()#delete_job()")
            raise

    schedule.every().hour.do(delete_job)
    while True:
        schedule.run_pending()
        time.sleep(1)
def write_new_file_and_scrape_all_data(filename, link_list, num_retries=10):
    """
    Writes a new file, scrapes data from every product link in a list, and
    appends each product's data to the previously created file.
    """
    open_new_file(filename)
    scrape_all_data_from_all_featured_products(filename, link_list, num_retries)
def write_new_file_and_dynamically_scrape_all_data(filename, link_list, interval, num_retries=10):
    """
    Writes a new file and repeatedly runs the scraper every time the specified
    interval has passed and continuously appends the data to a file.
    """
    open_new_file(filename)
    dynamically_scrape_data(filename, link_list, interval, num_retries)
def write_new_file_update_links_and_dynamically_scrape(filename, interval, num_retries=10):
    """
    Writes a new file and repeatedly updates the link list and runs the scraper
    every time the specified interval has passed and continuously appends the
    data to a file.
    """
    open_new_file(filename)
    clean_links_and_dynamically_scrape(filename, interval, num_retries)
def dynamically_scrape_and_append_sales_data(filename, interval, num_retries=10):
    """
    Dynamically scrapes sales data and appends the data to a file by generating
    a list of links, checking it against an old list and only keeping new links,
    and scraping those links for sales data.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_and_append_sales_data_from_featured_links(filename, clean_links, num_retries)

        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
def scrape_combined_data_from_all_featured_products(data_filename, sales_filename, link_list, num_retries=10):
    """
    Scrapes all data from every featured product and appends that data to
    their respective files.
    """
    for url in link_list:
        scrape_and_append_combined_data(url, data_filename, sales_filename, num_retries)
def dynamically_scrape_combined_data(data_filename, sales_filename, interval, num_retries=10):
    """
    Dynamically scrapes a continuously updated list of unique clean links and
    appends the data to their respective files.
    """
    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_combined_data_from_all_featured_products(data_filename, sales_filename, clean_links, num_retries)

        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
def main():
    if not _config.is_registered():
        register()
    refreshinstalled()
    system_notify()
    schedule.every(2).hours.do(refreshinstalled)
    schedule.every(10).minutes.do(system_notify)
    schedule.every(30).seconds.do(do_update)
    while True:
        schedule.run_pending()
        sleep(5)