The following 50 code examples, extracted from open-source Python projects, illustrate how to use time.time().
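Before the extracted examples, here is a minimal sketch of how time.time() is typically used. It is not taken from any of the projects below, and the names start, elapsed, now and begin are only illustrative. It shows the two forms that recur throughout the examples: the module-level call time.time(), and the direct import "from time import time", used to measure elapsed wall-clock time or implement timeouts.

import time

# Module-level form: time.time() returns seconds since the Unix epoch as a float.
start = time.time()
time.sleep(0.25)               # stand-in for the work being timed
elapsed = time.time() - start
print("elapsed: %.3f seconds" % elapsed)

# Direct-import form used by several examples below; "now" is an alias chosen
# for this sketch only, to avoid shadowing the module name.
from time import time as now
begin = now()
deadline_exceeded = (now() - begin) > 5   # typical timeout check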
def drip(self): """Let some of the bucket drain. How much of the bucket drains depends on how long it has been since I was last called. @returns: True if I am now empty. @returntype: bool """ if self.parentBucket is not None: self.parentBucket.drip() if self.rate is None: self.content = 0 return True else: now = time() deltaT = now - self.lastDrip self.content = long(max(0, self.content - deltaT * self.rate)) self.lastDrip = now return False
def getBucketFor(self, *a, **kw): """You want a bucket for that? I'll give you a bucket. Any parameters are passed on to L{getBucketKey}, from them it decides which bucket you get. @returntype: L{Bucket} """ if ((self.sweepInterval is not None) and ((time() - self.lastSweep) > self.sweepInterval)): self.sweep() if self.parentFilter: parentBucket = self.parentFilter.getBucketFor(self, *a, **kw) else: parentBucket = None key = self.getBucketKey(*a, **kw) bucket = self.buckets.get(key) if bucket is None: bucket = self.bucketFactory(parentBucket) self.buckets[key] = bucket return bucket
def _update_adabn(self, eval_data):
    '''Update moving mean and moving var with eval data'''
    from time import time
    start = time()
    with self._restore_eval_data(eval_data):
        for _ in range(self.num_adabn_epoch):
            eval_data.reset()
            for nbatch, eval_batch in enumerate(eval_data):
                self.forward(eval_batch, is_train=True)
                for out in self.get_outputs():
                    # Cause memory leak (though not increase after this _update_adabn) without this wait
                    # TODO: fixme
                    out.wait_to_read()
                # for name, block in zip(self._exec_group.aux_names, self._exec_group.aux_arrays):
                #     if 'moving' in name:
                #         for a in block:
                #             a.wait_to_read()
    logger.debug(
        'AdaBN with {} epochs takes {} seconds',
        self.num_adabn_epoch, time() - start
    )
def recvall(the_socket, timeout=5):
    the_socket.setblocking(0)
    total_data = []
    data = ""
    begin = time()
    while True:
        sleep(0.05)
        if total_data and time() - begin > timeout:
            break
        elif time() - begin > timeout * 2:
            break
        try:
            data = the_socket.recv(1024)
            if data:
                total_data.append(data)
                begin = time()
        except Exception:
            pass
    return "".join(total_data)
def ltdownload_metadata(address, binhash, metadata_queue, timeout=40):
    metadata = None
    start_time = time.time()
    try:
        session = lt.session()
        r = random.randrange(10000, 50000)
        session.listen_on(r, r+10)
        session.add_dht_router('router.bittorrent.com', 6881)
        session.add_dht_router('router.utorrent.com', 6881)
        session.add_dht_router('dht.transmission.com', 6881)
        session.add_dht_router('127.0.0.1', 6881)
        session.start_dht()

        metadata = fetch_torrent(session, binhash.encode('hex'), timeout)
        session = None
    except:
        traceback.print_exc()
    finally:
        metadata_queue.put((binhash, address, metadata, 'lt', start_time))
def calcRemainingTime(self):
    seekable = self.getSeek()
    if seekable is not None:
        len = seekable.getLength()
        try:
            tmp = self.cueGetEndCutPosition()
            if tmp:
                len = (False, tmp)
        except:
            pass
        pos = seekable.getPlayPosition()
        speednom = self.seekstate[1] or 1
        speedden = self.seekstate[2] or 1
        if not len[0] and not pos[0]:
            if len[1] <= pos[1]:
                return 0
            time = (len[1] - pos[1]) * speedden / (90 * speednom)
            return time
    return False
def do_real_import(self, vsfile, filepath, mdXML, import_tags):
    """
    Make the import call to vidispine, and wait for self._importer_timeout seconds for the job to complete.
    Raises a VSException representing the job error if the import job fails, or ImportStalled if the timeout occurs
    :param vsfile: VSFile object to import
    :param filepath: filepath of the VSFile
    :param mdXML: compiled metadata XML to import alongside the media
    :param import_tags: shape tags describing required transcodes
    :return: None
    """
    import_job = vsfile.importToItem(mdXML, tags=import_tags, priority="LOW", jobMetadata={"gnm_app": "vsingester"})
    job_start_time = time.time()
    close_sent = False

    while import_job.finished() is False:
        self.logger.info("\tJob status is %s" % import_job.status())
        if time.time() - job_start_time > self._importer_timeout:
            self.logger.error("\tJob has taken more than {0} seconds to complete, concluding that it must be stalled.".format(self._importer_timeout))
            import_job.abort()
            self.logger.error("\tSent abort signal to job")
            raise ImportStalled(filepath)
        if time.time() - job_start_time > self._close_file_timeout and not close_sent:
            vsfile.setState("CLOSED")
        sleep(5)
        import_job.update(noraise=False)
def greaterThan(self, t):
    """Compare this DateTime object to another DateTime object OR
    a floating point number such as that which is returned by the
    python time module.

    Returns true if the object represents a date/time greater
    than the specified DateTime or time module style time.

    Revised to give more correct results through comparison of
    long integer microseconds.
    """
    if t is None:
        t = 0
    if isinstance(t, float):
        return self._micros > long(t * 1000000)
    try:
        return self._micros > t._micros
    except AttributeError:
        return self._micros > t
def equalTo(self, t):
    """Compare this DateTime object to another DateTime object OR
    a floating point number such as that which is returned by the
    python time module.

    Returns true if the object represents a date/time equal to
    the specified DateTime or time module style time.

    Revised to give more correct results through comparison of
    long integer microseconds.
    """
    if t is None:
        t = 0
    if isinstance(t, float):
        return self._micros == long(t * 1000000)
    try:
        return self._micros == t._micros
    except AttributeError:
        return self._micros == t
def lessThan(self, t):
    """Compare this DateTime object to another DateTime object OR
    a floating point number such as that which is returned by the
    python time module.

    Returns true if the object represents a date/time less than
    the specified DateTime or time module style time.

    Revised to give more correct results through comparison of
    long integer microseconds.
    """
    if t is None:
        t = 0
    if isinstance(t, float):
        return self._micros < long(t * 1000000)
    try:
        return self._micros < t._micros
    except AttributeError:
        return self._micros < t
def lessThanEqualTo(self, t):
    """Compare this DateTime object to another DateTime object OR
    a floating point number such as that which is returned by the
    python time module.

    Returns true if the object represents a date/time less than
    or equal to the specified DateTime or time module style time.

    Revised to give more correct results through comparison of
    long integer microseconds.
    """
    if t is None:
        t = 0
    if isinstance(t, float):
        return self._micros <= long(t * 1000000)
    try:
        return self._micros <= t._micros
    except AttributeError:
        return self._micros <= t
def strftime(self, format):
    """Format the date/time using the *current timezone representation*."""
    x = _calcDependentSecond2(self._year, self._month, self._day,
                              self._hour, self._minute, self._second)
    ltz = self._calcTimezoneName(x, 0)
    tzdiff = _tzoffset(ltz, self._t) - _tzoffset(self._tz, self._t)
    zself = self + tzdiff / 86400.0
    microseconds = int((zself._second - zself._nearsec) * 1000000)
    unicode_format = False
    if isinstance(format, explicit_unicode_type):
        format = format.encode('utf-8')
        unicode_format = True
    ds = datetime(zself._year, zself._month, zself._day, zself._hour,
                  zself._minute, int(zself._nearsec),
                  microseconds).strftime(format)
    if unicode_format:
        return ds.decode('utf-8')
    return ds

# General formats from previous DateTime
def ISO8601(self):
    """Return the object in ISO 8601-compatible format containing the
    date, time with seconds-precision and the time zone identifier.

    See: http://www.w3.org/TR/NOTE-datetime

    Dates are output as: YYYY-MM-DDTHH:MM:SSTZD
        T is a literal character.
        TZD is Time Zone Designator, format +HH:MM or -HH:MM

    If the instance is timezone naive (it was not specified with a
    timezone when it was constructed) then the timezone is omitted.

    The HTML4 method below offers the same formatting, but converts
    to UTC before returning the value and sets the TZD "Z".
    """
    if self.timezoneNaive():
        return "%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d" % (
            self._year, self._month, self._day,
            self._hour, self._minute, self._second)
    tzoffset = _tzoffset2iso8601zone(_tzoffset(self._tz, self._t))
    return "%0.4d-%0.2d-%0.2dT%0.2d:%0.2d:%0.2d%s" % (
        self._year, self._month, self._day,
        self._hour, self._minute, self._second, tzoffset)
def add_timer(self, callback, when, interval, ident):
    ''' Add timer to the data structure.

    :param callback: Arbitrary callable object.
    :type callback: ``callable object``
    :param when: The first expiration time, seconds since epoch.
    :type when: ``integer``
    :param interval: Timer interval, if equals 0, one time timer, otherwise
        the timer will be periodically executed
    :type interval: ``integer``
    :param ident: (optional) Timer identity.
    :type ident: ``integer``
    :returns: A timer object which should not be manipulated directly by
        clients. Used to delete/update the timer
    :rtype: ``solnlib.timer_queue.Timer``
    '''
    timer = Timer(callback, when, interval, ident)
    self._timers.add(timer)
    return timer
def get_expired_timers(self):
    ''' Get a list of expired timers.

    :returns: a list of ``Timer``, empty list if there is no expired
        timers.
    :rtype: ``list``
    '''
    next_expired_time = 0
    now = time()
    expired_timers = []
    for timer in self._timers:
        if timer.when <= now:
            expired_timers.append(timer)

    if expired_timers:
        del self._timers[:len(expired_timers)]

    if self._timers:
        next_expired_time = self._timers[0].when
    return (next_expired_time, expired_timers)
def add_timer(self, callback, when, interval, ident=None):
    ''' Add timer to the queue.

    :param callback: Arbitrary callable object.
    :type callback: ``callable object``
    :param when: The first expiration time, seconds since epoch.
    :type when: ``integer``
    :param interval: Timer interval, if equals 0, one time timer, otherwise
        the timer will be periodically executed
    :type interval: ``integer``
    :param ident: (optional) Timer identity.
    :type ident: ``integer``
    :returns: A timer object which should not be manipulated directly by
        clients. Used to delete/update the timer
    '''
    with self._lock:
        timer = self._timers.add_timer(callback, when, interval, ident)
        self._wakeup()
        return timer
def drip(self): """ Let some of the bucket drain. The L{Bucket} drains at the rate specified by the class variable C{rate}. @returns: C{True} if the bucket is empty after this drip. @returntype: C{bool} """ if self.parentBucket is not None: self.parentBucket.drip() if self.rate is None: self.content = 0 else: now = time() deltaTime = now - self.lastDrip deltaTokens = deltaTime * self.rate self.content = max(0, self.content - deltaTokens) self.lastDrip = now return self.content == 0
def getBucketFor(self, *a, **kw): """ Find or create a L{Bucket} corresponding to the provided parameters. Any parameters are passed on to L{getBucketKey}, from them it decides which bucket you get. @returntype: L{Bucket} """ if ((self.sweepInterval is not None) and ((time() - self.lastSweep) > self.sweepInterval)): self.sweep() if self.parentFilter: parentBucket = self.parentFilter.getBucketFor(self, *a, **kw) else: parentBucket = None key = self.getBucketKey(*a, **kw) bucket = self.buckets.get(key) if bucket is None: bucket = self.bucketFactory(parentBucket) self.buckets[key] = bucket return bucket
def __init__(self, status, output, tick=.1, update_interval=1):
    """
    :type status: Status
    :type output: file
    """
    super(ProgressReporterThread, self).__init__()
    self.status = status
    self.output = output
    self._tick = tick
    self._update_interval = update_interval

    self._spinner_pos = 0
    self._status_line = ''
    self._prev_bytes = 0
    self._prev_time = time()
    self._should_stop = threading.Event()
def sum_up(self):
    actually_downloaded = (self.status.downloaded - self.status.resumed_from)
    time_taken = self.status.time_finished - self.status.time_started
    self.output.write(CLEAR_LINE)

    try:
        speed = actually_downloaded / time_taken
    except ZeroDivisionError:
        # Either time is 0 (not all systems provide `time.time`
        # with a better precision than 1 second), and/or nothing
        # has been downloaded.
        speed = actually_downloaded

    self.output.write(SUMMARY.format(
        downloaded=humanize_bytes(actually_downloaded),
        total=(self.status.total_size and humanize_bytes(self.status.total_size)),
        speed=humanize_bytes(speed),
        time=time_taken,
    ))
    self.output.flush()
def run(self, count=None, *args, **kwargs):
    from time import time

    self._start_time = time()

    count = count if count else self._count
    cb_count = self._callback_freq

    for i, row in enumerate(self._source_pipe):
        self.i = i

        if count and i == count:
            break

        if cb_count == 0:
            cb_count = self._callback_freq
            self._callback(self, i)

        cb_count -= 1
def activateTimeshiftEnd(self, back = True):
    self.back = back
    self.showTimeshiftState = True
    ts = self.getTimeshift()
    print "activateTimeshiftEnd"

    if ts is None:
        return

    if ts.isTimeshiftActive():
        print "!! activate timeshift called - but shouldn't this be a normal pause?"
        self.pauseService()
    else:
        print "play, ..."
        self.session.open(MessageBox, _("Timeshift"), MessageBox.TYPE_INFO, timeout = 3)
        ts.activateTimeshift()  # activate timeshift will automatically pause
        self.ts_init_delay_timer.start(2000, True)  # hack for spark
        #spark needs some time to initialize
def __init__(self, parentBucket=None):
    self.content = 0
    self.parentBucket = parentBucket
    self.lastDrip = time()
def __init__(self, parentFilter=None):
    self.buckets = {}
    self.parentFilter = parentFilter
    self.lastSweep = time()
def sweep(self):
    """I throw away references to empty buckets."""
    for key, bucket in self.buckets.items():
        if (bucket._refcount == 0) and bucket.drip():
            del self.buckets[key]

    self.lastSweep = time()
def t(self):
    """
    The computed time step.
    """
    return ((int(self.time) - self.t0) // self.step) + self.drift
def time(self):
    """
    The current time.

    By default, this returns time.time() each time it is accessed. If you
    want to generate a token at a specific time, you can set this property
    to a fixed value instead. Deleting the value returns it to its 'live'
    state.
    """
    return self._time if (self._time is not None) else time()
def time(self, value):
    self._time = value
def time(self):
    self._time = None
def fetch_torrent(session, ih, timeout):
    name = ih.upper()
    url = 'magnet:?xt=urn:btih:%s' % (name,)
    data = ''
    params = {
        'save_path': '/tmp/downloads/',
        'storage_mode': lt.storage_mode_t(2),
        'paused': False,
        'auto_managed': False,
        'duplicate_is_error': True}
    try:
        handle = lt.add_magnet_uri(session, url, params)
    except:
        return None
    status = session.status()

    #print 'downloading metadata:', url
    handle.set_sequential_download(1)
    meta = None
    down_time = time.time()
    down_path = None
    for i in xrange(0, timeout):
        if handle.has_metadata():
            info = handle.get_torrent_info()
            down_path = '/tmp/downloads/%s' % info.name()
            #print 'status', 'p', status.num_peers, 'g', status.dht_global_nodes, 'ts', status.dht_torrents, 'u', status.total_upload, 'd', status.total_download
            meta = info.metadata()
            break
        time.sleep(1)

    if down_path and os.path.exists(down_path):
        os.system('rm -rf "%s"' % down_path)

    session.remove_torrent(handle)
    return meta
def setResumePoint(session):
    global resumePointCache, resumePointCacheLast
    service = session.nav.getCurrentService()
    ref = session.nav.getCurrentlyPlayingServiceOrGroup()
    if (service is not None) and (ref is not None):  # and (ref.type != 1):
        # ref type 1 has its own memory...
        seek = service.seek()
        if seek:
            pos = seek.getPlayPosition()
            if not pos[0]:
                key = ref.toString()
                lru = int(time())
                l = seek.getLength()
                if l:
                    l = l[1]
                else:
                    l = None
                resumePointCache[key] = [lru, pos[1], l]
                if len(resumePointCache) > 50:
                    candidate = key
                    for k, v in resumePointCache.items():
                        if v[0] < lru:
                            candidate = k
                    del resumePointCache[candidate]
                if lru - resumePointCacheLast > 3600:
                    saveResumePoints()
def delResumePoint(ref):
    global resumePointCache, resumePointCacheLast
    try:
        del resumePointCache[ref.toString()]
    except KeyError:
        pass
    if int(time()) - resumePointCacheLast > 3600:
        saveResumePoints()
def getResumePoint(session):
    global resumePointCache
    ref = session.nav.getCurrentlyPlayingServiceOrGroup()
    if (ref is not None) and (ref.type != 1):
        try:
            entry = resumePointCache[ref.toString()]
            entry[0] = int(time())  # update LRU timestamp
            return entry[1]
        except KeyError:
            return None
def saveResumePoints():
    global resumePointCache, resumePointCacheLast
    import cPickle
    try:
        f = open('/home/root/resumepoints.pkl', 'wb')
        cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
    except Exception, ex:
        print "[InfoBar] Failed to write resumepoints:", ex
    resumePointCacheLast = int(time())
def ScreenSaverTimerStart(self):
    time = int(config.usage.screen_saver.value)
    flag = self.seekstate[0]
    if not flag:
        ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if ref and not (hasattr(self.session, "pipshown") and self.session.pipshown):
            ref = ref.toString().split(":")
            flag = ref[2] == "2" or os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS
    if time and flag:
        self.screenSaverTimer.startLongTimer(time)
    else:
        self.screenSaverTimer.stop()
def __init__(self): self["MovieListActions"] = HelpableActionMap(self, "InfobarMovieListActions", { "movieList": (self.showMovies, _("Open the movie list")), "up": (self.up, _("Open the movie list")), "down": (self.down, _("Open the movie list")) }, prio=2) # InfoBarTimeshift requires InfoBarSeek, instantiated BEFORE! # Hrmf. # # Timeshift works the following way: # demux0 demux1 "TimeshiftActions" "TimeshiftActivateActions" "SeekActions" # - normal playback TUNER unused PLAY enable disable disable # - user presses "yellow" button. FILE record PAUSE enable disable enable # - user presess pause again FILE record PLAY enable disable enable # - user fast forwards FILE record FF enable disable enable # - end of timeshift buffer reached TUNER record PLAY enable enable disable # - user backwards FILE record BACK # !! enable disable enable # # in other words: # - when a service is playing, pressing the "timeshiftStart" button ("yellow") enables recording ("enables timeshift"), # freezes the picture (to indicate timeshift), sets timeshiftMode ("activates timeshift") # now, the service becomes seekable, so "SeekActions" are enabled, "TimeshiftEnableActions" are disabled. # - the user can now PVR around # - if it hits the end, the service goes into live mode ("deactivates timeshift", it's of course still "enabled") # the service looses it's "seekable" state. It can still be paused, but just to activate timeshift right # after! # the seek actions will be disabled, but the timeshiftActivateActions will be enabled # - if the user rewinds, or press pause, timeshift will be activated again # note that a timeshift can be enabled ("recording") and # activated (currently time-shifting).
def startTimeshift(self, pauseService = True):
    print "enable timeshift"
    ts = self.getTimeshift()
    if ts is None:
        if not pauseService and not int(config.usage.timeshift_start_delay.value):
            self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, simple = True)
        print "no ts interface"
        return 0

    if ts.isTimeshiftEnabled():
        print "hu, timeshift already enabled?"
    else:
        if not ts.startTimeshift():
            # we remove the "relative time" for now.
            #self.pvrStateDialog["timeshift"].setRelative(time.time())

            if pauseService:
                # PAUSE.
                #self.setSeekState(self.SEEK_STATE_PAUSE)
                self.activateTimeshiftEnd(False)
                self.showTimeshiftState = True
            else:
                self.showTimeshiftState = False

            # enable the "TimeshiftEnableActions", which will override
            # the startTimeshift actions
            self.__seekableStatusChanged()

            # get current timeshift filename and calculate new
            self.save_timeshift_file = False
            self.save_timeshift_in_movie_dir = False
            self.setCurrentEventTimer()
            self.current_timeshift_filename = ts.getTimeshiftFilename()
            self.new_timeshift_filename = self.generateNewTimeshiftFileName()
        else:
            print "timeshift failed"
def generateNewTimeshiftFileName(self):
    name = "timeshift record"
    info = { }
    self.getProgramInfoAndEvent(info, name)

    serviceref = info["serviceref"]

    service_name = ""
    if isinstance(serviceref, eServiceReference):
        service_name = ServiceReference(serviceref).getServiceName()
    begin_date = strftime("%Y%m%d %H%M", localtime(time()))
    filename = begin_date + " - " + service_name

    if config.recording.filename_composition.value == "short":
        filename = strftime("%Y%m%d", localtime(time())) + " - " + info["name"]
    elif config.recording.filename_composition.value == "long":
        filename += " - " + info["name"] + " - " + info["description"]
    else:
        filename += " - " + info["name"]  # standard

    if config.recording.ascii_filenames.value:
        filename = ASCIItranslit.legacyEncode(filename)

    print "New timeshift filename: ", filename
    return filename

# same as activateTimeshiftEnd, but pauses afterwards.
def currentEventTime(self):
    remaining = 0
    ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    if ref:
        epg = eEPGCache.getInstance()
        event = epg.lookupEventTime(ref, -1, 0)
        if event:
            now = int(time())
            start = event.getBeginTime()
            duration = event.getDuration()
            end = start + duration
            remaining = end - now
    return remaining
def inputCallback(self, value):
    if value:
        print "stopping recording after", int(value), "minutes."
        entry = self.recording[self.selectedEntry]
        if int(value) != 0:
            entry.autoincrease = False
        entry.end = int(time()) + 60 * int(value)
        self.session.nav.RecordTimer.timeChanged(entry)
def restartInactiveTimer(self):
    time = abs(int(config.usage.inactivity_timer.value))
    if time:
        self.inactivityTimer.startLongTimer(time)
    else:
        self.inactivityTimer.stop()