The following code examples, extracted from open-source Python projects, illustrate how to use resource.getrusage().
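Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what a resource.getrusage() call returns; the field meanings follow the standard-library documentation, and note that ru_maxrss units differ by platform (kilobytes on Linux, bytes on macOS).

import resource

# Query resource usage for the calling process; RUSAGE_CHILDREN
# would instead cover terminated, waited-for child processes.
usage = resource.getrusage(resource.RUSAGE_SELF)

print("user CPU time (s):  ", usage.ru_utime)
print("system CPU time (s):", usage.ru_stime)
# ru_maxrss is the peak resident set size: kilobytes on Linux, bytes on macOS.
print("peak RSS:           ", usage.ru_maxrss)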
def start(self, verbose=False):
    if verbose:
        self.process = subprocess.Popen(filter(None, self.cmd.split(" ")), stdin=None, stdout=None, stderr=None)
    else:
        self.process = subprocess.Popen(filter(None, self.cmd.split(" ")), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    self.stat_fd = open("/proc/" + str(self.process.pid) + "/stat")
    self.init()

    try:
        self.set_init_state()
    except:
        return False

    self.initial_mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    #time.sleep(1)
    self.kafl_shm.seek(0x0)
    self.kafl_shm.write(self.virgin_bitmap)
    self.kafl_shm.flush()

    return True
def get_used_memory():
    """ Return the used memory in MB """
    if platform.system() == 'Linux':
        for line in open('/proc/self/status'):
            if line.startswith('VmRSS:'):
                return int(line.split()[1]) >> 10
    else:
        warnings.warn("Please install psutil to have better "
                      "support with spilling")
        if platform.system() == "Darwin":
            import resource
            rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            return rss >> 20
        # TODO: support windows
    return 0
def memory_usage():
    # If we are on linux
    if platform == "linux" or platform == "linux2":
        kilobytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss  # peak memory usage (bytes on OS X, kilobytes on Linux)
        gigabytes = kilobytes * 1e-6
        return gigabytes
    # If we are on Mac OS X
    elif platform == "darwin":
        kilobytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss  # peak memory usage (bytes on OS X, kilobytes on Linux)
        gigabytes = kilobytes * 1e-9
        return gigabytes
    # We don't support Windows
    elif platform == "win32":
        raise EnvironmentError("The Windows operating system is not supported")
    # Unrecognized platform
    else:
        raise EnvironmentError("Unrecognized platform")

# -----------------------------------------------------------------
def report_resources(agent):
    ru = resource.getrusage(resource.RUSAGE_SELF)
    out = {"utime": ru.ru_utime,
           "stime": ru.ru_stime,
           "maxrss": ru.ru_maxrss,
           "ixrss": ru.ru_ixrss,
           "idrss": ru.ru_idrss,
           "isrss": ru.ru_isrss,
           "minflt": ru.ru_minflt,
           "majflt": ru.ru_majflt,
           "nswap": ru.ru_nswap,
           "inblock": ru.ru_inblock,
           "oublock": ru.ru_oublock,
           "msgsnd": ru.ru_msgsnd,
           "msgrcv": ru.ru_msgrcv,
           "nsignals": ru.ru_nsignals,
           "nvcsw": ru.ru_nvcsw,
           "nivcsw": ru.ru_nivcsw
           }
    return out
def test_lots_of_queries(self):
    import resource
    import objgraph

    class LoadTest(Model):
        k = columns.Integer(primary_key=True)
        v = columns.Integer()

    sync_table(LoadTest)
    gc.collect()
    objgraph.show_most_common_types()

    print("Starting...")

    for i in range(1000000):
        if i % 25000 == 0:
            # print memory statistic
            print("Memory usage: %s" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))

        LoadTest.create(k=i, v=i)

    objgraph.show_most_common_types()

    raise Exception("you shouldn't be here")
def get_resource_usage_report():
    r = resource.getrusage(0)
    key_to_description = {
        "ru_utime": "time in user mode (float)",
        "ru_stime": "time in system mode (float)",
        "ru_maxrss": "maximum resident set size",
        "ru_ixrss": "shared memory size",
        "ru_idrss": "unshared memory size",
        "ru_isrss": "unshared stack size",
        "ru_minflt": "page faults not requiring I/O",
        "ru_majflt": "page faults requiring I/O",
        "ru_nswap": "number of swap outs",
        "ru_inblock": "block input operations",
        "ru_oublock": "block output operations",
        "ru_msgsnd": "messages sent",
        "ru_msgrcv": "messages received",
        "ru_nsignals": "signals received",
        "ru_nvcsw": "voluntary context switches",
        "ru_nivcsw": "involuntary context switches",
    }
    return dict([(v, getattr(r, k)) for k, v in key_to_description.items()])
def memory_fitness(threshold=2e9, maximum=3e9):
    """
    Returns a penalty for using too much memory. Add this to your fitness function.

    This measures the current process's maximum resident set (maxrss), which is
    the all-time peak memory usage for the calling process.

    Argument threshold is where this penalty begins.

    Argument maximum is where this penalty becomes an error (ValueError).

    Returns in the range [0, 1] where 0 is no penalty and 1 is the maximum
    memory usage.  Linear ramp from threshold to maximum.
    """
    rsc = resource.getrusage(resource.RUSAGE_SELF)
    size = rsc.ru_maxrss * 1024
    fit = (size - threshold) / (maximum - threshold)
    if fit > 1:
        raise ValueError("Individual exceeded memory limit (size %d bytes, maximum %d)." % (size, maximum))
    return max(0, fit)
def test_gcr_memory(self):
    self.request = FakeRequest()

    count = 0
    current = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
    while True:
        count += 1
        get_current_request()
        if count % 1000000 == 0:
            break
        if count % 100000 == 0:
            gc.collect()
            new = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
            if new - current > 10:  # memory leak, this shouldn't happen
                assert new == current
def ingest_daemon(self):
    x = 0
    while True:
        node_work_directory = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(
                os.path.abspath(__file__)
            ))),
            'VEDA_WORKING'
        )
        FD = FileDiscovery(
            node_work_directory=node_work_directory
        )
        FD.discover_studio_ingested_videos()
        FD.about_video_ingest()
        reset_queries()
        x += 1
        if x >= 100:
            print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            x = 0
def youtube_daemon(self):
    x = 0
    while True:
        self.course_list = generate_course_list()
        for course in self.course_list:
            print "%s%s: Callback" % (course.institution, course.edx_classid)
            callfunction(course)
            x += 1
            if x >= 100:
                print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                x = 0
                reset_queries()

        self.course_list = []
        time.sleep(10)
def test_gcr_memory(self):
    self.request = get_mocked_request()

    count = 0
    current = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
    while True:
        count += 1
        utils.get_current_request()
        if count % 1000000 == 0:
            break
        if count % 100000 == 0:
            gc.collect()
            new = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
            if new - current > 10:  # memory leak, this shouldn't happen
                assert new == current
def clock():
    """clock() -> floating point number

    Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
    the process.  This is done via a call to resource.getrusage, so it
    avoids the wraparound problems in time.clock()."""
    u, s = resource.getrusage(resource.RUSAGE_SELF)[:2]
    return u + s
def get_ram(self):
    """Get the bot's RAM usage info."""
    if have_psutil:  # yay!
        mu = psutil.Process(os.getpid()).memory_info().rss
        return (True, mu / 1000000, mu / 1048576)
    else:  # aww
        raw_musage = 0
        got_conversion = False
        musage_dec = 0
        musage_hex = 0
        if sys.platform.startswith('linux'):  # Linux & Windows report in kilobytes
            raw_musage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            got_conversion = True
            musage_dec = raw_musage / 1000
            musage_hex = raw_musage / 1024
        elif sys.platform == 'darwin':  # Mac reports in bytes
            raw_musage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            got_conversion = True
            musage_dec = raw_musage / 1000000  # 1 million. 1000 * 1000
            musage_hex = raw_musage / 1048576  # 1024 * 1024
        if got_conversion:
            return (got_conversion, musage_dec, musage_hex)
        else:
            return (got_conversion,)
def main():
    parse_command_line()
    print("Loading markov chains...")
    markov_chain.load_chains()

    # Print memory usage for the server when all chains are loaded
    memory_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1000.0
    memory_usage = humanfriendly.format_size(memory_usage)
    print("Markov chain server loaded, memory usage %s" % memory_usage)

    application = tornado.web.Application([
        (r"/", MainHandler),
    ], debug=False)
    application.listen(5666, address='127.0.0.1')
    tornado.ioloop.IOLoop.instance().start()
def Debug():
    try:
        global debugCount
        debugCount = debugCount + 1
        resUsage = getrusage(RUSAGE_SELF)
        size = resUsage.ru_maxrss
        info("Memory usage : " + str(debugCount) + " size: " + str(size))
        info("Resouce usage info: " + str(resUsage))
        # Memory leaks display currently commented out
        # show_growth()
        # obj=get_leaking_objects()
        # warn('Leaking objects size='+str(len(obj)))
        # filepathdebug='/var/log/myDebug'+str(debugCount)
        # with open(filepathdebug, "w+") as f:  # replace filepath & filename
        #     f.write('Debug resouce iteration: ' + str(debugCount) + " size: " + str(size))
        #     f.write('Leaking objects size='+str(len(obj)) + '\n')
        #     f.write('Leaking objects size='+str(typestats()) + '\n')
        #     f.write('Leaking objects'+str(obj) + '\n')
    except Exception as e:
        error('failed to track memory: ' + str(e))
def get_avg_cpu_load(self):
    """Returns the average cpu load since the last call

    Returns the user and system time fraction per second as tuple or None
    """
    if not self.start_wall_time:
        rusage = resource.getrusage(resource.RUSAGE_SELF)
        self.start_wall_time = time.time()
        self.start_cpu_user_time = rusage.ru_utime
        self.start_cpu_sys_time = rusage.ru_stime
        return None
    else:
        now = time.time()
        rusage = resource.getrusage(resource.RUSAGE_SELF)
        time_delta = now - self.start_wall_time
        avg_user_time = (rusage.ru_utime - self.start_cpu_user_time) / time_delta
        avg_sys_time = (rusage.ru_stime - self.start_cpu_sys_time) / time_delta
        self.start_wall_time = now
        self.start_cpu_user_time = rusage.ru_utime
        self.start_cpu_sys_time = rusage.ru_stime
        return avg_user_time, avg_sys_time
def _runtest (infile):
    global g_tmpfile
    assert (g_tmpfile)
    parser = DDSMTParser()
    smtformula = parser.parse(infile)
    _log (1, "parser: done")
    _log (2, "parser: maxrss: {} MiB".format(
        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
    _dump (smtformula, g_tmpfile)
    _log (1, "dumper: done")
    _log (1)
    if not _cmp (infile, g_tmpfile):
        bugfile = "bug-ddsmtparsertest-" + os.path.basename(infile)
        shutil.copyfile(g_tmpfile, bugfile)
        nbugs += 1
        _log (1, "bug: " + bugfile)
def runCommand(cmd, rUsage=False):
    """
    Run system command and get output, error, process handle and resource usage handle
    :param cmd:
    :param rUsage:
    :return:
    """
    import sys
    from subprocess import Popen, PIPE, STDOUT
    from resource import getrusage, RUSAGE_SELF, RUSAGE_CHILDREN

    res = None
    out = None
    err = None
    try:
        print "Start to run command: %s" % cmd
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        # p.wait()
    except OSError:
        print (err)
        sys.exit(p.returncode)
    (out, err) = p.communicate()
    if rUsage:
        res = getrusage(RUSAGE_CHILDREN)
    return (out, err, p, res)
def runCommand(cmd, rUsage=False):
    """
    Run system command and get output, error, process handle and resource usage handle
    :param cmd:
    :param rUsage:
    :return:
    """
    import sys
    from subprocess import Popen, PIPE, STDOUT
    from resource import getrusage, RUSAGE_SELF, RUSAGE_CHILDREN

    res = None
    out = None
    err = None
    try:
        logger.info("Start to run command: %s" % cmd)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        # p.wait()
    except OSError:
        print (err)
        sys.exit(p.returncode)
    (out, err) = p.communicate()
    if rUsage:
        res = getrusage(RUSAGE_CHILDREN)
    return (out, err, p, res)
def check_idle_cpu_usage(duration, allowed_part):
    if resource is None:
        # TODO: use https://code.google.com/p/psutil/
        from nose.plugins.skip import SkipTest
        raise SkipTest('CPU usage testing not supported (`import resource` failed)')

    r1 = resource.getrusage(resource.RUSAGE_SELF)
    eventlet.sleep(duration)
    r2 = resource.getrusage(resource.RUSAGE_SELF)
    utime = r2.ru_utime - r1.ru_utime
    stime = r2.ru_stime - r1.ru_stime

    # This check is reliably unreliable on Travis, presumably because of CPU
    # resources being quite restricted by the build environment. The workaround
    # is to apply an arbitrary factor that should be enough to make it work nicely.
    if os.environ.get('TRAVIS') == 'true':
        allowed_part *= 1.2

    assert utime + stime < duration * allowed_part, \
        "CPU usage over limit: user %.0f%% sys %.0f%% allowed %.0f%%" % (
            utime / duration * 100, stime / duration * 100,
            allowed_part * 100)
def make_post_exec_msg(start_time: datetime=None, comment: str=None) -> str:
    """Build Post-Execution Message with information about RAM and Time."""
    use, al, msg = 0, 0, ""
    if sys.platform.startswith(("win", "darwin")):
        msg = "No information about RAM usage available on non-Linux systems."
    elif sys.platform.startswith("linux"):
        use = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
                  resource.getpagesize() if resource else 0)
        al = int(os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
                 if hasattr(os, "sysconf") else 0)
        msg += f"""Total Max Memory Used: ~{use / al:.2%} Percent.
{ bytes2human(use) } ({ use } bytes) of { bytes2human(al) } ({ al } bytes) of total RAM Memory.\n"""
    if start_time:
        _t = datetime.now() - start_time
        msg += f"Total Working Time: ~{ timedelta2human(_t) } ({ _t }).\n"
    if comment:
        msg += str(comment).strip()
    log.debug("Preparing Simple Post-Execution Messages.")
    atexit.register(log.info, msg)
    return msg
def _performance_log(func):
    """ Logs information for performance measurement """

    def wrapper(*arg):
        """ wrapper """
        start = datetime.datetime.now()

        # Code execution
        res = func(*arg)

        if _log_performance:
            usage = resource.getrusage(resource.RUSAGE_SELF)
            memory_process = (usage[2]) / 1000
            delta = datetime.datetime.now() - start
            delta_milliseconds = int(delta.total_seconds() * 1000)

            _logger.info("PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|"
                         .format(func.__name__,
                                 delta_milliseconds,
                                 memory_process))
        return res

    return wrapper
def _performance_log(func):
    """ Logs information for performance measurement """

    def wrapper(*arg):
        """ wrapper """
        start = datetime.datetime.now()

        # Code execution
        res = func(*arg)

        if _log_performance:
            usage = resource.getrusage(resource.RUSAGE_SELF)
            process_memory = usage.ru_maxrss / 1000
            delta = datetime.datetime.now() - start
            delta_milliseconds = int(delta.total_seconds() * 1000)

            _LOGGER.info("PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|"
                         .format(func.__name__,
                                 delta_milliseconds,
                                 process_memory))
        return res

    return wrapper
def main(arguments=None):
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    parser = HelpfulArgumentParser(description=__doc__, prog='igdiscover')
    parser.add_argument('--profile', default=False, action='store_true',
        help='Save profiling information to igdiscover.prof')
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)

    subparsers = parser.add_subparsers()
    for command_name in COMMANDS:
        module = importlib.import_module('.' + command_name, 'igdiscover')
        subparser = subparsers.add_parser(command_name,
            help=module.__doc__.split('\n')[1], description=module.__doc__)
        subparser.set_defaults(func=module.main)
        module.add_arguments(subparser)

    args = parser.parse_args(arguments)
    if not hasattr(args, 'func'):
        parser.error('Please provide the name of a subcommand to run')
    elif args.profile:
        import cProfile as profile
        profile.runctx('args.func(args)', globals(), locals(), filename='igdiscover.prof')
        logger.info('Wrote profiling data to igdiscover.prof')
    else:
        args.func(args)

    if sys.platform == 'linux':
        rself = resource.getrusage(resource.RUSAGE_SELF)
        rchildren = resource.getrusage(resource.RUSAGE_CHILDREN)
        memory_kb = rself.ru_maxrss + rchildren.ru_maxrss
        cpu_time = rself.ru_utime + rself.ru_stime + rchildren.ru_utime + rchildren.ru_stime
        cpu_time_s = format_duration(cpu_time)
        logger.info('CPU time {}. Maximum memory usage {:.3f} GB'.format(
            cpu_time_s, memory_kb / 1E6))
def main(args):
    try:
        config = Config.from_default_path()
    except FileNotFoundError as e:
        sys.exit("Pipeline configuration file {!r} not found. Please create it!".format(e.filename))
    print('IgDiscover version {} with Python {}. Configuration:'.format(
        __version__, platform.python_version()))
    for k, v in sorted(vars(config).items()):
        # TODO the following line is only necessary for non-YAML configurations
        if k.startswith('_'):
            continue
        print(' ', k, ': ', repr(v), sep='')

    # snakemake sets up its own logging and this cannot be easily changed
    # (setting keep_logger=True crashes), so remove our own log handler
    # for now
    logger.root.handlers = []
    snakefile_path = pkg_resources.resource_filename('igdiscover', 'Snakefile')
    success = snakemake(snakefile_path,
        snakemakepath='snakemake',  # Needed in snakemake 3.9.0
        dryrun=args.dryrun,
        cores=args.cores,
        keepgoing=args.keepgoing,
        printshellcmds=args.print_commands,
        targets=args.targets if args.targets else None,
    )

    if sys.platform == 'linux' and not args.dryrun:
        cputime = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        cputime += resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime
        h = int(cputime // 3600)
        m = (cputime - h * 3600) / 60
        print('Total CPU time: {}h {:.2f}m'.format(h, m))

    sys.exit(0 if success else 1)
def get_cpu_time():
    """Return CPU time used by process and children"""
    if sys.platform != 'linux':
        return None
    rs = resource.getrusage(resource.RUSAGE_SELF)
    rc = resource.getrusage(resource.RUSAGE_CHILDREN)
    return rs.ru_utime + rs.ru_stime + rc.ru_utime + rc.ru_stime
def __enter__(self):
    self.start = time.time()
    print '%s\tself_premaxrss_mb\t%0.1f' % (self.note, resource.getrusage(resource.RUSAGE_SELF)[2] / 1e3)
    print '%s\tchildren_premaxrss_mb\t%0.1f' % (self.note, resource.getrusage(resource.RUSAGE_CHILDREN)[2] / 1e3)
    sys.stdout.flush()
def __exit__(self, e_type, e_value, e_trace):
    print '%s\tself_postmaxrss_mb\t%0.1f' % (self.note, resource.getrusage(resource.RUSAGE_SELF)[2] / 1e3)
    print '%s\tchildren_postmaxrss_mb\t%0.1f' % (self.note, resource.getrusage(resource.RUSAGE_CHILDREN)[2] / 1e3)
    print '%s\telapsed_sec\t%d' % (self.note, time.time() - self.start)
    sys.stdout.flush()
def _get_time_times(timer=os.times):
    t = timer()
    return t[0] + t[1]

# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution.
# Furthermore, on a POSIX system, clock(3) returns microseconds, which
# wrap around after 36 min.
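The comment above is the timer-selection note from the standard-library profile module. As a hedged illustration of the point it makes, the sketch below (not part of that module, standard library only) builds a CPU-time clock from resource.getrusage and contrasts it with wall-clock time; unlike the clock(3) value mentioned in the comment, it does not wrap around.

import resource
import time

def cpu_clock():
    # User + system CPU time consumed by this process, in seconds.
    ru = resource.getrusage(resource.RUSAGE_SELF)
    return ru.ru_utime + ru.ru_stime

start_wall = time.time()
start_cpu = cpu_clock()

sum(i * i for i in range(10**6))  # CPU-bound work: counts toward CPU time
time.sleep(0.5)                   # sleeping consumes wall time, not CPU time

print("wall time: %.3f s" % (time.time() - start_wall))
print("CPU time:  %.3f s" % (cpu_clock() - start_cpu))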
def read_cpu_time():
    rusage = resource.getrusage(resource.RUSAGE_SELF)
    return int((rusage.ru_utime + rusage.ru_stime) * 1e9)  # nanoseconds
def read_max_rss():
    rusage = resource.getrusage(resource.RUSAGE_SELF)

    if runtime_info.OS_DARWIN:
        return int(rusage.ru_maxrss / 1000)  # KB
    else:
        return rusage.ru_maxrss  # KB
def getrusage(who=0):
    return [0.0, 0.0]  # on non-UNIX platforms cpu_time always 0.0
def profiler(frame, event, arg):
    if event not in ('call', 'return'):
        return profiler

    #### gather stats ####
    rusage = getrusage(RUSAGE_SELF)
    t_cpu = rusage[0] + rusage[1]  # user time + system time
    code = frame.f_code
    fun = (code.co_name, code.co_filename, code.co_firstlineno)

    #### get stack with functions entry stats ####
    ct = threading.currentThread()
    try:
        p_stack = ct.p_stack
    except AttributeError:
        ct.p_stack = deque()
        p_stack = ct.p_stack

    #### handle call and return ####
    if event == 'call':
        p_stack.append((time(), t_cpu, fun))
    elif event == 'return':
        try:
            t, t_cpu_prev, f = p_stack.pop()
            assert f == fun
        except IndexError:  # TODO investigate
            t, t_cpu_prev, f = p_start_time, 0.0, None
        call_cnt, t_sum, t_cpu_sum = p_stats.get(fun, (0, 0.0, 0.0))
        p_stats[fun] = (call_cnt + 1, t_sum + time() - t, t_cpu_sum + t_cpu - t_cpu_prev)

    return profiler
def show_memory_use():
    rusage_denom = 1024.
    if sys.platform == 'darwin':
        rusage_denom = rusage_denom * rusage_denom
    ru = resource.getrusage(resource.RUSAGE_SELF)
    total_memory = 1. * (ru.ru_maxrss + ru.ru_ixrss + ru.ru_idrss + ru.ru_isrss) / rusage_denom
    strinfo = "\x1b[33m [Memory] Total Memory Use: %.4f MB \t Resident: %ld Shared: %ld UnshareData: %ld UnshareStack: %ld \x1b[0m" % \
        (total_memory, ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, ru.ru_isrss)
    return strinfo
def get_daemon_statistics(self):
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return {"type": "daemon-statistics",
            "memory-used": getattr(usage, "ru_maxrss")}
def test_dumps_usage(self):
    '''
    repeatedly serialize, check that usage doesn't go up
    '''
    if cdumps is None:
        logger.warn('no C dumps(), skipping test_dumps_usage')
        return
    start_usage = resource.getrusage(resource.RUSAGE_SELF)
    usage_history = [start_usage]
    for o in _range(_TEST_OUTER):
        for i in _range(_TEST_COUNT):
            ob = _randob()
            blob = cdumps(ob)
            # and silently drop the result. I hope the garbage collector works!
        t_usage = resource.getrusage(resource.RUSAGE_SELF)
        usage_history.append(t_usage)
    end_usage = usage_history[-1]
    dmaxrss = end_usage.ru_maxrss - start_usage.ru_maxrss
    didrss = end_usage.ru_idrss - start_usage.ru_idrss
    dmaxrsspct = ((end_usage.ru_maxrss != 0) and (dmaxrss / end_usage.ru_maxrss)) or 0
    didrsspct = ((end_usage.ru_idrss != 0) and (didrss / end_usage.ru_idrss)) or 0
    sys.stderr.write('maxrss: {} - {}, d={} ({:.2f}%)\n'.format(
        start_usage.ru_maxrss, end_usage.ru_maxrss, dmaxrss, dmaxrsspct * 100.0))
    sys.stderr.write('idrss: {} - {}, d={} ({:.2f}%)\n'.format(
        start_usage.ru_idrss, end_usage.ru_idrss, didrss, didrsspct * 100.0))
    assert (dmaxrsspct) < 0.05, [x.ru_maxrss for x in usage_history]
    assert (didrsspct) < 0.05, [x.ru_idrss for x in usage_history]
def test_loads_usage(self):
    '''
    repeatedly serialize, check that usage doesn't go up
    '''
    if (cdumps is None) or (cloads is None):
        logger.warn('no C fast CBOR, skipping test_loads_usage')
        return
    ## Just a string passes!
    #ob = 'sntaoheusnatoheusnaotehuasnoetuhaosentuhaoesnth'
    ## Just an array passes!
    #ob = [1,2,3,4,5,6,7,8,9,12,12,13]
    ## Just a dict passes!
    #ob = {'a':'b', 'c':'d', 'e':'f', 'g':'h'}
    # dict of dict is doom!
    #ob = {'a':{'b':'c', 'd':'e', 'f':'g'}, 'x':'p'}
    ob = {'aoeu': [1, 2, 3, 4], 'foo': 'bar', 'pants': {'foo': 0xb44, 'pi': 3.14},
          'flubber': [{'x': 'y', 'z': [None, 2, []]}, 2, 'hello']}
    blob = cdumps(ob)
    start_usage = resource.getrusage(resource.RUSAGE_SELF)
    usage_history = [start_usage]
    for o in _range(_TEST_OUTER):
        for i in _range(_TEST_COUNT):
            dob = cloads(blob)
            # and silently drop the result. I hope the garbage collector works!
        t_usage = resource.getrusage(resource.RUSAGE_SELF)
        usage_history.append(t_usage)
    end_usage = usage_history[-1]
    dmaxrss = end_usage.ru_maxrss - start_usage.ru_maxrss
    didrss = end_usage.ru_idrss - start_usage.ru_idrss
    dmaxrsspct = ((end_usage.ru_maxrss != 0) and (dmaxrss / end_usage.ru_maxrss)) or 0
    didrsspct = ((end_usage.ru_idrss != 0) and (didrss / end_usage.ru_idrss)) or 0
    sys.stderr.write('maxrss: {} - {}, d={} ({:.2f}%)\n'.format(
        start_usage.ru_maxrss, end_usage.ru_maxrss, dmaxrss, dmaxrsspct * 100.0))
    sys.stderr.write('idrss: {} - {}, d={} ({:.2f}%)\n'.format(
        start_usage.ru_idrss, end_usage.ru_idrss, didrss, didrsspct * 100.0))
    assert (dmaxrsspct) < 0.05, [x.ru_maxrss for x in usage_history]
    assert (didrsspct) < 0.05, [x.ru_idrss for x in usage_history]
def _total_gb():
    # given in KB so convert
    my_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024 ** 2)
    # total_usage = mpiops.comm.reduce(my_usage, root=0)
    total_usage = ls.mpiops.comm.allreduce(my_usage)
    return total_usage
def memprofile():
    import resource
    import tracemalloc

    tracemalloc.start()

    ast = parse_file('/tmp/197.c')

    print('Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

    snapshot = tracemalloc.take_snapshot()
    print("[ tracemalloc stats ]")
    for stat in snapshot.statistics('lineno')[:20]:
        print(stat)
def do_stats(self):
    mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return {'mem_usage': '%s MB' % (mem / 1024)}
def memory_usage_resource():
    rusage_denom = 1024.
    if sys.platform == 'darwin':
        # ... it seems that in OSX the output is different units ...
        rusage_denom = rusage_denom * rusage_denom
    mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
    return mem
async def info(self, ctx):
    """Display bot info, e.g. library versions."""
    embed = discord.Embed()
    embed.description = ctx.bot.description
    embed.set_thumbnail(url=ctx.bot.user.avatar_url_as(format="png", size=128))
    if k2:
        embed.add_field(name="Version", value=k2.version)
    ainfo = await ctx.bot.application_info()
    owner = str(ainfo.owner)
    embed.add_field(name="Owner", value=owner)
    embed.add_field(name="# of commands", value=len(ctx.bot.commands))
    if ctx.guild and ctx.bot.shard_count > 1:
        embed.add_field(name="Shard", value=f"{ctx.guild.shard_id+1} of {ctx.bot.shard_count}")
    num_guilds = len(ctx.bot.guilds)
    num_users = sum(not member.bot for member in ctx.bot.get_all_members())
    embed.add_field(name="Serving", value=f"{num_users} people in {num_guilds} guilds")
    embed.add_field(name="Python", value="{0}.{1}.{2}".format(*sys.version_info))
    embed.add_field(name="discord.py", value=discord.__version__)
    usage_memory = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000, 2)
    embed.add_field(name="Cookies eaten", value=f"{usage_memory} megabites")
    if k2:
        embed.add_field(name="Github", value=k2.url, inline=False)
    await ctx.send(embed=embed)
def logger(self, mssg="", decorate=1):
    """Logging function."""
    head = "\n%s" % ("#" * 50,)
    timestamp = "\n[%s]" % datetime.ctime(datetime.now())
    memusage = "[%5i Mb] " % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024, )
    if self.log:
        if decorate:
            self.log.write("".join((head, timestamp, memusage, mssg)))
        else:
            self.log.write(mssg)
def clocku():
    """clocku() -> floating point number

    Return the *USER* CPU time in seconds since the start of the process.
    This is done via a call to resource.getrusage, so it avoids the
    wraparound problems in time.clock()."""
    return resource.getrusage(resource.RUSAGE_SELF)[0]
def clocks():
    """clocks() -> floating point number

    Return the *SYSTEM* CPU time in seconds since the start of the process.
    This is done via a call to resource.getrusage, so it avoids the
    wraparound problems in time.clock()."""
    return resource.getrusage(resource.RUSAGE_SELF)[1]