The following 16 code examples, extracted from open-source Python projects, illustrate how to use resource.getpagesize().
def allocate_buffered_data_arrays(self, **kwargs):
    """Allocate pinned, page-aligned host arrays for reusing this container.

    Buffer length comes from ``n0`` (or ``n0_buffer`` when buffered
    transfer is enabled); falls back to the instance attributes.
    """
    nvals = kwargs.get('n0', self.n0)
    if self.buffered_transfer:
        nvals = kwargs.get('n0_buffer', self.n0_buffer)
    assert nvals is not None

    def _pinned(size):
        # Page-aligned zeros, then registered so the host memory is pinned
        # and usable for asynchronous device transfers.
        arr = cuda.aligned_zeros(shape=(size,), dtype=self.real_type,
                                 alignment=resource.getpagesize())
        return cuda.register_host_memory(arr)

    self.t = _pinned(nvals)
    self.yw = _pinned(nvals)
    self.w = _pinned(nvals)
    return self
def get_memory_usage(subtract_share=False):
    """Return this process's resident set size in megabytes.

    Reads ``/proc/<pid>/statm`` (Linux only) and scales the resident page
    count by the system page size.

    Parameters
    ----------
    subtract_share : bool
        When True, subtract shared pages from the resident count.

    Returns
    -------
    float
        Resident size in megabytes, or -1024 when the information is
        unavailable (no ``resource`` module, or no procfs entry).
    """
    pid = os.getpid()
    try:
        pagesize = resource.getpagesize()
    except NameError:
        # 'resource' never got imported (non-POSIX platform) — sentinel.
        return -1024
    status_file = "/proc/%s/statm" % (pid)
    if not os.path.isfile(status_file):
        return -1024
    # Fix: close the file deterministically (original leaked the handle).
    with open(status_file) as fh:
        line = fh.read()
    size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
    if subtract_share:
        resident -= share
    return resident * pagesize / (1024 * 1024)  # return in megs
def make_post_exec_msg(start_time: datetime = None, comment: str = None) -> str:
    """Build a post-execution message with RAM usage and elapsed time.

    The message is also registered with ``atexit`` so it is logged when the
    interpreter exits.

    Parameters
    ----------
    start_time : datetime, optional
        When given, the elapsed wall-clock time since it is appended.
    comment : str, optional
        Extra free-form text appended (stripped) to the message.

    Returns
    -------
    str
        The assembled message.
    """
    use, al, msg = 0, 0, ""
    if sys.platform.startswith(("win", "darwin")):
        msg = "No information about RAM usage available on non-Linux systems."
    elif sys.platform.startswith("linux"):
        # Fix: on Linux, getrusage() reports ru_maxrss in KILOBYTES (see
        # getrusage(2)), so convert with * 1024 — multiplying by the page
        # size (as before) over-reported usage by roughly pagesize/1024.
        use = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
                  if resource else 0)
        al = int(os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
                 if hasattr(os, "sysconf") else 0)
        if al:  # Fix: guard against ZeroDivisionError when sysconf is absent
            msg += f"""Total Max Memory Used: ~{use / al:.2%} Percent.
{ bytes2human(use) } ({ use } bytes) of { bytes2human(al) } ({ al } bytes) of total RAM Memory.\n"""
    if start_time:
        _t = datetime.now() - start_time
        msg += f"Total Working Time: ~{ timedelta2human(_t) } ({ _t }).\n"
    if comment:
        msg += str(comment).strip()
    log.debug("Preparing Simple Post-Execution Messages.")
    atexit.register(log.info, msg)
    return msg
def get_page_size():
    """Return the operating system's memory page size in bytes."""
    page_size = resource.getpagesize()
    return page_size
def allocate(self, data):
    """Allocate host and device buffers for each (t, y, w, freqs) dataset.

    Returns ``(gpu_data, pow_cpus)``: per-dataset device-array tuples and
    the matching pinned host result buffers.
    """
    # Lazily grow the stream pool so there is one stream per dataset.
    if len(data) > len(self.streams):
        self._create_streams(len(data) - len(self.streams))
    gpu_data, pow_cpus = [], []
    for t, y, w, freqs in data:
        # Page-aligned host buffer for the power spectrum, registered
        # (pinned) so it can be used for asynchronous transfers.
        pow_cpu = cuda.aligned_zeros(shape=(len(freqs),),
                                     dtype=np.float32,
                                     alignment=resource.getpagesize())
        pow_cpu = cuda.register_host_memory(pow_cpu)
        t_g, y_g, w_g = None, None, None
        if len(t) > 0:
            # Device buffers for times, values and weights (empty datasets
            # keep None placeholders).
            t_g, y_g, w_g = tuple([gpuarray.zeros(len(t), dtype=np.float32)
                                   for i in range(3)])
        # Device-side output power array and the trial frequencies.
        pow_g = gpuarray.zeros(len(pow_cpu), dtype=pow_cpu.dtype)
        freqs_g = gpuarray.to_gpu(np.asarray(freqs).astype(np.float32))
        gpu_data.append((t_g, y_g, w_g, freqs_g, pow_g))
        pow_cpus.append(pow_cpu)
    return gpu_data, pow_cpus
def allocate_pinned_cpu(self, **kwargs):
    """Allocate pinned CPU memory for asynchronous transfer of the result."""
    nfreq = kwargs.get('nf', self.nf)
    assert nfreq is not None
    # Page-aligned zeros, then pinned via host-memory registration.
    buf = cuda.aligned_zeros(shape=(nfreq,), dtype=self.real_type,
                             alignment=resource.getpagesize())
    self.lsp_c = cuda.register_host_memory(buf)
    return self
def allocate_buffered_data_arrays(self, **kwargs):
    """Allocate pinned, page-aligned host arrays for buffered data transfer."""
    n0 = kwargs.get('n0', self.n0)
    if self.buffered_transfer:
        n0 = kwargs.get('n0_buffer', self.n0_buffer)
    assert(n0 is not None)
    # Shared allocation settings: real dtype, page-aligned so the arrays
    # can be registered as pinned host memory.
    kw = dict(dtype=self.real_type, alignment=resource.getpagesize())
    self.t = cuda.aligned_zeros(shape=(n0,), **kw)
    self.t = cuda.register_host_memory(self.t)
    # Observations may use a different dtype (self.ytype) than timestamps.
    self.y = cuda.aligned_zeros(shape=(n0,), dtype=self.ytype,
                                alignment=resource.getpagesize())
    self.y = cuda.register_host_memory(self.y)
    if self.weighted:
        # Per-point uncertainties, only needed for weighted analysis.
        self.dy = cuda.aligned_zeros(shape=(n0,), **kw)
        self.dy = cuda.register_host_memory(self.dy)
    if self.balanced_magbins:
        self.mag_bwf = cuda.aligned_zeros(shape=(self.mag_bins,), **kw)
        self.mag_bwf = cuda.register_host_memory(self.mag_bwf)
    if self.compute_log_prob:
        self.mag_bin_fracs = cuda.aligned_zeros(shape=(self.mag_bins,), **kw)
        self.mag_bin_fracs = cuda.register_host_memory(self.mag_bin_fracs)
    return self
def allocate_pinned_cpu(self, **kwargs):
    """Allocate a pinned, page-aligned host buffer for the result array."""
    nfreq = kwargs.get('nf', self.nf)
    assert nfreq is not None
    # Page-aligned zeros, then pinned via host-memory registration.
    buf = cuda.aligned_zeros(shape=(nfreq,), dtype=self.real_type,
                             alignment=resource.getpagesize())
    self.ce_c = cuda.register_host_memory(buf)
    return self
def allocate_pinned_arrays(self, nfreqs=None, ndata=None):
    """Allocate every pinned, page-aligned host array used by this solver.

    Parameters
    ----------
    nfreqs : int, optional
        Number of frequencies; defaults to ``self.max_nfreqs``.
    ndata : int, optional
        Number of data points; defaults to ``self.max_ndata``.
    """
    if nfreqs is None:
        nfreqs = int(self.max_nfreqs)
    if ndata is None:
        ndata = int(self.max_ndata)
    page = resource.getpagesize()
    # (attribute name, length, dtype) for each pinned buffer we need.
    specs = [
        ('bls', nfreqs, self.rtype),
        ('nbins0', nfreqs, np.int32),
        ('nbinsf', nfreqs, np.int32),
        ('t', ndata, self.rtype),
        ('yw', ndata, self.rtype),
        ('w', ndata, self.rtype),
    ]
    for attr, length, dtype in specs:
        arr = cuda.aligned_zeros(shape=(length,), dtype=dtype, alignment=page)
        setattr(self, attr, cuda.register_host_memory(arr))
def test_PAGESIZE(self):
    # The page size is used internally to perform different calculations
    # and is determined via SC_PAGE_SIZE; resource.getpagesize() must
    # return the same value.
    import resource
    expected = os.sysconf("SC_PAGE_SIZE")
    self.assertEqual(expected, resource.getpagesize())
def getPageSize():
    """Return the system page size as a fraction of total physical RAM.

    Parses the ``MemTotal`` figure (in kB) from the first line of
    ``/proc/meminfo``, so this only works on Linux.

    Returns
    -------
    float
        ``pagesize_bytes / total_ram_bytes``.
    """
    import resource
    # Fix: close the file deterministically, and parse by splitting on
    # whitespace instead of fixed character offsets (mem[10:-3]), which
    # break when the MemTotal value has a different digit count.
    with open("/proc/meminfo") as f:
        mem = f.readline()
    total_kb = float(mem.split()[1])  # "MemTotal:   NNNNNN kB"
    return resource.getpagesize() / (1024 * total_kb)
def print_mem_usage(label=""):
    """Print the process's peak resident memory, tagged with *label*.

    Parameters
    ----------
    label : str
        Text printed before the memory figure (e.g. a checkpoint name).
    """
    # NOTE(review): ru_maxrss is kilobytes on Linux but bytes on macOS, so
    # dividing by 1e6 only yields megabytes on macOS — confirm intended
    # platform before relying on the unit.
    mem_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000000
    # Fix: Python 3 print function (original used a py2 print statement,
    # a syntax error under Python 3).
    print(label, ":mem_usage", mem_mb, "mb")
def memusage(point=""):
    """Return a one-line report of user time, system time and peak RSS.

    Parameters
    ----------
    point : str
        Label prepended to the report (e.g. a checkpoint name).

    Returns
    -------
    str
        ``"<point>: usertime = ... systime = ... mem = ... mb"``.
    """
    usage = resource.getrusage(resource.RUSAGE_SELF)
    # Fix: on Linux ru_maxrss is already in KILOBYTES (getrusage(2));
    # multiplying it by the page size (as before) over-reported memory by
    # roughly pagesize/1024. Convert kB -> bytes -> MB instead.
    mem_mb = (usage.ru_maxrss * 1024) / 1000000.0
    return '''%s: usertime = %s systime = %s mem = %s mb
''' % (point, usage.ru_utime, usage.ru_stime, mem_mb)
def memusage():
    """Print a memory-usage report: gc garbage, peak RSS, and /proc VM stats.

    Best-effort diagnostic helper: the /proc section is skipped silently on
    platforms without procfs.
    """
    import resource
    import gc
    gc.collect()
    if len(gc.garbage):
        # Fix: Python 3 print function throughout (original used py2
        # print statements, a syntax error under Python 3).
        print('Garbage list:')
        for obj in gc.garbage:
            print(obj)
    ru = resource.getrusage(resource.RUSAGE_SELF)
    pgsize = resource.getpagesize()
    print('Memory usage:')
    print('page size', pgsize)
    # Fix: on Linux ru_maxrss is in KILOBYTES (getrusage(2)); multiplying
    # by the page size over-reported peak RSS. Convert kB -> MB instead.
    mb = int(np.ceil(ru.ru_maxrss * 1024 / 1e6))
    unit = 'MB'
    f = 1.
    if mb > 1024:
        f = 1024.
        unit = 'GB'
    print('max rss: %.1f %s' % (mb / f, unit))
    procfn = '/proc/%d/status' % os.getpid()
    try:
        # Fix: close the file deterministically (original leaked the handle).
        with open(procfn) as fh:
            t = fh.readlines()
        # Map "VmRSS:" -> ["1234", "kB"], stripping the trailing colon.
        d = dict([(line.split()[0][:-1], line.split()[1:]) for line in t])
        for key in ['VmPeak', 'VmSize', 'VmRSS', 'VmData', 'VmStk']:
            va = d.get(key, [])
            if len(va) < 2:
                continue
            v = float(va[-2])
            unit = va[-1]
            # Scale up through kB -> MB -> GB for readability.
            if unit == 'kB' and v > 1024:
                unit = 'MB'
                v /= 1024.
            if unit == 'MB' and v > 1024:
                unit = 'GB'
                v /= 1024.
            print(key, '%.1f %s' % (v, unit))
    except Exception:
        # Fix: narrowed from a bare except; still deliberately best-effort
        # (missing /proc entry or unexpected format is not an error here).
        pass