We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.nanmin().
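Before the project code, here is a minimal sketch of the core behavior (the array values below are made up for illustration): np.nanmin() returns the minimum of an array while ignoring NaN entries, whereas np.min() propagates them; the optional axis argument gives per-column or per-row minima.

import numpy as np

a = np.array([4.0, np.nan, 1.5, 7.0])
print(np.min(a))     # nan -- the plain minimum propagates NaN
print(np.nanmin(a))  # 1.5 -- NaN entries are ignored

m = np.array([[np.nan, 2.0],
              [3.0, np.nan]])
print(np.nanmin(m, axis=0))  # [3. 2.] -- column-wise minima, skipping NaN

Note that if every value along the reduced axis is NaN, numpy emits a RuntimeWarning and returns NaN for that slice.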
def normalize_array(solution, prediction):
    '''Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. This allows
    applying classification scores to all cases. In principle, this should not
    do anything to properly formatted classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    # list(...) needed so the filter works under Python 3
    maxi = np.nanmax(list(filter(lambda x: x != float('inf'), sol)))   # Max except NaN and Inf
    mini = np.nanmin(list(filter(lambda x: x != float('-inf'), sol)))  # Min except NaN and Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini) / 2.
    new_solution = np.copy(solution)
    new_solution[solution >= mid] = 1
    new_solution[solution < mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini)) / float(diff)
    new_prediction[new_prediction > 1] = 1  # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction < 0] = 0
    # Make probabilities smoother
    # new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
def sanitize_array(array):
    """
    Replace NaN and Inf (there should not be any!)
    :param array:
    :return:
    """
    a = np.ravel(array)
    # maxi = np.nanmax((filter(lambda x: x != float('inf'), a)))   # Max except NaN and Inf
    # mini = np.nanmin((filter(lambda x: x != float('-inf'), a)))  # Min except NaN and Inf
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def min_max(self, mask=None):
    """Get the minimum and maximum value in this data.

    If a mask is provided we get the min and max value within the given mask.

    Infinities and NaN's are ignored by this algorithm.

    Args:
        mask (ndarray): the mask, we only include elements for which the mask > 0

    Returns:
        tuple: (min, max) the minimum and maximum values
    """
    if mask is not None:
        roi = mdt.create_roi(self.data, mask)
        return np.nanmin(roi), np.nanmax(roi)
    return np.nanmin(self.data), np.nanmax(self.data)
def test_extrema():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs,
                            fields=("density", "velocity_x", "velocity_y", "velocity_z"))
        for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5, :, :]]:
            mi, ma = sp.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(sp["density"]))
            assert_equal(ma, np.nanmax(sp["density"]))
            dd = ds.all_data()
            mi, ma = dd.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(dd["density"]))
            assert_equal(ma, np.nanmax(dd["density"]))
            sp = ds.sphere("max", (0.25, 'unitary'))
            assert_equal(np.any(np.isnan(sp["radial_velocity"])), False)
            mi, ma = dd.quantities["Extrema"]("radial_velocity")
            assert_equal(mi, np.nanmin(dd["radial_velocity"]))
            assert_equal(ma, np.nanmax(dd["radial_velocity"]))
def local_entropy(ocl_ctx, img, window_radius, num_bins=8):
    """ compute local entropy using a sliding window """
    mf = cl.mem_flags
    cl_queue = cl.CommandQueue(ocl_ctx)
    img_np = np.array(img).astype(np.float32)
    img_buf = cl.Buffer(ocl_ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=img_np)
    min_val = np.nanmin(img)
    max_val = np.nanmax(img)
    entropy = np.zeros_like(img, dtype=np.float32)
    dest_buf = cl.Buffer(ocl_ctx, mf.WRITE_ONLY, entropy.nbytes)
    cl_dir = os.path.dirname(__file__)
    cl_filename = cl_dir + '/cl/local_entropy.cl'
    with open(cl_filename, 'r') as fd:
        clstr = fd.read()
    prg = cl.Program(ocl_ctx, clstr).build()
    prg.local_entropy(cl_queue, entropy.shape, None,
                      img_buf, dest_buf,
                      np.int32(img.shape[1]), np.int32(img.shape[0]),
                      np.int32(window_radius), np.int32(num_bins),
                      np.float32(min_val), np.float32(max_val))
    cl.enqueue_copy(cl_queue, entropy, dest_buf)
    cl_queue.finish()
    return entropy
def minmax(X):
    """
    Returns the MinMax Semivariance of sample X.
    X has to be an even-length array of point pairs like:
    x1, x1+h, x2, x2+h, ..., xn, xn+h.

    :param X:
    :return:
    """
    _X = np.asarray(X)

    if any([isinstance(_, list) or isinstance(_, np.ndarray) for _ in _X]):
        return [minmax(_) for _ in _X]

    # check even
    if len(_X) % 2 > 0:
        raise ValueError('The sample does not have an even length: {}'.format(_X))

    return (np.nanmax(_X) - np.nanmin(_X)) / np.nanmean(_X)
def test_FmtHeatmap__get_min_max_from_selected_cell_values_with_cache():
    df_pn = df - 5.
    cache = {}
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert len(cache) == 1 and (None, None) in cache.keys()
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)
def test_FmtHeatmap__get_min_max_from_selected_cell_values_without_cache():
    df_pn = df - 5.
    cache = None
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert cache is None
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)
def depth_callback(self, data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)

    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)

    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)

    # stream = open("/home/chentao/depth_test.yaml", "w")
    # data = {'img': depth_img.tolist()}
    # yaml.dump(data, stream)
def depth_callback(self, data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)

    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)

    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)
def basemap_raster_mercator(lon, lat, grid, cmin, cmax, cmap_name):
    # longitude/latitude extent
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])

    # vmin, vmax = np.nanmin(grid), np.nanmax(grid)
    masked_grid = np.ma.array(grid, mask=np.isnan(grid))
    fig = plt.figure(frameon=False, figsize=(12, 8), dpi=72)
    plt.axis('off')
    cmap = mpl.cm.get_cmap(cmap_name)
    m.pcolormesh(lon, lat, masked_grid, latlon=True, cmap=cmap, vmin=cmin, vmax=cmax)

    str_io = StringIO.StringIO()
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    plt.close()

    numpy_bounds = [(lons[0], lats[0]), (lons[1], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    float_bounds = [(float(x), float(y)) for x, y in numpy_bounds]
    return str_io.getvalue(), float_bounds
def basemap_barbs_mercator(u, v, lat, lon):
    # lon/lat extents
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])

    # vmin, vmax = np.nanmin(grid), np.nanmax(grid)
    fig = plt.figure(frameon=False, figsize=(12, 8), dpi=72 * 4)
    plt.axis('off')
    m.quiver(lon, lat, u, v, latlon=True)

    str_io = StringIO.StringIO()
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    plt.close()

    numpy_bounds = [(lons[0], lats[0]), (lons[1], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    float_bounds = [(float(x), float(y)) for x, y in numpy_bounds]
    return str_io.getvalue(), float_bounds
def setSymColormap(self):
    cmap = {'ticks':
            [[0, (106, 0, 31, 255)],
             [.5, (255, 255, 255, 255)],
             [1., (8, 54, 104, 255)]],
            'mode': 'rgb'}
    cmap = {'ticks':
            [[0, (172, 56, 56)],
             [.5, (255, 255, 255)],
             [1., (51, 53, 120)]],
            'mode': 'rgb'}
    lvl_min = lvl_max = 0
    for plot in self.plots:
        plt_min = num.nanmin(plot.data)
        plt_max = num.nanmax(plot.data)
        lvl_max = lvl_max if plt_max < lvl_max else plt_max
        lvl_min = lvl_min if plt_min > lvl_min else plt_min

    abs_range = max(abs(lvl_min), abs(lvl_max))

    self.gradient.restoreState(cmap)
    self.setLevels(-abs_range, abs_range)
def setSymColormap(self):
    cmap = {'ticks':
            [[0., (0, 0, 0, 255)],
             [1e-3, (106, 0, 31, 255)],
             [.5, (255, 255, 255, 255)],
             [1., (8, 54, 104, 255)]],
            'mode': 'rgb'}
    cmap = {'ticks':
            [[0., (0, 0, 0)],
             [1e-3, (172, 56, 56)],
             [.5, (255, 255, 255)],
             [1., (51, 53, 120)]],
            'mode': 'rgb'}
    lvl_min = num.nanmin(self._plot.data)
    lvl_max = num.nanmax(self._plot.data)
    abs_range = max(abs(lvl_min), abs(lvl_max))

    self.gradient.restoreState(cmap)
    self.setLevels(-abs_range, abs_range)
def setArray(self, incomingArray, copy=False):
    """
    You can use the self.array directly but if you want to copy from one array
    into a raster we suggest you do it this way
    :param incomingArray:
    :return:
    """
    masked = isinstance(self.array, np.ma.MaskedArray)
    if copy:
        if masked:
            self.array = np.ma.copy(incomingArray)
        else:
            self.array = np.ma.masked_invalid(incomingArray, copy=True)
    else:
        if masked:
            self.array = incomingArray
        else:
            self.array = np.ma.masked_invalid(incomingArray)

    self.rows = self.array.shape[0]
    self.cols = self.array.shape[1]
    self.min = np.nanmin(self.array)
    self.max = np.nanmax(self.array)
def _choose_cov(self, cov_type, **cov_config):
    """Return covariance estimator and reformat clusters"""
    cov_est = self._cov_estimators[cov_type]
    if cov_type != 'clustered':
        return cov_est, cov_config
    cov_config_upd = {k: v for k, v in cov_config.items()}

    clusters = cov_config.get('clusters', None)
    if clusters is not None:
        clusters = self.reformat_clusters(clusters).copy()
        cluster_max = np.nanmax(clusters.values3d, axis=1)
        delta = cluster_max - np.nanmin(clusters.values3d, axis=1)
        if np.any(delta != 0):
            raise ValueError('clusters must not vary within an entity')

        index = clusters.panel.minor_axis
        reindex = clusters.entities
        clusters = pd.DataFrame(cluster_max.T, index=index, columns=clusters.vars)
        clusters = clusters.loc[reindex].astype(np.int64)
        cov_config_upd['clusters'] = clusters

    return cov_est, cov_config_upd
def get_bbox(self):
    """
    Returns boundary box for the coordinates. Useful for setting up
    the map extent for plotting on a map.
    :return tuple: corner coordinates (llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon)
    """
    x, y, z = zip(self)
    llcrnrlat = np.nanmin(y)
    urcrnrlat = np.nanmax(y)
    llcrnrlon = np.nanmin(x)
    urcrnrlon = np.nanmax(x)
    return (llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon)
def visRenderedViews(self, outDir, nViews=0):
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    renders = sorted(glob.glob(outDir + '/render_*.png'))
    if (nViews > 0) and (nViews < len(renders)):
        renders = [renders[ix] for ix in range(nViews)]

    for render in renders:
        print(render)
        rgbIm = scipy.misc.imread(render)
        dMap = loadDepth(render.replace('render_', 'depth_'))
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
        plt.imshow(rgbIm)
        dMap[dMap >= 10] = np.nan
        plt.subplot(122)
        plt.imshow(dMap)
        print(np.nanmax(dMap), np.nanmin(dMap))
        plt.show()
def find_bbox(t):
    # given a table t find the bounding box of the ellipses for the regions
    boxes = []
    for r in t:
        a = r['Maj'] / scale
        b = r['Min'] / scale
        th = (r['PA'] + 90) * np.pi / 180.0
        dx = np.sqrt((a * np.cos(th))**2.0 + (b * np.sin(th))**2.0)
        dy = np.sqrt((a * np.sin(th))**2.0 + (b * np.cos(th))**2.0)
        boxes.append([r['RA'] - dx / np.cos(r['DEC'] * np.pi / 180.0),
                      r['RA'] + dx / np.cos(r['DEC'] * np.pi / 180.0),
                      r['DEC'] - dy,
                      r['DEC'] + dy])

    boxes = np.array(boxes)
    minra = np.nanmin(boxes[:, 0])
    maxra = np.nanmax(boxes[:, 1])
    mindec = np.nanmin(boxes[:, 2])
    maxdec = np.nanmax(boxes[:, 3])

    ra = np.mean((minra, maxra))
    dec = np.mean((mindec, maxdec))
    size = 1.2 * 3600.0 * np.max((maxdec - mindec, (maxra - minra) * np.cos(dec * np.pi / 180.0)))
    return ra, dec, size
def VshGR(GRlog, itmin, itmax):  # using the GR log
    GRmin = np.nanmin(GRlog)
    GRminInt = GRlog[(GRlog <= (GRmin * (1 + itmin / 100)))]  # GRmin values
    GRminm = np.mean(GRminInt)                                # mean of the GRmin values

    GRmax = np.nanmax(GRlog)
    GRmaxInt = GRlog[(GRlog >= (GRmax * (1 - itmax / 100)))]  # GRmax values
    GRmaxm = np.mean(GRmaxInt)                                # mean of the GRmax values

    Vsh = 100 * (GRlog - GRminm) / (GRmaxm - GRminm)  # shale (clay) volume

    for i in range(len(Vsh)):
        if (Vsh[i] > 100):
            Vsh[i] = 100
        elif (Vsh[i] < 0):
            Vsh[i] = 0

    print(GRmin, GRminm, GRmax, GRmaxm, np.nanmin(Vsh), np.nanmax(Vsh))
    return Vsh
def distance_curves(x, ys, q1):
    """
    Distances to the curves.

    :param x: x values of curves (they have to be sorted).
    :param ys: y values of multiple curves sharing x values.
    :param q1: a point to measure distance to.
    :return:
    """
    # convert curves into a series of startpoints and endpoints
    xp = rolling_window(x, 2)
    ysp = rolling_window(ys, 2)

    r = np.nanmin(distance_line_segment(xp[:, 0], ysp[:, :, 0],
                                        xp[:, 1], ysp[:, :, 1],
                                        q1[0], q1[1]), axis=1)
    return r
def set_marker_size(self, attr, update=True):
    try:
        self._size_attr = variable = self.data.domain[attr]
        if len(self.data) == 0:
            raise Exception
    except Exception:
        self._size_attr = None
        self._legend_sizes = []
    else:
        assert variable.is_continuous
        self._raw_sizes = values = self.data.get_column_view(variable)[0].astype(float)
        # Note, [5, 60] is also hardcoded in legend-size-indicator.svg
        self._sizes = scale(values, 5, 60).astype(np.uint8)
        min = np.nanmin(values)
        self._legend_sizes = (self._legend_values(variable, [min, np.nanmax(values)])
                              if not np.isnan(min) else [])
    finally:
        if update:
            self.redraw_markers_overlay_image(new_image=True)
def sanitize_array(array):
    '''Replace NaN and Inf (there should not be any!)'''
    a = np.ravel(array)
    # list(...) needed so the filter works under Python 3
    maxi = np.nanmax(list(filter(lambda x: x != float('inf'), a)))   # Max except NaN and Inf
    mini = np.nanmin(list(filter(lambda x: x != float('-inf'), a)))  # Min except NaN and Inf
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
def frame_to_series(self, field, frame, columns=None):
    """
    Convert a frame with a DatetimeIndex and sid columns into a series with
    a sid index, using the aggregator defined by the given field.
    """
    if isinstance(frame, pd.DataFrame):
        columns = frame.columns
        frame = frame.values

    if not len(frame):
        return pd.Series(
            data=(0 if field == 'volume' else np.nan),
            index=columns,
        ).values

    if field in ['price', 'close']:
        # shortcircuit for full last row
        vals = frame[-1]
        if np.all(~np.isnan(vals)):
            return vals
        return ffill(frame)[-1]
    elif field == 'open':
        return bfill(frame)[0]
    elif field == 'volume':
        return np.nansum(frame, axis=0)
    elif field == 'high':
        return np.nanmax(frame, axis=0)
    elif field == 'low':
        return np.nanmin(frame, axis=0)
    else:
        raise ValueError("Unknown field {}".format(field))
def extract_img_background(img_array,
                           custom_limits=None,
                           median_diffbelow=200.0,
                           image_min=None):
    '''
    This extracts the background of the image array provided:

    - masks the array to only values between the median and the min of flux
    - then returns the median value in 3 x 3 stamps.

    img_array = image to find the background for

    custom_limits = use this to provide custom median and min limits for the
                    background extraction

    median_diffbelow = subtract this value from the median to get the upper
                       bound for background extraction

    image_min = use this value as the lower bound for background extraction
    '''

    if not custom_limits:
        backmax = np.median(img_array) - median_diffbelow
        backmin = image_min if image_min is not None else np.nanmin(img_array)
    else:
        backmin, backmax = custom_limits

    masked = npma.masked_outside(img_array, backmin, backmax)
    backmasked = npma.median(masked)

    return backmasked


## IMAGE SECTION FUNCTIONS ##
def quickMinMax(self, data):
    """
    Estimate the min/max values of *data* by subsampling.
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]  # index with a tuple; list indexing is an error in modern numpy
    return nanmin(data), nanmax(data)
def dataBounds(self, ax, frac=1.0, orthoRange=None):
    if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None:
        return self.bounds[ax]

    # self.prepareGeometryChange()
    if self.data is None or len(self.data) == 0:
        return (None, None)

    if ax == 0:
        d = self.data['x']
        d2 = self.data['y']
    elif ax == 1:
        d = self.data['y']
        d2 = self.data['x']

    if orthoRange is not None:
        mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])
        d = d[mask]
        d2 = d2[mask]

    if frac >= 1.0:
        self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth * 0.7072,
                           np.nanmax(d) + self._maxSpotWidth * 0.7072)
        return self.bounds[ax]
    elif frac <= 0.0:
        raise Exception("Value for parameter 'frac' must be > 0. (got %s)" % str(frac))
    else:
        mask = np.isfinite(d)
        d = d[mask]
        return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])
def normalize_data(self, values):
    normalized_values = copy.deepcopy(values)
    data = np.array(values, dtype=float)[:, 0:5]
    data_min = np.nanmin(data, 0)
    data_max = np.nanmax(data, 0)
    print(data_min)
    print(data_max)
    for i in range(len(values)):
        for j in range(5):
            normalized_values[i][j] = np.abs(values[i][j] - data_min[j]) / np.abs(data_max[j] - data_min[j])
    return normalized_values, data_min, data_max
def writeBinData(out_file, i, GenotypeData, ScoreList, NumInfoSites):
    num_lines = len(GenotypeData.accessions)
    (likeliScore, likeliHoodRatio) = snpmatch.calculate_likelihoods(ScoreList, NumInfoSites)
    if len(likeliScore) > 0:
        NumAmb = np.where(likeliHoodRatio < snpmatch.lr_thres)[0]
        if len(NumAmb) >= 1 and len(NumAmb) < num_lines:
            try:
                nextLikeli = np.nanmin(likeliHoodRatio[np.where(likeliHoodRatio > snpmatch.lr_thres)[0]])
            except:
                nextLikeli = 1
            for k in NumAmb:
                score = float(ScoreList[k]) / NumInfoSites[k]
                out_file.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %
                               (GenotypeData.accessions[k], int(ScoreList[k]), NumInfoSites[k],
                                score, likeliScore[k], nextLikeli, len(NumAmb), i + 1))
def image_as_uint8(im):
    """ Convert the given image to uint8

    If the dtype is already uint8, it is returned as-is. If the image
    is float, and all values are between 0 and 1, the values are
    multiplied by 255. In all other situations, the values are scaled
    such that the minimum value becomes 0 and the maximum value becomes
    255.
    """
    if not isinstance(im, np.ndarray):
        raise ValueError('image must be a numpy array')
    dtype_str = str(im.dtype)
    # Already uint8?
    if dtype_str == 'uint8':
        return im
    # Handle float
    mi, ma = np.nanmin(im), np.nanmax(im)
    if dtype_str.startswith('float'):
        if mi >= 0 and ma <= 1:
            mi, ma = 0, 1
    # Now make float copy before we scale
    im = im.astype('float32')
    # Scale the values between 0 and 255
    if np.isfinite(mi) and np.isfinite(ma):
        if mi:
            im -= mi
        if ma != 255:
            im *= 255.0 / (ma - mi)
        assert np.nanmax(im) < 256
    return im.astype(np.uint8)


# currently not used ... its only use is to easily provide the global meta info
def test_masked(self):
    mat = np.ma.fix_invalid(_ndat)
    msk = mat._mask.copy()
    for f in [np.nanmin]:
        res = f(mat, axis=1)
        tgt = f(_ndat, axis=1)
        assert_equal(res, tgt)
        assert_equal(mat._mask, msk)
        assert_(not np.isinf(mat).any())
def test_nanmin(self):
    tgt = np.min(self.mat)
    for mat in self.integer_arrays():
        assert_equal(np.nanmin(mat), tgt)
def data(self, data):
    """
    :type: numpy.ndarray
    """
    self._assert_shape(data, self._x_indexes, self._y_indexes)
    data[data == -np.inf] = 0.0
    data[data == np.inf] = 0.0
    self._data = data
    self._min_value = np.nanmin(self.data)
    self._max_value = np.nanmax(self.data)
    self._data_x_indexes = list(range(data.shape[0]))
    self._data_y_indexes = list(range(data.shape[1]))
    self._dirty = False
def _evaluate(self, x):
    '''
    Returns the level of the function at each value in x as the minimum among
    all of the functions. Only called internally by HARKinterpolator1D.__call__.
    '''
    if _isscalar(x):
        y = np.nanmin([f(x) for f in self.functions])
    else:
        m = len(x)
        fx = np.zeros((m, self.funcCount))
        for j in range(self.funcCount):
            fx[:, j] = self.functions[j](x)
        y = np.nanmin(fx, axis=1)
    return y
def _evaluate(self, x, y):
    '''
    Returns the level of the function at each value in (x,y) as the minimum
    among all of the functions. Only called internally by
    HARKinterpolator2D.__call__.
    '''
    if _isscalar(x):
        f = np.nanmin([f(x, y) for f in self.functions])
    else:
        m = len(x)
        temp = np.zeros((m, self.funcCount))
        for j in range(self.funcCount):
            temp[:, j] = self.functions[j](x, y)
        f = np.nanmin(temp, axis=1)
    return f
def _evaluate(self, x, y, z):
    '''
    Returns the level of the function at each value in (x,y,z) as the minimum
    among all of the functions. Only called internally by
    HARKinterpolator3D.__call__.
    '''
    if _isscalar(x):
        f = np.nanmin([f(x, y, z) for f in self.functions])
    else:
        m = len(x)
        temp = np.zeros((m, self.funcCount))
        for j in range(self.funcCount):
            temp[:, j] = self.functions[j](x, y, z)
        f = np.nanmin(temp, axis=1)
    return f
def replot(self, val):
    '''
    Update plot
    '''
    self.cadence = int(val)
    self.implot.set_data(self.images[int(val)])
    self.implot.set_clim(vmin=np.nanmin(self.images[int(val)]),
                         vmax=np.nanmax(self.images[int(val)]))
    self.tracker1.set_xdata([self.time[self.cadence], self.time[self.cadence]])
    self.tracker2.set_xdata([self.time[self.cadence], self.time[self.cadence]])
    self.update_bkg()
    self.update_lc()
    self.update_lcbkg()
    self.fig.canvas.draw()
def vmin(self):
    return self._vmin if self._vmin else np.nanmin(self.hic_matrix)
def _plot(self, region=None, cax=None):
    da_sub, regions_sub = sub_data_regions(self.da, self.regions, region)

    da_sub_masked = np.ma.MaskedArray(da_sub, mask=np.isnan(da_sub))
    bin_coords = np.r_[[(x.start - 1) for x in regions_sub], regions_sub[-1].end]
    x, y = np.meshgrid(bin_coords, self.window_sizes)
    self.mesh = self.ax.pcolormesh(x, y, da_sub_masked, cmap=self.colormap, vmax=self.vmax)

    self.colorbar = plt.colorbar(self.mesh, cax=cax, orientation="vertical")
    self.window_size_line = self.ax.axhline(self.current_window_size, color='red')

    if self.log_y:
        self.ax.set_yscale("log")
    self.ax.set_ylim((np.nanmin(self.window_sizes), np.nanmax(self.window_sizes)))
def _plot(self, region=None, cax=None):
    self._new_region(region)
    bin_coords = [(x.start - 1) for x in self.sr]
    ds = self.da_sub[self.init_row]
    self.line, = self.ax.plot(bin_coords, ds)

    if not self.is_symmetric:
        self.current_cutoff = (self.ax.get_ylim()[1] - self.ax.get_ylim()[0]) / 2 + self.ax.get_ylim()[0]
    else:
        self.current_cutoff = self.ax.get_ylim()[1] / 2

    self.ax.axhline(0.0, linestyle='dashed', color='grey')
    self.cutoff_line = self.ax.axhline(self.current_cutoff, color='r')
    if self.is_symmetric:
        self.cutoff_line_mirror = self.ax.axhline(-1 * self.current_cutoff, color='r')

    self.ax.set_ylim((np.nanmin(ds), np.nanmax(ds)))
def update(self, ix=None, cutoff=None, region=None, update_canvas=True):
    if region is not None:
        self._new_region(region)

    if ix is not None and ix != self.current_ix:
        ds = self.da_sub[ix]
        self.current_ix = ix
        self.line.set_ydata(ds)
        self.ax.set_ylim((np.nanmin(ds), np.nanmax(ds)))
        if cutoff is None:
            if not self.is_symmetric:
                self.update(cutoff=(self.ax.get_ylim()[1] - self.ax.get_ylim()[0]) / 2 + self.ax.get_ylim()[0],
                            update_canvas=False)
            else:
                self.update(cutoff=self.ax.get_ylim()[1] / 2, update_canvas=False)
        if update_canvas:
            self.fig.canvas.draw()

    if cutoff is not None and cutoff != self.current_cutoff:
        if self.is_symmetric:
            self.current_cutoff = abs(cutoff)
        else:
            self.current_cutoff = cutoff
        self.cutoff_line.set_ydata(self.current_cutoff)
        if self.is_symmetric:
            self.cutoff_line_mirror.set_ydata(-1 * self.current_cutoff)
        if update_canvas:
            self.fig.canvas.draw()
def define_levels(self, nb_class, disc_func):
    pot = self.pot
    _min = np.nanmin(pot)

    if not nb_class:
        nb_class = int(get_opt_nb_class(len(pot)) - 2)
    if not disc_func or "prog_geom" in disc_func:
        levels = [_min] + [
            np.nanmax(pot) / i for i in range(1, nb_class + 1)][::-1]
    elif "equal_interval" in disc_func:
        _bin = np.nanmax(pot) / nb_class
        levels = [_min] + [_bin * i for i in range(1, nb_class + 1)]
    elif "percentiles" in disc_func:
        levels = np.percentile(
            np.concatenate((pot[pot.nonzero()], np.array([_min]))),
            np.linspace(0.0, 100.0, nb_class + 1))
    elif "jenks" in disc_func:
        levels = list(jenks_breaks(np.concatenate(
            ([_min], pot[pot.nonzero()])), nb_class))
        levels[0] = levels[0] - _min * 0.01
    elif "head_tail" in disc_func:
        levels = head_tail_breaks(np.concatenate(
            ([_min], pot[pot.nonzero()])))
    elif "maximal_breaks" in disc_func:
        levels = maximal_breaks(np.concatenate(
            ([_min], pot[pot.nonzero()])), nb_class)
    else:
        raise ValueError

    return levels
def set_range(self, x_data, y_data):
    min_x, max_x = np.nanmin(x_data), np.nanmax(x_data)
    min_y, max_y = np.nanmin(y_data), np.nanmax(y_data)
    self.plotview.setRange(
        QRectF(min_x, min_y, max_x - min_x, max_y - min_y),
        padding=0.025)
    self.plotview.replot()