We extracted the following 37 code examples from open source Python projects to illustrate how to use numpy.fix().
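Before the examples, a quick note on the semantics being exercised: np.fix rounds element-wise toward zero (truncation), whereas np.floor rounds toward negative infinity; the two differ only for negative inputs. A minimal illustration:

import numpy as np

a = np.array([2.7, -2.7, 0.5, -0.5])
print(np.fix(a))    # [ 2. -2.  0. -0.]  rounds toward zero
print(np.floor(a))  # [ 2. -3.  0. -1.]  rounds toward -inf
print(np.trunc(a))  # [ 2. -2.  0. -0.]  equivalent to fix for finite values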
def filter_window_cartesian(img, wsize, fun, scale, **kwargs):
    r"""Apply a filter of square window half size `wsize` on a given
    cartesian image `img`.

    Parameters
    ----------
    img : :class:`numpy:numpy.ndarray`
        2d array of values to which the filter is to be applied
    wsize : float
        Half size of the window centred on the pixel [m]
    fun : string
        name of the 2d filter from :mod:`scipy:scipy.ndimage`
    scale : tuple of 2 floats
        x and y scale of the cartesian grid [m]

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        Array with the same shape as `img`, containing the filter's results.
    """
    fun = getattr(filters, "%s_filter" % fun)
    size = np.fix(wsize / scale + 0.5).astype(int)
    data_filtered = fun(img, size, **kwargs)
    return data_filtered
def generateBoundingBox(imap, reg, scale, t):
    # use heatmap to generate bounding boxes
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:, :, 0])
    dy1 = np.transpose(reg[:, :, 1])
    dx2 = np.transpose(reg[:, :, 2])
    dy2 = np.transpose(reg[:, :, 3])
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y, x)]
    reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
    if reg.size == 0:
        reg = np.empty((0, 3))
    bb = np.transpose(np.vstack([y, x]))
    q1 = np.fix((stride * bb + 1) / scale)
    q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
    return boundingbox, reg
def pitch_strength_all_candidates(f_erbs, L, pc):
    """
    Calculates the pitch ``strength'' of all candidate pitches

    Args:
        f_erbs (array): frequencies in ERBs
        L (matrix): loudness matrix
        pc (array): pitch candidates array

    Returns:
        S (array): strength of pitches corresponding to pc's
    """
    # create pitch strength matrix
    S = np.zeros((pc.size, L.shape[1]))

    # define integration regions
    k = np.zeros(pc.size + 1)
    for j in range(k.size - 1):
        idx = int(k[j])
        f = f_erbs[idx:]
        val = find(f > pc[j] / 4)[0]
        k[j + 1] = k[j] + val
    k = k[1:]  # TODO: fix this sloppiness

    # create loudness normalization matrix
    N = np.sqrt(np.flipud(np.cumsum(np.flipud(L * L), 0)))
    for j in range(pc.size):
        # normalize loudness
        n = N[int(k[j]), :]
        n[n == 0] = -np.inf  # to make zero-loudness equal zero after normalization
        nL = L[int(k[j]):] / np.tile(n, (int(L.shape[0] - k[j]), 1))

        # compute pitch strength
        S[j] = pitch_strength_one_candidate(f_erbs[int(k[j]):], nL, pc[j])
    return S
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
    in_side = 2 * out_side + 11
    stride = 0
    if out_side != 1:
        stride = float(in_side - 12) / (out_side - 1)
    (x, y) = np.where(cls_prob >= threshold)
    boundingbox = np.array([x, y]).T
    bb1 = np.fix((stride * boundingbox + 0) * scale)
    bb2 = np.fix((stride * boundingbox + 11) * scale)
    boundingbox = np.concatenate((bb1, bb2), axis=1)
    dx1 = roi[0][x, y]
    dx2 = roi[1][x, y]
    dx3 = roi[2][x, y]
    dx4 = roi[3][x, y]
    score = np.array([cls_prob[x, y]]).T
    offset = np.array([dx1, dx2, dx3, dx4]).T
    boundingbox = boundingbox + offset * 12.0 * scale
    rectangles = np.concatenate((boundingbox, score), axis=1)
    rectangles = rect2square(rectangles)
    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        sc = rectangles[i][4]
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, sc])
    return NMS(pick, 0.3, 'iou')
def generateBoundingBox(map, reg, scale, t):
    stride = 2
    cellsize = 12
    map = map.T
    dx1 = reg[0, :, :].T
    dy1 = reg[1, :, :].T
    dx2 = reg[2, :, :].T
    dy2 = reg[3, :, :].T
    (x, y) = np.where(map >= t)
    yy = y
    xx = x
    score = map[x, y]
    reg = np.array([dx1[x, y], dy1[x, y], dx2[x, y], dy2[x, y]])
    if reg.shape[0] == 0:
        pass
    boundingbox = np.array([yy, xx]).T

    bb1 = np.fix((stride * boundingbox + 1) / scale).T  # MATLAB indexes from 1, so its version uses "boundingbox - 1";
    bb2 = np.fix((stride * boundingbox + cellsize - 1 + 1) / scale).T  # Python does not need the shift
    score = np.array([score])

    boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
    return boundingbox_out.T
def generateBoundingBox(imap, reg, scale, t):
    # use heatmap to generate bounding boxes
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:,:,0])
    dy1 = np.transpose(reg[:,:,1])
    dx2 = np.transpose(reg[:,:,2])
    dy2 = np.transpose(reg[:,:,3])
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y,x)]
    reg = np.transpose(np.vstack([dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)]]))
    if reg.size == 0:
        reg = np.empty((0,3))
    bb = np.transpose(np.vstack([y,x]))
    q1 = np.fix((stride*bb+1)/scale)
    q2 = np.fix((stride*bb+cellsize-1+1)/scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
    return boundingbox, reg

# function pick = nms(boxes,threshold,type)
def generateBoundingBox(imap, reg, scale, t):
    # use heatmap to generate bounding boxes
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:,:,0])
    dy1 = np.transpose(reg[:,:,1])
    dx2 = np.transpose(reg[:,:,2])
    dy2 = np.transpose(reg[:,:,3])
    y, x = np.where(imap >= t)
    if y.shape[0]==1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y,x)]
    reg = np.transpose(np.vstack([dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)]]))
    if reg.size==0:
        reg = np.empty((0,3))
    bb = np.transpose(np.vstack([y,x]))
    q1 = np.fix((stride*bb+1)/scale)
    q2 = np.fix((stride*bb+cellsize-1+1)/scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
    return boundingbox, reg

# function pick = nms(boxes,threshold,type)
def formalize_sample(samp):
    samp = np.array(samp)
    if np.any(samp != np.fix(samp)):
        raise ValueError('Input sample must only contain integers.')
    if samp.ndim == 1 or (samp.ndim == 2 and samp.shape[0] == 1):
        samp = samp.reshape((samp.size, 1))
    return samp
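The `samp != np.fix(samp)` comparison above is a common integrality test: a float equals its own fix() exactly when it has no fractional part. A quick check:

import numpy as np

print(np.any(np.array([1.0, 2.0, -3.0]) != np.fix(np.array([1.0, 2.0, -3.0]))))  # False: all integral
print(np.any(np.array([1.0, 2.5]) != np.fix(np.array([1.0, 2.5]))))              # True: 2.5 is not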
def float_to_rational(self, a):
    assert np.all(a > 0.0)
    d = 2 ** 16 / np.fix(a + 1).astype(int)  # Uglier than it used to be: np.int(a + 1)
    n = np.fix(a * d + 1).astype(int)
    return n, d
def to_julian_date(self):
    """
    Convert DatetimeIndex to Float64Index of Julian Dates.
    0 Julian date is noon January 1, 4713 BC.
    http://en.wikipedia.org/wiki/Julian_day
    """

    # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
    year = self.year
    month = self.month
    day = self.day
    testarr = month < 3
    year[testarr] -= 1
    month[testarr] += 12
    return Float64Index(day +
                        np.fix((153 * month - 457) / 5) +
                        365 * year +
                        np.floor(year / 4) -
                        np.floor(year / 100) +
                        np.floor(year / 400) +
                        1721118.5 +
                        (self.hour +
                         self.minute / 60.0 +
                         self.second / 3600.0 +
                         self.microsecond / 3600.0 / 1e+6 +
                         self.nanosecond / 3600.0 / 1e+9
                         ) / 24.0)
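As a sanity check on the formula above (a usage sketch; to_julian_date is exposed on pandas DatetimeIndex objects): noon UTC on 2000-01-01, the J2000 epoch, corresponds to Julian date 2451545.0.

import pandas as pd

idx = pd.DatetimeIndex(['2000-01-01 12:00:00'])
print(idx.to_julian_date())  # [2451545.0], the J2000 epoch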
def test_getitem_setitem_ellipsis(self):
    s = Series(np.random.randn(10))

    np.fix(s)

    result = s[...]
    assert_series_equal(result, s)

    s[...] = 5
    self.assertTrue((result == 5).all())
def test_reindex_corner(self):
    # (don't forget to fix this) I think it's fixed
    self.empty.reindex(self.ts.index, method='pad')  # it works

    # corner case: pad empty series
    reindexed = self.empty.reindex(self.ts.index, method='pad')

    # pass non-Index
    reindexed = self.ts.reindex(list(self.ts.index))
    assert_series_equal(self.ts, reindexed)

    # bad fill method
    ts = self.ts[::2]
    self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def _segpoints(self, stimulus):
    """Find segmentation points."""
    stim_diff = np.diff(stimulus)
    changepoints = np.nonzero(stim_diff)[0]
    changepoints = np.hstack([0, changepoints, stimulus.size])
    changepoints_diff = np.diff(changepoints)
    segpoints = changepoints[:-1] + np.fix(changepoints_diff / 2)
    segpoints = segpoints[0::2]  # Sub-sample every 2 points
    return segpoints
def generate_bboxes(scores_map, reg, scale, t):
    stride = 2
    cellsize = 12

    (y, x) = np.where(scores_map >= t)
    if len(y) < 1:
        return None

    scores = scores_map[y, x]
    dx1, dy1, dx2, dy2 = [reg[i, y, x] for i in range(4)]
    reg = np.array([dx1, dy1, dx2, dy2])

    bbox = np.array([y, x])
    # bb1 = np.fix((stride * bbox) / scale)
    # bb2 = np.fix((stride * bbox + cellsize) / scale)
    # !!! Use fix() for the top-left point, and round() for the bottom-right
    # !!! point, so we can cover a 'whole' face. Added by zhaoyafei 2017-07-18
    bb1 = np.fix((stride * bbox) / scale)
    bb2 = np.round((stride * bbox + cellsize) / scale)

    bbox_out = np.vstack((bb1, bb2, scores, reg))
    return bbox_out.T
def generateBoundingBox(imap, reg, scale, t):
    """Use heatmap to generate bounding boxes"""
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:,:,0])
    dy1 = np.transpose(reg[:,:,1])
    dx2 = np.transpose(reg[:,:,2])
    dy2 = np.transpose(reg[:,:,3])
    y, x = np.where(imap >= t)
    if y.shape[0]==1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y,x)]
    reg = np.transpose(np.vstack([dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)]]))
    if reg.size==0:
        reg = np.empty((0,3))
    bb = np.transpose(np.vstack([y,x]))
    q1 = np.fix((stride*bb+1)/scale)
    q2 = np.fix((stride*bb+cellsize-1+1)/scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
    return boundingbox, reg

# function pick = nms(boxes,threshold,type)
def generateBoundingBox(imap, reg, scale, t):
    # use heatmap to generate bounding boxes
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:, :, 0])
    dy1 = np.transpose(reg[:, :, 1])
    dx2 = np.transpose(reg[:, :, 2])
    dy2 = np.transpose(reg[:, :, 3])
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y, x)]
    reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
    if reg.size == 0:
        reg = np.empty((0, 3))
    bb = np.transpose(np.vstack([y, x]))
    q1 = np.fix((stride * bb + 1) / scale)
    q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
    return boundingbox, reg

# function pick = nms(boxes,threshold,type)
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
    in_side = 2 * out_side + 11
    stride = 0
    if out_side != 1:
        stride = float(in_side - 12) / (out_side - 1)
    (x, y) = np.where(cls_prob >= threshold)
    boundingbox = np.array([x, y]).T
    bb1 = np.fix((stride * boundingbox + 0) * scale)
    bb2 = np.fix((stride * boundingbox + 11) * scale)
    boundingbox = np.concatenate((bb1, bb2), axis=1)
    dx1 = roi[0][x, y]
    dx2 = roi[1][x, y]
    dx3 = roi[2][x, y]
    dx4 = roi[3][x, y]
    score = np.array([cls_prob[x, y]]).T
    offset = np.array([dx1, dx2, dx3, dx4]).T
    boundingbox = boundingbox + offset * 12.0 * scale
    rectangles = np.concatenate((boundingbox, score), axis=1)
    rectangles = rect2square(rectangles)
    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        sc = rectangles[i][4]
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, sc])
    return NMS(pick, 0.5, 'iou')
def pitch_strength_one_candidate(f_erbs, nL, pc):
    """
    Calculates the pitch ``strength'' for a single candidate

    Args:
        f_erbs (array): frequencies in ERBs
        nL: normalized loudness
        pc: pitch candidate

    Returns:
        s (float): value of strength for a pitch
    """
    # fix rounds a number *towards* zero
    n = int(np.fix(f_erbs[-1] / pc - 0.75))  # number of harmonics
    if n == 0:
        return np.nan
    k = np.zeros(f_erbs.shape)  # kernel

    # normalize freq w.r.t. candidate
    q = f_erbs / pc

    # create kernel
    primes = np.concatenate((np.ones(1), primes_2_to_n(n)))
    for i in primes:
        a = np.abs(q - i)
        # peak's weight
        p = a < 0.25
        k[p] = np.cos(2 * np.pi * q[p])
        # valley's weight
        v = np.logical_and(0.25 < a, a < 0.75)
        k[v] = k[v] + np.cos(2 * np.pi * q[v]) / 2

    # apply envelope
    k = k * np.sqrt(1 / f_erbs)

    # K+-normalized kernel
    k = k / np.linalg.norm(k[k > 0])

    # strength value of pitch
    s = np.dot(k, nL)
    return s
def __init__(self, cnn=None, NetworkCode=None, StationCode=None, t=None):

    if t is None:
        ppp_soln = PPP_soln(cnn, NetworkCode, StationCode)
        t = ppp_soln.t

    # wrap around the solutions
    wt = np.sort(np.unique(t - np.fix(t)))

    # analyze the gaps in the data
    dt = np.diff(wt)

    # max dt (internal)
    dtmax = np.max(dt)

    # dt wrapped around
    dt_interyr = 1 - wt[-1] + wt[0]

    if dt_interyr > dtmax:
        dtmax = dt_interyr

    # save the value of the max wrapped delta time
    self.dt_max = dtmax

    # if dtmax <= 3 months (90 days = 0.2465), we can fit the annual
    # if dtmax <= 1.5 months (45 days = 0.1232), we can fit the semi-annual too
    if dtmax <= 0.1232:
        # all components (annual and semi-annual)
        self.A = np.array([sin(2 * pi * t), cos(2 * pi * t), sin(4 * pi * t), cos(4 * pi * t)]).transpose()
        self.frequencies = 2

    elif dtmax <= 0.2465:
        # only annual
        self.A = np.array([sin(2 * pi * t), cos(2 * pi * t)]).transpose()
        self.frequencies = 1

    else:
        # no periodic terms
        self.A = np.array([])
        self.frequencies = 0

    self.terms = self.frequencies * 2

    return
def __init__(self, cnn=None, NetworkCode=None, StationCode=None, t=None):

    if t is None:
        ppp_soln = PPP_soln(cnn, NetworkCode, StationCode)
        t = ppp_soln.t

    # wrap around the solutions
    wt = np.sort(np.unique(t - np.fix(t)))

    # analyze the gaps in the data
    dt = np.diff(wt)

    # max dt (internal)
    dtmax = np.max(dt)

    # dt wrapped around
    dt_interyr = 1 - wt[-1] + wt[0]

    if dt_interyr > dtmax:
        dtmax = dt_interyr

    # save the value of the max wrapped delta time
    self.dt_max = dtmax

    # if dtmax <= 3 months (90 days = 0.2465), we can fit the annual
    # if dtmax <= 1.5 months (45 days = 0.1232), we can fit the semi-annual too
    if dtmax <= 0.1232:
        # all components (annual and semi-annual)
        self.A = np.array([sin(2 * pi * t), cos(2 * pi * t), sin(4 * pi * t), cos(4 * pi * t)]).transpose()
        self.frequencies = 2

    elif dtmax <= 0.2465:
        # only annual
        self.A = np.array([sin(2 * pi * t), cos(2 * pi * t)]).transpose()
        self.frequencies = 1

    else:
        # no periodic terms
        self.A = np.array([])
        self.frequencies = 0

    # variables to store the periodic amplitudes
    self.sin = np.array([])
    self.cos = np.array([])

    self.params = self.frequencies * 2
def eemd(data, noise_std=0.2, num_ensembles=100, num_sifts=10):
    """
    Ensemble Empirical Mode Decomposition (EEMD)

    *** Must still add in post-processing with EMD ***
    """
    # get modes to generate
    num_samples = len(data)
    num_modes = int(np.fix(np.log2(num_samples))) - 1

    # normalize incoming data
    dstd = data.std()
    y = data / dstd

    # allocate for starting value
    all_modes = np.zeros((num_modes + 2, num_samples))

    # loop over num_ensembles
    for e in range(num_ensembles):
        # perturb starting data
        x0 = y + np.random.randn(num_samples) * noise_std

        # save the starting value
        all_modes[0] += x0

        # loop over modes
        for m in range(num_modes):
            # do the sifts
            imf = x0
            for s in range(num_sifts):
                imf = _do_one_sift(imf)

            # save the imf
            all_modes[m + 1] += imf

            # set the residual
            x0 = x0 - imf

        # save the final residual
        all_modes[-1] += x0

    # average everything out and renormalize
    return all_modes * dstd / np.float64(num_ensembles)
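For context on the mode count above, np.fix truncates the base-2 log toward zero, so a 1000-sample signal decomposes into 8 IMFs, with two extra rows for the stored input and the final residual. A quick check:

import numpy as np

num_samples = 1000
num_modes = int(np.fix(np.log2(num_samples))) - 1  # fix(9.97) = 9, minus 1
print(num_modes)      # 8
print(num_modes + 2)  # 10 rows in all_modes: input, 8 IMFs, residual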
def generateBoundingBox(map, reg, scale, t):
    stride = 2
    cellsize = 12
    map = map.T
    dx1 = reg[0, :, :].T
    dy1 = reg[1, :, :].T
    dx2 = reg[2, :, :].T
    dy2 = reg[3, :, :].T
    (x, y) = np.where(map >= t)
    yy = y
    xx = x
    '''
    if y.shape[0] == 1:  # only one point exceeds the threshold
        y = y.T
        x = x.T
        score = map[x, y].T
        dx1 = dx1.T
        dy1 = dy1.T
        dx2 = dx2.T
        dy2 = dy2.T
        # a little strange: when there is only one bb created by PNet
        a = (x * map.shape[1]) + (y + 1)
        x = a / map.shape[0]
        y = a % map.shape[0] - 1
    else:
        score = map[x, y]
    '''
    score = map[x, y]
    reg = np.array([dx1[x, y], dy1[x, y], dx2[x, y], dy2[x, y]])
    if reg.shape[0] == 0:
        pass
    boundingbox = np.array([yy, xx]).T

    bb1 = np.fix((stride * boundingbox + 1) / scale).T  # MATLAB indexes from 1, so its version uses "boundingbox - 1";
    bb2 = np.fix((stride * boundingbox + cellsize - 1 + 1) / scale).T  # Python does not need the shift
    score = np.array([score])

    boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
    return boundingbox_out.T
def STFT(x, wlen, h, nfft, fs):
    ########################################################
    #           Short-Time Fourier Transform               #
    #            with MATLAB Implementation                #
    #                   For Python                         #
    #           Copier: Nelson Yalta 11/03/15              #
    ########################################################
    # function: [stft, f, t] = stft(x, wlen, h, nfft, fs)
    # x    - signal in the time domain
    # wlen - length of the hamming window
    # h    - hop size
    # nfft - number of FFT points
    # fs   - sampling frequency, Hz
    # f    - frequency vector, Hz
    # t    - time vector, s
    # stft - STFT matrix (only unique points, time across columns, freq across rows)

    # represent x as a column vector if it is not
    if (len(x.shape) > 1) and (x.shape[1] > 1):
        x = x.transpose()

    # length of the signal
    xlen = x.shape[0]

    # form a periodic hamming window
    win = hamming(wlen, False)

    # form the stft matrix
    rown = int(np.ceil((1.0 + nfft) / 2))
    coln = int(np.fix((xlen - wlen) / h) + 1)
    short_tft = np.zeros((rown, coln)).astype('complex64')

    # initialize the indexes
    indx = 0
    col = 0

    # perform STFT
    while (indx + wlen <= xlen):
        # windowing
        xw = x[indx:indx + wlen] * win

        # FFT
        X = np.fft.fft(xw, nfft)

        # update the stft matrix
        short_tft[:, col] = X[0:rown]

        # update the indexes
        indx += h
        col += 1

    # calculate the time and frequency vectors
    t = np.linspace(wlen / 2, wlen / 2 + (coln - 1) * h, coln) / fs
    f = np.arange(0, rown, dtype=np.float32) * fs / nfft

    return short_tft, f, t
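A minimal usage sketch for the STFT above, assuming `hamming` is scipy.signal.windows.hamming (the call hamming(wlen, False) matches its (M, sym) signature): a one-second 440 Hz tone sampled at 16 kHz should yield a 257 x 61 complex matrix with energy concentrated in the bin nearest 440 Hz.

import numpy as np
from scipy.signal.windows import hamming  # assumed source of hamming()

fs = 16000
x = np.sin(2 * np.pi * 440.0 * np.arange(fs) / fs)  # 1 s of a 440 Hz tone
S, f, t = STFT(x, wlen=512, h=256, nfft=512, fs=fs)
print(S.shape)  # (257, 61): rown = ceil(513/2), coln = fix((16000-512)/256) + 1
print(f[np.argmax(np.abs(S[:, 0]))])  # 437.5, the FFT bin closest to 440 Hz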
def balanced_accuracy_score(y_true, y_pred, method='edges', random_state=None):
    """Balanced classification accuracy metric (multi-class).

    Keeps only a subset of the data instances corresponding to the rest
    class. The size of the subset is equal to the median group size of the
    other classes."""
    _check_x_y(y_true, y_pred)
    classes, n_instances = np.unique(y_true, return_counts=True)
    median_instances = np.median(n_instances[1:])
    n_classes = classes.size

    idx_rest = np.where(y_true == 0)[0]  # Find rest instances
    idx_else = np.where(y_true != 0)[0]  # Find all other instances

    if method == 'random':
        if random_state is not None:
            np.random.seed(random_state)
        idx_keep = np.random.choice(idx_rest, int(median_instances), replace=False)  # Keep a random subset
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))

    if method == 'edges':
        # How many samples to keep for each rest repetition
        samples_per_rest_repetition = np.fix(median_instances / (2 * n_classes - 1)).astype('int')
        if samples_per_rest_repetition < 1:
            samples_per_rest_repetition = 1

        changes = np.diff(y_true)  # Stimulus change
        idx_changes = np.nonzero(changes)[0]  # Stimulus change
        idx_from_rest = idx_changes[np.arange(start=0, stop=idx_changes.size, step=2)]  # Changing from rest to movement
        idx_to_rest = idx_changes[np.arange(start=1, stop=idx_changes.size, step=2)]  # Changing from movement to rest
        idx_to_rest = np.hstack(([0], idx_to_rest))

        idx_keep = []
        for ii, jj in zip(idx_to_rest, idx_from_rest):
            center = np.fix(ii + (jj - ii) / 2)
            idx_keep.extend(np.arange(center, center + samples_per_rest_repetition))
        idx_keep = np.asarray(idx_keep, dtype='int')
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))

    true_new = y_true[idx_final]
    pred_new = y_pred[idx_final]
    return accuracy_score(true_new, pred_new)
def balanced_log_loss(y_true, y_pred, method='edges', random_state=None):
    """Balanced log-loss metric (multi-class).

    Keeps only a subset of the data instances corresponding to the rest
    class. The size of the subset is equal to the median group size of the
    other classes."""
    # y_true = np.asarray(y_true)
    # y_pred = np.asarray(y_pred)
    classes, n_instances = np.unique(y_true, return_counts=True)
    median_instances = np.median(n_instances[1:])
    n_classes = classes.size

    idx_rest = np.where(y_true == 0)[0]  # Find rest instances
    idx_else = np.where(y_true != 0)[0]  # Find all other instances

    if method == 'random':
        if random_state is not None:
            np.random.seed(random_state)
        idx_keep = np.random.choice(idx_rest, int(median_instances), replace=False)  # Keep a random subset
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))

    if method == 'edges':
        # How many samples to keep for each rest repetition
        samples_per_rest_repetition = np.fix(median_instances / (2 * n_classes - 1)).astype('int')
        if samples_per_rest_repetition < 1:
            samples_per_rest_repetition = 1

        changes = np.diff(y_true)  # Stimulus change
        idx_changes = np.nonzero(changes)[0]  # Stimulus change
        idx_from_rest = idx_changes[np.arange(start=0, stop=idx_changes.size, step=2)]  # Changing from rest to movement
        idx_to_rest = idx_changes[np.arange(start=1, stop=idx_changes.size, step=2)]  # Changing from movement to rest
        idx_to_rest = np.hstack(([0], idx_to_rest))

        idx_keep = []
        for ii, jj in zip(idx_to_rest, idx_from_rest):
            center = np.fix(ii + (jj - ii) / 2)
            idx_keep.extend(np.arange(center, center + samples_per_rest_repetition))
        idx_keep = np.asarray(idx_keep, dtype='int')
        idx_final = np.sort(np.hstack((idx_keep, idx_else)))

    true_new = y_true[idx_final]
    pred_new = y_pred[idx_final]
    return log_loss(true_new, pred_new)
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
    rows, cols = np.shape(im)

    # Find mean orientation within the block. This is done by averaging the
    # sines and cosines of the doubled angles before reconstructing the
    # angle again. This avoids wraparound problems at the origin.
    cosorient = np.mean(np.cos(2 * orientim))
    sinorient = np.mean(np.sin(2 * orientim))
    orient = math.atan2(sinorient, cosorient) / 2

    # Rotate the image block so that the ridges are vertical
    # ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
    # rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
    rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0),
                                 reshape=False, order=3, mode='nearest')

    # Now crop the image so that the rotated image does not contain any
    # invalid regions. This prevents the projection down the columns
    # from being mucked up.
    cropsze = int(np.fix(rows / np.sqrt(2)))
    offset = int(np.fix((rows - cropsze) / 2))
    rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]

    # Sum down the columns to get a projection of the grey values down
    # the ridges.
    proj = np.sum(rotim, axis=0)
    dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))

    temp = np.abs(dilation - proj)

    peak_thresh = 2
    maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
    maxind = np.where(maxpts)

    rows_maxind, cols_maxind = np.shape(maxind)

    # Determine the spatial frequency of the ridges by dividing the
    # distance between the 1st and last peaks by the (No of peaks - 1). If no
    # peaks are detected, or the wavelength is outside the allowed bounds,
    # the frequency image is set to 0.
    if cols_maxind < 2:
        freqim = np.zeros(im.shape)
    else:
        NoOfPeaks = cols_maxind
        waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
        if waveLength >= minWaveLength and waveLength <= maxWaveLength:
            freqim = 1 / np.double(waveLength) * np.ones(im.shape)
        else:
            freqim = np.zeros(im.shape)

    return freqim
def filter_window_polar(img, wsize, fun, rscale, random=False):
    r"""Apply a filter of an approximated square window of half size `wsize`
    on a given polar image `img`.

    Parameters
    ----------
    img : :class:`numpy:numpy.ndarray`
        2d array of values to which the filter is to be applied
    wsize : float
        Half size of the window centred on the pixel [m]
    fun : string
        name of the 1d filter from :mod:`scipy:scipy.ndimage`
    rscale : float
        range [m] scale of the polar grid
    random : bool
        True to use random azimuthal size to avoid long-term biases.

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        Array with the same shape as `img`, containing the filter's results.
    """
    ascale = 2 * np.pi / img.shape[0]
    data_filtered = np.empty(img.shape, dtype=img.dtype)
    fun = getattr(filters, "%s_filter1d" % fun)
    nbins = img.shape[-1]
    ranges = np.arange(nbins) * rscale + rscale / 2
    asize = ranges * ascale
    if random:
        na = prob_round(wsize / asize).astype(int)
    else:
        na = np.fix(wsize / asize + 0.5).astype(int)
    # Cap the number of adjacent azimuths (higher close to the origin) to
    # increase performance
    na[na > 20] = 20
    sr = np.fix(wsize / rscale + 0.5).astype(int)
    for sa in np.unique(na):
        imax = np.where(na >= sa)[0][-1] + 1
        imin = np.where(na <= sa)[0][0]
        if sa == 0:
            data_filtered[:, imin:imax] = img[:, imin:imax]
        imin2 = max(imin - sr, 0)
        imax2 = min(imax + sr, nbins)
        temp = img[:, imin2:imax2]
        temp = fun(temp, size=2 * sa + 1, mode='wrap', axis=0)
        temp = fun(temp, size=2 * sr + 1, axis=1)
        imin3 = imin - imin2
        imax3 = imin3 + imax - imin
        data_filtered[:, imin:imax] = temp[:, imin3:imax3]
    return data_filtered