The following code examples, extracted from open-source Python projects, illustrate how to use numpy.round().
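Before the project examples, here is a minimal sketch of numpy.round() itself (values are illustrative): it rounds to a given number of decimals, rounds exact halves to the nearest even value, and returns floats, which is why the examples below often wrap it in int() or .astype(int).

import numpy as np

a = np.array([1.234, 2.5, 3.5, -0.0051])
print(np.round(a))              # [ 1.  2.  4. -0.]  -- halves go to the nearest even value
print(np.round(a, 2))           # [ 1.23  2.5   3.5  -0.01]
print(np.round(a, decimals=3))  # [ 1.234  2.5    3.5   -0.005]
print(int(np.round(2.7)))       # 3 -- np.round returns a float, so cast explicitly when an index is needed
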
def resample(image, scan, new_spacing=[1,1,1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))   #scan[2].SliceThickness

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')  ### early orig modified

    return image, new_spacing

def resample(image, scan, new_spacing=[1,1,1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    #image = scipy.ndimage.interpolation.zoom(image, real_resize_factor)  # neither mode="wrap"/... nor cval=-1024 can ensure that the min and max values are unchanged; cval added
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')  ### early orig modified
    #image = scipy.ndimage.zoom(image, real_resize_factor, order=1)  # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)
    #image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest', order=1)  # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)

    return image, new_spacing

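The resample pattern above (it reappears in a later example) rounds the target shape to whole voxels and then recomputes the spacing that is actually achievable. A self-contained sketch of just that arithmetic, with made-up CT-like numbers:

import numpy as np

spacing = np.array([2.5, 0.7, 0.7])        # (z, y, x) mm per voxel, illustrative
shape = np.array([100, 512, 512])          # original volume shape
new_spacing = np.array([1.0, 1.0, 1.0])    # desired isotropic spacing

resize_factor = spacing / new_spacing      # [2.5, 0.7, 0.7]
new_real_shape = shape * resize_factor     # [250., 358.4, 358.4]
new_shape = np.round(new_real_shape)       # [250., 358., 358.] -- whole voxel counts
real_resize_factor = new_shape / shape     # the factor actually passed to zoom()
achievable_spacing = spacing / real_resize_factor
print(new_shape, achievable_spacing)       # spacing comes out ~[1.0, 1.0011, 1.0011], not exactly 1 mm
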
def handle_data(self, data):
    if self.target_shares == 0:
        assert 0 not in self.portfolio.positions
        self.order(self.sid(0), 10)
        self.target_shares = 10
        return
    else:
        print(self.portfolio)
        assert self.portfolio.positions[0]['amount'] == \
            self.target_shares, "Orders not filled immediately."
        assert self.portfolio.positions[0]['last_sale_price'] == \
            data[0].price, "Orders not filled at current price."

    self.order_target_value(self.sid(0), 20)
    self.target_shares = np.round(20 / data[0].price)

    if isinstance(self.sid(0), Equity):
        self.target_shares = np.round(20 / data[0].price)
    if isinstance(self.sid(0), Future):
        self.target_shares = np.round(
            20 / (data[0].price * self.sid(0).multiplier))

def round_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
    """
    Align a potentially non-axis aligned bbox to the grid by rounding it
    to the nearest grid lines.

    Required:
      chunk_size: arraylike (x,y,z), the size of chunks in the
        dataset e.g. (64,64,64)
    Optional:
      offset: arraylike (x,y,z), the starting coordinate of the dataset
    """
    chunk_size = np.array(chunk_size, dtype=np.float32)
    result = self.clone()
    result = result - offset
    result.minpt = np.round(result.minpt / chunk_size) * chunk_size
    result.maxpt = np.round(result.maxpt / chunk_size) * chunk_size
    return result + offset

def draw_bounding_boxes(image, gt_boxes, im_info):
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in xrange(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(disp_image,
                                      gt_boxes_new[i, 0],
                                      gt_boxes_new[i, 1],
                                      gt_boxes_new[i, 2],
                                      gt_boxes_new[i, 3],
                                      'N%02d-C%02d' % (i, this_class),
                                      FONT,
                                      color=STANDARD_COLORS[this_class % NUM_COLORS])

    image[0, :] = np.array(disp_image)
    return image

def quantized_forward_pass_cost_and_output(inputs, weights, scales, biases=None, quantization_method='round',
                                           hidden_activations='relu', output_activation='relu',
                                           computation_calc='adds', seed=None):
    """
    Do a forward pass of a discretized network, and return the (pseudo) computational cost and final output.

    :param inputs: A (n_samples, n_dims) array of inputs
    :param weights: A list of (n_dim_in, n_dim_out) arrays of weights
    :param scales: A list of (w[0].shape[0], w[1].shape[0], ...) scales to multiply/divide by before/after the quantization
    :param quantization_method: The method of quantization/discretization: 'round', 'uniform', None, ....
    :param seed: A random seed or number generator
    :return: n_ops, output_activation: Where:
        n_ops is the (scalar) number of computations required in the forward pass (only strictly true if scale is
            'round'; otherwise it's some kind of surrogate).
        output_activation: A (n_samples, n_dims) array representing the output activations.
    """
    activations = scaled_quantized_forward_pass(inputs=inputs, weights=weights, biases=biases, scales=scales,
                                                hidden_activations=hidden_activations, output_activations=output_activation,
                                                quantization_method=quantization_method, rng=seed)
    spike_activations = activations[1::3]
    n_ops = sparse_nn_flop_count(spike_activations, [w.shape[1] for w in weights], mode=computation_calc) if quantization_method is not None else None
    return n_ops, activations[-1]

def resample(patient, new_spacing=[1,1,1]):
    scan = get_scan(patient)
    image = get_3D_data(patient)

    # Determine current pixel spacing
    spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    image = nd.interpolation.zoom(image, real_resize_factor, mode='nearest')

    return image

# For the sake of testing the network, we'll be using the sample dataset
# For this, we'll use the maximum size of the image
# and PAD any image with -1000 values which is smaller than that

# PS: only the first dimension is different in sample dataset
# which is not the case in actual dataset

def did_succeed(output_dict, cond_dict):
    '''
    Used in rejection sampling:
    for each row, determine if cond is satisfied for every cond in cond_dict

    success is hardcoded as round(label) being exactly equal to the integer in cond_dict
    '''

    # definition of success:
    def is_win(key):
        #cond=np.squeeze(cond_dict[key])
        cond = np.squeeze(cond_dict[key])
        val = np.squeeze(output_dict[key])
        condition = np.round(val) == cond
        return condition

    scoreboard = [is_win(key) for key in cond_dict]
    #print('scoreboard', scoreboard)
    all_victories_bool = np.logical_and.reduce(scoreboard)
    return all_victories_bool.flatten()

def func_impl(self, x):
    objval, invalid = None, False
    for i, t in enumerate(x):
        if t < self.p_min[i] or t > self.p_max[i]:
            objval = float("inf")
            invalid = True

    if not invalid:
        # parameters typed as "integer" are rounded to the nearest int
        x = [int(np.round(x_t)) if p_t == "integer" else x_t
             for p_t, x_t in zip(self.p_types, x)]
        objval = self._coef * self.func(x)

    print("{:5d} | {} | {:>15.5f}".format(
        self.n_eval,
        " | ".join(["{:>15.5f}".format(t) for t in x]),
        self._coef * objval
    ))
    self.n_eval += 1
    return objval

def clean_height(df):
    v = df.VALUE.astype(float)
    idx = df.VALUEUOM.fillna('').apply(lambda s: 'in' in s.lower()) | df.MIMIC_LABEL.apply(lambda s: 'in' in s.lower())
    v.ix[idx] = np.round(v[idx] * 2.54)
    return v

# ETCO2: haven't found yet
# Urine output: ambiguous units (raw ccs, ccs/kg/hr, 24-hr, etc.)
# Tidal volume: tried to substitute for ETCO2 but units are ambiguous
# Glascow coma scale eye opening
# Glascow coma scale motor response
# Glascow coma scale total
# Glascow coma scale verbal response
# Heart Rate
# Respiratory rate
# Mean blood pressure

def test_sample_NormalFloatHyperparameter(self):
    hp = NormalFloatHyperparameter("nfhp", 0, 1)

    def actual_test():
        rs = np.random.RandomState(1)
        counts_per_bin = [0 for i in range(11)]
        for i in range(100000):
            value = hp.sample(rs)
            index = min(max(int((round(value + 0.5)) + 5), 0), 9)
            counts_per_bin[index] += 1

        self.assertEqual([0, 4, 138, 2113, 13394, 34104, 34282, 13683, 2136, 146, 0],
                         counts_per_bin)

        return counts_per_bin

    self.assertEqual(actual_test(), actual_test())

def round_solution_pool(pool, constraints):

    pool.distinct().sort()
    P = pool.P
    L0_reg_ind = np.isnan(constraints['coef_set'].C_0j)
    L0_max = constraints['L0_max']
    rounded_pool = SolutionPool(P)

    for solution in pool.solutions:
        # sort from largest to smallest coefficients
        feature_order = np.argsort([-abs(x) for x in solution])
        rounded_solution = np.zeros(shape=(1, P))
        l0_norm_count = 0
        for k in range(0, P):
            j = feature_order[k]
            if not L0_reg_ind[j]:
                rounded_solution[0, j] = np.round(solution[j], 0)
            elif l0_norm_count < L0_max:
                rounded_solution[0, j] = np.round(solution[j], 0)
                l0_norm_count += L0_reg_ind[j]

        rounded_pool.add(objvals=np.nan, solutions=rounded_solution)

    rounded_pool.distinct().sort()
    return rounded_pool

def resize(im, target_size, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return:
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale

def resize(im, target_size, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return:
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale

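The two resize helpers above (and prep_im_for_blob further down) use np.round only to decide whether the scaled long side would exceed max_size. A self-contained sketch of that arithmetic with illustrative sizes:

import numpy as np

target_size, max_size = 600, 1000
h, w = 375, 1242                                      # illustrative image shape
im_size_min, im_size_max = min(h, w), max(h, w)

im_scale = float(target_size) / float(im_size_min)    # 1.6
if np.round(im_scale * im_size_max) > max_size:       # round(1987.2) = 1987 > 1000
    im_scale = float(max_size) / float(im_size_max)   # cap the scale at ~0.805
print(im_scale)                                       # 0.8051...
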
def generateTickText(tickValue, ratio, baseline=False):
    multStep = 1000.
    multipliers = [
        dict(suffix='', mult=pow(multStep, 0)),
        dict(suffix='k', mult=pow(multStep, 1)),
        dict(suffix='M', mult=pow(multStep, 2)),
        dict(suffix='G', mult=pow(multStep, 3)),
    ]
    multiplier = multipliers[0]
    for m in multipliers:
        if np.round(tickValue / m['mult'], decimals=2) >= 1:
            multiplier = m
    baseText = float('%.3g' % np.round(tickValue / multiplier['mult'], decimals=2))
    baseText = int(baseText) if int(baseText) == baseText else baseText
    suffix = multiplier['suffix']
    percent = float('%.1f' % (100 * ratio))
    percent = int(percent) if percent == int(percent) else percent
    return '%s%s [%s%%]' % (baseText, suffix, percent)

def generateTickText(tickValue, ratio, baseline=False):
    multStep = 1000.
    multipliers = [
        dict(suffix='', mult=pow(multStep, 0)),
        dict(suffix='k', mult=pow(multStep, 1)),
        dict(suffix='M', mult=pow(multStep, 2)),
        dict(suffix='G', mult=pow(multStep, 3)),
    ]
    multiplier = multipliers[0]
    for m in multipliers:
        if np.round(tickValue / m['mult']) >= 1:
            multiplier = m
    baseText = float('%.3g' % np.round(tickValue / multiplier['mult']))
    baseText = int(baseText) if int(baseText) == baseText else baseText
    suffix = multiplier['suffix']
    percent = float('%.1f' % (100 * ratio))
    percent = int(percent) if percent == int(percent) else percent
    return '%s%s [%s%%]' % (baseText, suffix, percent)

def apply_regr(x, y, w, h, tx, ty, tw, th):
    try:
        cx = x + w/2.
        cy = y + h/2.
        cx1 = tx * w + cx
        cy1 = ty * h + cy
        w1 = math.exp(tw) * w
        h1 = math.exp(th) * h
        x1 = cx1 - w1/2.
        y1 = cy1 - h1/2.
        x1 = int(round(x1))
        y1 = int(round(y1))
        w1 = int(round(w1))
        h1 = int(round(h1))

        return x1, y1, w1, h1

    except ValueError:
        return x, y, w, h
    except OverflowError:
        return x, y, w, h
    except Exception as e:
        print(e)
        return x, y, w, h

def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im = im / 127.5
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale

def _gene_embed_space(self, vec):
    shape = vec.shape
    vec = vec.flatten()
    combo_neg_idx = np.array([1 if vec[i] < 0 else 0 for i in range(len(vec))])
    vec_pos = np.abs(vec)
    int_part = np.floor(vec_pos)
    frac_part = np.round(vec_pos - int_part, 2)

    bi_int_part = []
    # build a fixed-width 16-bit binary representation of each integer part
    for i in range(len(int_part)):
        bi = list(bin(int(int_part[i]))[2:])
        bie = [0] * (16 - len(bi))
        bie.extend(bi)
        bi_int_part.append(np.array(bie, dtype=np.uint16))

    bi_int_part = np.array(bi_int_part)
    # take bit 10 of each 16-bit integer part as the signature bit
    sig = []
    for i in range(len(bi_int_part)):
        sig.append(bi_int_part[i][10])
    sig = np.array(sig).reshape(shape)

    return np.array(bi_int_part), frac_part.reshape(shape), combo_neg_idx.reshape(shape), sig

def crop_pad(image, corner, shape):
    ndim = len(corner)
    corner = [int(round(c)) for c in corner]
    shape = [int(round(s)) for s in shape]
    original = image.shape[-ndim:]
    zipped = list(zip(corner, shape, original))  # materialize so it can be iterated twice

    # pad only when the crop region extends beyond the image;
    # built-in any() evaluates the generator element-wise
    if any(c < 0 or c + s > o for (c, s, o) in zipped):
        no_padding = [(0, 0)] * (image.ndim - ndim)
        padding = [(max(-c, 0), max(c + s - o, 0)) for (c, s, o) in zipped]
        corner = [c + max(-c, 0) for c in corner]
        image_temp = np.pad(image, no_padding + padding, mode=str('constant'))
    else:
        image_temp = image

    no_crop = [slice(o + 1) for o in image.shape[:-ndim]]
    crop = [slice(c, c + s) for (c, s) in zip(corner, shape)]
    return image_temp[tuple(no_crop + crop)]

def compute_centroids(object_matrix, preserve_ids=False, round_val=False):
    # if ids=true, then write a matrix equal to size of maximum
    # value, else, order in object label order

    # if round = true, round centroid coordinates to nearest integer
    # when rounding, TODO: make sure we don't leave the volume
    import skimage.measure as measure

    centroids = []

    # Threshold data
    rp = measure.regionprops(object_matrix)

    for r in rp:
        if round_val > 0:
            centroids.append(np.round(r.Centroid, round_val))
        else:
            centroids.append(r.Centroid)

    return centroids

def pareto_front(vals1, vals2, round_val=3):
    # "butter and guns" pareto front: removes points that are not on
    # the pareto frontier

    # round very similar vals
    vals1 = np.round(vals1, round_val)
    vals2 = np.round(vals2, round_val)

    v1_out = []
    v2_out = []
    idx_out = []

    for idx in range(0, len(vals1)):
        # indices of other points that are at least as good in both objectives
        is_better = np.where((vals1 >= vals1[idx]) & (vals2 >= vals2[idx]))[0]
        is_better = is_better[is_better != idx]
        if is_better.size == 0:
            v1_out.append(vals1[idx])
            v2_out.append(vals2[idx])
            idx_out.append(idx)

    return v1_out, v2_out, idx_out

def _downsample_mask(X, pct):
    """ Create a boolean mask indicating which subset of X should be
    evaluated.
    """
    if pct < 1.0:
        Mask = np.zeros(X.shape, dtype=np.bool)
        m = X.shape[-2]
        n = X.shape[-1]
        nToEval = np.round(pct * m * n).astype(np.int32)
        idx = sobol(2, nToEval, 0)
        idx[0] = np.floor(m * idx[0])
        idx[1] = np.floor(n * idx[1])
        idx = idx.astype(np.int32)
        Mask[:, :, idx[0], idx[1]] = True
    else:
        Mask = np.ones(X.shape, dtype=np.bool)

    return Mask

def prepare_oae_PU4(known_transisitons):
    print("Learn from pre + action label", "*** INCOMPATIBLE MODEL! ***", sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:, N:])) > 0.5)[0]
    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons[:, :N], np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y[:, :N], np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))

def prepare_oae_PU5(known_transisitons):
    print("Learn from pre + suc + action label", "*** INCOMPATIBLE MODEL! ***", sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:, N:])) > 0.5)[0]
    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons, np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y, np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))

def puzzle_plot(p):
    p.setup()

    def name(template):
        return template.format(p.__name__)

    from itertools import islice
    configs = list(islice(p.generate_configs(9), 1000))  # be careful, islice is not immutable!!!
    import numpy.random as random
    random.shuffle(configs)
    configs = configs[:10]
    puzzles = p.generate(configs, 3, 3)
    print(puzzles.shape, "mean", puzzles.mean(), "stdev", np.std(puzzles))
    plot_image(puzzles[-1], name("{}.png"))
    plot_image(np.clip(puzzles[-1] + np.random.normal(0, 0.1, puzzles[-1].shape), 0, 1), name("{}+noise.png"))
    plot_image(np.round(np.clip(puzzles[-1] + np.random.normal(0, 0.1, puzzles[-1].shape), 0, 1)), name("{}+noise+round.png"))
    plot_grid(puzzles, name("{}s.png"))
    _transitions = p.transitions(3, 3, configs=configs)
    print(_transitions.shape)
    transitions_for_show = \
        np.einsum('ba...->ab...', _transitions) \
          .reshape((-1,) + _transitions.shape[2:])
    print(transitions_for_show.shape)
    plot_grid(transitions_for_show, name("{}_transitions.png"))

def point_trans(ori_point, angle, ori_shape, new_shape):
    """ Transform the point from original to rotated image.
    Args:
        ori_point: Point coordinates in original image.
        angle: Rotate angle.
        ori_shape: The shape of original image.
        new_shape: The shape of rotated image.
    Returns:
        Numpy array of new point coordinates in rotated image.
    """
    dx = ori_point[0] - ori_shape[1] / 2.0
    dy = ori_point[1] - ori_shape[0] / 2.0

    t_x = round(dx * math.cos(angle) - dy * math.sin(angle) + new_shape[1] / 2.0)
    t_y = round(dx * math.sin(angle) + dy * math.cos(angle) + new_shape[0] / 2.0)
    return np.array((int(t_x), int(t_y)))

def predict_tf_once(day, start_date='2016-10-1'):
    all_dataset = get_dataset(day)
    all_dataset = map(lambda x: x.ix[start_date:start_date], all_dataset)
    y_p_features = map(lambda user_id: tf_percent_model.resample_x_y_(all_dataset, user_id)[0].reshape(-1), get_full_user_ids())
    y_p_features_df = pd.DataFrame(y_p_features, index=get_full_user_ids())
    percent = pd.DataFrame.from_csv('./features/tensorflow_model/percent_model/%d.csv' % day)
    #percent = pd.DataFrame.from_csv('./features/tensorflow_model/percent_model/%d.csv' % 2)
    #%%
    percent = percent[map(lambda x: 'percent#%d' % x, range(_feature_length))]
    t = pd.DataFrame(index=percent.index)
    t[pd.Timestamp(start_date) + pd.Timedelta('%dd' % (day - 1))] = (np.array(y_p_features_df) * percent).sum(axis=1)
    t = t.T
    t.to_csv('./result/predict_part/%d.csv' % day)
    real = int(np.round((np.array(y_p_features_df) * percent).sum().sum()))
    print(day, real)
    return (day, real)

def test_minmax_funcs_with_output(self):
    # Tests the min/max functions with explicit outputs
    mask = np.random.rand(12).round()
    xm = array(np.random.uniform(0, 10, 12), mask=mask)
    xm.shape = (3, 4)
    for funcname in ('min', 'max'):
        # Initialize
        npfunc = getattr(np, funcname)
        mafunc = getattr(numpy.ma.core, funcname)
        # Use the np version
        nout = np.empty((4,), dtype=int)
        try:
            result = npfunc(xm, axis=0, out=nout)
        except MaskError:
            pass
        nout = np.empty((4,), dtype=float)
        result = npfunc(xm, axis=0, out=nout)
        self.assertTrue(result is nout)
        # Use the ma version
        nout.fill(-999)
        result = mafunc(xm, axis=0, out=nout)
        self.assertTrue(result is nout)

def test_round(self):
    a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
              mask=[0, 1, 0, 0, 0])
    assert_equal(a.round(), [1., 2., 3., 5., 6.])
    assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
    assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
    b = empty_like(a)
    a.round(out=b)
    assert_equal(b, [1., 2., 3., 5., 6.])

    x = array([1., 2., 3., 4., 5.])
    c = array([1, 1, 1, 0, 0])
    x[2] = masked
    z = where(c, x, -x)
    assert_equal(z, [1., 2., 0., -4., -5])
    c[0] = masked
    z = where(c, x, -x)
    assert_equal(z, [1., 2., 0., -4., -5])
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)

def test_round_with_scalar(self):
    # Testing round with scalar/zero dimension input
    # GH issue 2244
    a = array(1.1, mask=[False])
    assert_equal(a.round(), 1)

    a = array(1.1, mask=[True])
    assert_(a.round() is masked)

    a = array(1.1, mask=[False])
    output = np.empty(1, dtype=float)
    output.fill(-9999)
    a.round(out=output)
    assert_equal(output, 1)

    a = array(1.1, mask=[False])
    output = array(-9999., mask=[True])
    a.round(out=output)
    assert_equal(output[()], 1)

    a = array(1.1, mask=[True])
    output = array(-9999., mask=[False])
    a.round(out=output)
    assert_(output[()] is masked)

def dec_round(num, dprec=4, rnd='down', rto_zero=False):
    """
    Round up/down numeric ``num`` at specified decimal ``dprec``.

    Parameters
    ----------
    num: float
    dprec: int
        Decimal position for truncation.
    rnd: str (default: 'down')
        Set as 'up' or 'down' to return a rounded-up or rounded-down value.
    rto_zero: bool (default: False)
        Use a *round-towards-zero* method, e.g., ``floor(-3.5) == -3``.

    Returns
    ----------
    float (default: rounded-up)
    """
    scale = 10**dprec  # keep dprec unchanged so the np.round fallback gets a decimal count
    if rnd == 'up' or (rnd == 'down' and rto_zero and num < 0.):
        return np.ceil(num*scale)/scale
    elif rnd == 'down' or (rnd == 'up' and rto_zero and num < 0.):
        return np.floor(num*scale)/scale
    return np.round(num, dprec)

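A quick usage sketch for dec_round (values are illustrative; assumes the function above is in scope and numpy is imported as np):

print(dec_round(3.14159, dprec=2, rnd='up'))                    # 3.15  (ceil at the 2nd decimal)
print(dec_round(3.14159, dprec=2, rnd='down'))                  # 3.14  (floor at the 2nd decimal)
print(dec_round(-3.14159, dprec=2, rnd='down'))                 # -3.15 (floor moves away from zero)
print(dec_round(-3.14159, dprec=2, rnd='down', rto_zero=True))  # -3.14 (truncates toward zero instead)
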
def sample_output(self, val):
    vocabulary = self.get_vocabulary()
    if self.one_hot:
        vals = [np.argmax(r) for r in val]
        ox_val = [vocabulary[obj] for obj in list(vals)]
        string = "".join(ox_val)
        return string
    else:
        val = np.reshape(val, [-1])
        val *= len(vocabulary) / 2.0
        val += len(vocabulary) / 2.0
        val = np.round(val)
        val = np.maximum(0, val)
        val = np.minimum(len(vocabulary) - 1, val)
        ox_val = [self.get_character(obj) for obj in list(val)]
        string = "".join(ox_val)
        return string

def __init__(self, opt_engine, topK=16, grid_size=None, nps=320, model_name='tmp'):
    QWidget.__init__(self)
    self.topK = topK
    if grid_size is None:
        self.n_grid = int(np.ceil(np.sqrt(self.topK)))
        self.grid_size = (self.n_grid, self.n_grid)  # (width, height)
    else:
        self.grid_size = grid_size
    self.select_id = 0
    self.ims = None
    self.vis_results = None
    self.width = int(np.round(nps / (4 * float(self.grid_size[1])))) * 4
    self.winWidth = self.width * self.grid_size[0]
    self.winHeight = self.width * self.grid_size[1]
    self.setFixedSize(self.winWidth, self.winHeight)
    self.opt_engine = opt_engine
    self.frame_id = -1
    self.sr = save_result.SaveResult(model_name=model_name)

def sresample(src, outshape):
    """ Simple 3d array resampling

    Inputs:
      src -- a ndimensional array (dim>2)
      outshape -- fixed output shape for the first 2 dimensions

    Outputs:
      hout -- resulting n-dimensional array
    """
    inh, inw = src.shape[:2]
    outh, outw = outshape
    hslice = (np.arange(outh) * (inh - 1.) / (outh - 1.)).round().astype(int)
    wslice = (np.arange(outw) * (inw - 1.) / (outw - 1.)).round().astype(int)
    hout = src[hslice, :][:, wslice]
    return hout.copy()

def stationInfo(stnds, varname, name, titlestr=None, alttitle=None, lflatten=False, lmon=False,):
    ''' helper to generate an axes title with station info '''
    if stnds.hasAxis('station'):
        nstn = len(stnds.axes['station'])  # number of stations
    else:
        nstn = 1  # single station
    if stnds.name[:3].lower() == 'obs' and varname in stnds:
        ec = stnds[varname]  # some variables are not present everywhere
        if ec.hasAxis('time') and ec.time.units[:3].lower() == 'mon':
            units = 'mon.'
        elif ec.hasAxis('year') and ec.year.units.lower() == 'year':
            units = 'yrs.'
        else:
            units = 'mon.' if lmon else 'yrs.'
        mask = ec.data_array.mask if isinstance(ec.data_array, np.ma.MaskedArray) else np.isnan(ec.data_array)
        if lflatten:
            rec_len = (ec.data_array.size - mask.sum())  # valid years in obs/EC
        else:
            rec_len = int(np.round(ec.data_array.shape[-1] - mask.sum(axis=-1).mean()))  # valid years in obs/EC
        if titlestr:
            axtitle = titlestr.format(name, nstn, rec_len)  # axes label
        else:
            axtitle = "{:s} (#{:d}, {:d} {:s})".format(name, nstn, rec_len, units)  # axes label
    else:
        if alttitle:
            axtitle = alttitle.format(name, nstn)  # axes label
        elif titlestr:
            axtitle = titlestr.format(name, nstn)  # axes label
        else:
            axtitle = "{:s} (#{:d}, WRF only)".format(name, nstn)  # axes label
    return axtitle

# function to compute some statistics and print them

def get_fft_mel_mat(nfft, sr=8000, nfilts=None, width=1.0, minfrq=20, maxfrq=None, constamp=0):
    if nfilts is None:
        nfilts = nfft
    if maxfrq is None:
        maxfrq = sr // 2
    wts = np.zeros((nfilts, nfft//2+1))
    fftfrqs = np.arange(0, nfft//2+1) / (1. * nfft) * (sr)
    minmel = hz2mel(minfrq)
    maxmel = hz2mel(maxfrq)
    binfrqs = mel2hz(minmel + np.arange(0, nfilts+2) / (nfilts+1.) * (maxmel - minmel))
    # binbin = np.round(binfrqs / maxfrq * nfft)
    for i in range(nfilts):
        fs = binfrqs[[i+0, i+1, i+2]]
        fs = fs[1] + width * (fs - fs[1])
        loslope = (fftfrqs - fs[0]) / (fs[1] - fs[0])
        hislope = (fs[2] - fftfrqs) / (fs[2] - fs[1])
        wts[i, :] = np.maximum(0, np.minimum(loslope, hislope))
    return wts

def CSMToBinary(D, Kappa):
    """
    Turn a cross-similarity matrix into a binary cross-similarity matrix
    If Kappa = 0, take all neighbors
    If Kappa < 1 it is the fraction of mutual neighbors to consider
    Otherwise Kappa is the number of mutual neighbors to consider
    """
    N = D.shape[0]
    M = D.shape[1]
    if Kappa == 0:
        return np.ones((N, M))
    elif Kappa < 1:
        NNeighbs = int(np.round(Kappa * M))
    else:
        NNeighbs = Kappa
    J = np.argpartition(D, NNeighbs, 1)[:, 0:NNeighbs]
    I = np.tile(np.arange(N)[:, None], (1, NNeighbs))
    V = np.ones(I.size)
    [I, J] = [I.flatten(), J.flatten()]
    ret = sparse.coo_matrix((V, (I, J)), shape=(N, M))
    return ret.toarray()

def print_word_vectors(word_vectors, write_path):
    """
    This function prints the collection of word vectors to file, in a plain textual format.
    """
    f_write = codecs.open(write_path, 'w', 'utf-8')

    for key in word_vectors:
        print(key, " ".join(map(str, numpy.round(word_vectors[key], decimals=6))), file=f_write)

    print("Printed", len(word_vectors), "word vectors to:", write_path)

def trataGroups(objeto):
    current = list(filter(None.__ne__, objeto))
    current = np.sort(current, axis=0)

    for i in range(len(current[0])):
        current_ = [j[i] for j in current]
        mean_ = np.round(np.mean(current_, axis=0), 4)
        deviation_ = np.round(np.std(current_, axis=0, ddof=1), 4)

    return [mean_, deviation_]

def trataGroups(objeto):
    current = list(filter(None.__ne__, objeto))

    mean_ = np.round(np.mean(current, axis=0), 4)
    deviation_ = np.round(np.std(current, axis=0, ddof=1), 4)
    fivecent = np.round(np.percentile(current, 5.0, axis=0), 4)

    # confidence intervals
    lowci = np.round(np.percentile(current, 2.5, axis=0), 4)
    highci = np.round(np.percentile(current, 97.5, axis=0), 4)

    return [mean_, deviation_, fivecent, current, lowci, highci]

def PA(samples, variables):
    datasets = 5000
    eig_vals = []

    for i in range(datasets):
        data = np.random.standard_normal((variables, samples))
        cor_ = np.corrcoef(data)
        eig_vals.append(np.sort(np.linalg.eig(cor_)[0])[::-1])

    quantile = (np.round(np.percentile(eig_vals, 95.0, axis=0), 4))
    mean_ = (np.round(np.mean(eig_vals, axis=0), 4))

    return quantile

def get_line_region(self, position, name=''):
    """Creates a line region at the given position (start_x, start_y, end_x, end_y), inclusive.

    Args:
        position: Position of the line region (start_x, start_y, end_x, end_y).
        name: Name of the region.

    Returns:
        Line region.
    """
    start_idx = self.get_index(position[:2])
    end_idx = self.get_index(position[2:])

    x_diff = start_idx % self.x.samples - end_idx % self.x.samples
    y_diff = int(start_idx / self.x.samples) - int(end_idx / self.x.samples)

    num_points = max(np.abs([x_diff, y_diff]))
    point_indices = []

    for ii in range(num_points + 1):
        x_position = start_idx % self.x.samples - np.round(ii / num_points * x_diff)
        y_position = int(start_idx / self.x.samples) - np.round(ii / num_points * y_diff)
        point_indices.append(int(x_position + self.x.samples * y_position))

    return reg.LineRegion(point_indices, position, name=name)

def seq(start, stop, step=1):
    n = int(round((stop - start) / float(step)))
    if n > 1:
        return([start + step*i for i in range(n+1)])
    else:
        return([])

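A small usage sketch of seq, which draw_circles below relies on (assumes the function above is defined); note that a span of one step or less yields an empty list:

print(seq(-2, 2, 1))      # [-2, -1, 0, 1, 2]
print(seq(-1.5, 1.5, 1))  # [-1.5, -0.5, 0.5, 1.5]
print(seq(-0.4, 0.4, 1))  # []  (n = round(0.8) = 1, which is not > 1)
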
def draw_circles(image, cands, origin, spacing):
    # make empty matrix, which will be filled with the mask
    image_mask = np.zeros(image.shape, dtype=np.int16)

    # run over all the nodules in the lungs
    for ca in cands.values:
        # get middle x-, y-, and z-worldcoordinate of the nodule
        #radius = np.ceil(ca[4])/2    ## original: replaced the ceil with a very minor increase of 1% ....
        radius = (ca[4])/2 + 0.51 * spacing[0]  # increasing by circa half of the distance in the z direction .... (trying to capture a wider region/border for learning ... and address the rough net)

        coord_x = ca[1]
        coord_y = ca[2]
        coord_z = ca[3]
        image_coord = np.array((coord_z, coord_y, coord_x))

        # determine voxel coordinate given the worldcoordinate
        image_coord = world_2_voxel(image_coord, origin, spacing)

        # determine the range of the nodule
        #noduleRange = seq(-radius, radius, RESIZE_SPACING[0])  # original, uniform spacing
        noduleRange_z = seq(-radius, radius, spacing[0])
        noduleRange_y = seq(-radius, radius, spacing[1])
        noduleRange_x = seq(-radius, radius, spacing[2])

        #x = y = z = -2
        # create the mask
        for x in noduleRange_x:
            for y in noduleRange_y:
                for z in noduleRange_z:
                    coords = world_2_voxel(np.array((coord_z+z, coord_y+y, coord_x+x)), origin, spacing)
                    #if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius:  ### original (constrained to a uniform RESIZE)
                    if (np.linalg.norm((image_coord - coords) * spacing)) < radius:
                        image_mask[int(np.round(coords[0])), int(np.round(coords[1])), int(np.round(coords[2]))] = int(1)

    return image_mask

def train_and_eval_sklearn_classifier(clf, data):

    x_train = data['x_train']
    y_train = data['y_train']

    x_test = data['x_test']
    y_test = data['y_test']

    clf.fit(x_train, y_train)

    try:
        p = clf.predict_proba(x_train)[:, 1]  # sklearn convention
    except IndexError:
        p = clf.predict_proba(x_train)

    ll = log_loss(y_train, p)
    auc = AUC(y_train, p)
    acc = accuracy(y_train, np.round(p))

    print("\n# training | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format(ll, auc, acc))

    #

    try:
        p = clf.predict_proba(x_test)[:, 1]  # sklearn convention
    except IndexError:
        p = clf.predict_proba(x_test)

    ll = log_loss(y_test, p)
    auc = AUC(y_test, p)
    acc = accuracy(y_test, np.round(p))

    print("# testing | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format(ll, auc, acc))

    #return { 'loss': 1 - auc, 'log_loss': ll, 'auc': auc }
    return {'loss': ll, 'log_loss': ll, 'auc': auc}

###

# "clf", even though it's a regressor
