The following code examples, extracted from open-source Python projects, illustrate how to use numpy.vectorize().
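Before the extracted examples, here is a minimal self-contained sketch of the pattern they all rely on (the function and names here are ours, purely illustrative): np.vectorize() wraps a scalar Python function so it broadcasts element-wise over arrays, with otypes fixing the output dtype and excluded keeping selected arguments out of the broadcasting. Note that np.vectorize is a convenience loop, not a compiled speedup.

import numpy as np

def clip_unit(x, lo=0.0, hi=1.0):
    """Plain scalar function: ordinary Python control flow, no array logic."""
    if x < lo:
        return lo
    return hi if x > hi else x

# otypes fixes the output dtype up front, so numpy never has to call the
# function on the first element just to infer it (and empty inputs work).
# excluded keeps 'lo' and 'hi' as plain scalars instead of broadcasting them.
clip_v = np.vectorize(clip_unit, otypes=[float], excluded=['lo', 'hi'])

print(clip_v([-0.5, 0.25, 2.0]))             # [0.   0.25 1.  ]
print(clip_v(np.full((2, 2), 3.0)))          # works on any array shape
print(clip_v([-1.0, 5.0], lo=0.0, hi=2.0))   # [0. 2.]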
def consideronlylabels(self, list2consider, verbose=False):
    """
    Add labels to the ignoredlabels list (set) and update the self._labels cache.
    """
    if isinstance(list2consider, int):
        list2consider = [list2consider]
    toignore = set(np.unique(self.image)) - set(list2consider)
    integers = np.vectorize(lambda x: int(x))
    toignore = integers(list(toignore)).tolist()
    if verbose:
        print('Adding labels', toignore, 'to the list of labels to ignore...')
    self._ignoredlabels.update(toignore)
    if verbose:
        print('Updating labels list...')
    self._labels = self.__labels()
def __init__(self, num_hidden_nodes, data_matrix, data_labels,
             training_indices, use_file=True):
    # vectorized sigmoid
    self.sigmoid = np.vectorize(self._sigmoid_scalar)
    # vectorized sigmoid derivative
    self.sigmoid_prime = np.vectorize(self._sigmoid_prime_scalar)
    # whether to load/save the weights from a file
    self._use_file = use_file
    # training data
    self.data_matrix = data_matrix
    self.data_labels = data_labels
    if (not os.path.isfile(OCRNeuralNetwork.NN_FILE_PATH) or not use_file):
        # randomly initialize the weights
        self.theta1 = self._rand_initialize_weights(400, num_hidden_nodes)
        self.theta2 = self._rand_initialize_weights(num_hidden_nodes, 10)
        self.input_layer_bias = self._rand_initialize_weights(1, num_hidden_nodes)
        self.hidden_layer_bias = self._rand_initialize_weights(1, 10)
        # train the network
        TrainData = namedtuple('TrainData', ['y0', 'label'])
        self.train([TrainData(self.data_matrix[i], int(self.data_labels[i]))
                    for i in training_indices])
        self.save()
    else:
        # load the trained weights from nn.json
        self._load()
def test_keywords2_ticket_2100(self):
    # Test kwarg support: enhancement ticket 2100
    def foo(a, b=1):
        return a + b
    f = vectorize(foo)
    args = np.array([1, 2, 3])
    r1 = f(a=args)
    r2 = np.array([2, 3, 4])
    assert_array_equal(r1, r2)
    r1 = f(b=1, a=args)
    assert_array_equal(r1, r2)
    r1 = f(args, b=2)
    r2 = np.array([3, 4, 5])
    assert_array_equal(r1, r2)
def setUp(self):
    """Setup script for each test"""
    # Initial estimate of prior functional form
    init_prior = np.vectorize(lambda v: 2.56e9 / v**3)

    # Create the model and *true* EOS
    self.eos_model = EOSModel(init_prior, name="Default EOS Model")
    self.eos_true = EOSBump()

    # Create the objects to generate simulations and
    # pseudo experimental data
    self.exp1 = GunExperiment(model=self.eos_true)
    self.sim1 = Gun(name="Default Gun Simulation")

    self.exp2 = StickExperiment(model=self.eos_true)
    self.sim2 = Stick()
    # end
def test_shot_plot(self):
    """tests the plotting function"""
    init_prior = np.vectorize(lambda v: 2.56e9 / v**3)

    # Create the model and *true* EOS
    eos = EOSModel(init_prior)
    gun = Gun()

    data0 = gun({'eos': eos})

    old_dof = eos.get_c()
    old_dof[0] *= 1.02
    eos.update_dof(old_dof)
    data1 = gun({'eos': eos})

    gun.plot(level=3, data=[data0, data1])
    gun.plot(level=1, data=[data0, data1])

    plt.show()
    # end
def test_sigmoid():
    '''
    Test using a numerically stable reference sigmoid implementation
    '''
    def ref_sigmoid(x):
        if x >= 0:
            return 1 / (1 + np.exp(-x))
        else:
            z = np.exp(x)
            return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.sigmoid(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
    '''
    Test using a reference hard sigmoid implementation
    '''
    def ref_hard_sigmoid(x):
        '''
        Reference hard sigmoid with slope and shift values from theano, see
        https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
        '''
        x = (x * 0.2) + 0.5
        z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
        return z
    hard_sigmoid = np.vectorize(ref_hard_sigmoid)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.hard_sigmoid(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = hard_sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
def _get_new_id_seq(pos, numbers):
    """
    A helper function to produce the new sequence of the transformed
    structure. The idea is to sort the positions back to the initial
    order and use the resulting indices to sort the atomic numbers.
    """
    # map the atom positions into the range [0, 1)
    pos = np.around(pos, decimals=3)
    func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
    o_pos = func_tofrac(pos)
    # round_o_pos = np.around(o_pos, decimals=3)
    # z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
    z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
    inds = np.lexsort((z, y, x))
    return inds
def _get_new_id_seq(pos, numbers):
    """
    A helper function to produce the new sequence of the transformed
    structure. The idea is to sort the positions back to the initial
    order and use the resulting indices to sort the atomic numbers.
    """
    # map the atom positions into the range [0, 1)
    pos = np.around(pos, decimals=5)
    func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
    o_pos = func_tofrac(pos)
    # round_o_pos = np.around(o_pos, decimals=3)
    # z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
    z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
    inds = np.lexsort((z, y, x))
    return inds
def get_new_id_seq(pos, numbers):
    """
    A helper function to produce the new sequence of the transformed
    structure. The idea is to sort the positions back to the initial
    order and use the resulting indices to sort the atomic numbers.
    """
    # map the atom positions into the range [0, 1)
    pos = np.around(pos, decimals=5)
    func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
    o_pos = func_tofrac(pos)
    # round_o_pos = np.around(o_pos, decimals=3)
    # z, y, x = round_o_pos[:, 2], round_o_pos[:, 1], round_o_pos[:, 0]
    z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
    inds = np.lexsort((z, y, x))
    return inds
def _ingest_pairs(self, pairs, oid2nid, frame_size, limit, single_sided):
    oid2nid_v = np.vectorize(oid2nid.get)
    # the whole pairs set does not fit in memory, so split it into frames
    # with `frame_size` number of pairs each.
    for start in range(0, limit, frame_size):
        stop = frame_size + start
        t1 = process_time()
        six.print_('Fetching pairs {0}:{1} of {2} ... '.format(start, stop, limit), end='', flush=True)
        raw_frame = pairs.read(start=start, stop=stop)
        t2 = process_time()
        six.print_('{0}s, Parsing ... '.format(int(t2 - t1)), flush=True)
        frame = self._translate_frame(raw_frame, oid2nid_v, single_sided)
        t3 = process_time()
        six.print_('{0}s, Writing ... '.format(int(t3 - t2)), flush=True)
        # alternate direction, to make use of cached chunks of prev frame
        self._ingest_pairs_frame(frame)
        del frame
        t4 = process_time()
        six.print_('{0}s, Done with {1}:{2} in {3}s'.format(int(t4 - t3), start, stop, int(t4 - t1)), flush=True)
def _plot_mpl(scheme):
    # pylint: disable=relative-import, unused-variable
    from mpl_toolkits.mplot3d import Axes3D

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_aspect('equal')

    flt = numpy.vectorize(float)
    pts = flt(scheme.points)
    wgs = flt(scheme.weights)

    for p, w in zip(pts, wgs):
        # <https://en.wikipedia.org/wiki/Spherical_cap>
        w *= 4 * numpy.pi
        theta = numpy.arccos(1.0 - abs(w) / (2*numpy.pi))
        color = '#1f77b4' if w >= 0 else '#d62728'
        _plot_spherical_cap_mpl(ax, p, theta, color)

    ax.set_axis_off()
    return
def __init__(self, n, a=0.0, b=0.0):
    # The general scheme is:
    # Get the Jacobi recurrence coefficients, get the Kronrod vectors alpha
    # and beta, and hand those off to orthopy.line.schemes.custom. There,
    # the eigenproblem for a tridiagonal matrix with alpha and beta is
    # solved to retrieve the points and weights.
    # TODO replace math.ceil by -(-k//n)
    length = int(math.ceil(3*n/2.0)) + 1
    self.degree = 2*length + 1
    _, _, alpha, beta = \
        orthopy.line.recurrence_coefficients.jacobi(length, a, b, 'monic')
    flt = numpy.vectorize(float)
    alpha = flt(alpha)
    beta = flt(beta)
    a, b = self.r_kronrod(n, alpha, beta)
    x, w = orthopy.line.schemes.custom(a, b, mode='numpy')
    # sort by x
    i = numpy.argsort(x)
    self.points = x[i]
    self.weights = w[i]
    return

# pylint: disable=no-self-use
def map_predicate(self, f):
    """
    Map a function from str -> bool element-wise over ``self``.

    ``f`` will be applied exactly once to each non-missing unique value in
    ``self``. Missing values will always return False.
    """
    # Functions passed to this are of type str -> bool.  Don't ever call
    # them on None, which is the only non-str value we ever store in
    # categories.
    if self.missing_value is None:
        def f_to_use(x):
            return False if x is None else f(x)
    else:
        f_to_use = f

    # Call f on each unique value in our categories.
    results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)

    # missing_value should produce False no matter what
    results[self.reverse_categories[self.missing_value]] = False

    # unpack the results from each unique value into their corresponding
    # locations in our indices.
    return results[self.as_int_array()]
def test_map_shrinks_code_storage_if_possible(self):
    arr = LabelArray(
        # Drop the last value so we fit in a uint16 with None as a missing
        # value.
        self.create_categories(16, plus_one=False)[:-1],
        missing_value=None,
    )

    self.assertEqual(arr.itemsize, 2)

    def either_A_or_B(s):
        return ('A', 'B')[sum(ord(c) for c in s) % 2]

    result = arr.map(either_A_or_B)
    self.assertEqual(set(result.categories), {'A', 'B', None})
    self.assertEqual(result.itemsize, 1)
    assert_equal(
        np.vectorize(either_A_or_B)(arr.as_string_array()),
        result.as_string_array(),
    )
def coords_edges(self, edges):
    '''
    Returns a list of coordinate head and tail points for all edges in
    edges
    '''
    res = np.empty((len(edges)), dtype=object)
    for r, e in zip(range(len(edges)), edges):
        if e[0] is None:
            e[0] = 0
        res[r] = self.coords_edge(e)
        if len(res[r][0]) != 2:
            print('there is an error with the edges')
            import pdb
            pdb.set_trace()
    # v = np.vectorize(self.coords_edge, otypes=[np.object])
    # res = v(edges)
    return res
def _get_alpha_data(data, kwargs):
    """Get alpha values for all data points.

    Parameters
    ----------
    data : array_like
    alpha : Callable or float
        This can be a fixed value or a function of the data.

    Returns
    -------
    array_like
    """
    alpha = kwargs.pop("alpha", 1)
    if hasattr(alpha, "__call__"):
        return np.vectorize(alpha)(data)
    return alpha
def __init__(self, scale, pre=10):
    """
    This class holds a queue of times drawn from an exponential
    distribution with a specified scale.

    Arguments:
        - scale: The scale parameter for the exponential distribution.
        - pre: Predefined size of the queue. Default=10
    """
    self.scale = scale
    self.pre = pre
    self.queue = SimpleQueue(maxsize=pre + 1)
    self.v_put = vectorize(self.queue.put_nowait)
    # the exponential dist is not defined for a rate of 0, therefore if
    # the rate is 0 (scale is None then) huge times are set
    if self.scale in [None, 0]:
        self.scale = 0
        self.draw_fct = no_mut
    else:
        self.draw_fct = random.exponential
    # fill up the queue
    self.fillup()
    # there was: (new version compatible with pickling, see method below)
    self.v_get = vectorize(self.get_val)
def __setstate__(self, d):
    if 'simple_queue_list' in d:
        event_queue_list = d.pop('simple_queue_list')
        d['queue'] = SimpleQueue(maxsize=d['pre'] + 1)
        while len(event_queue_list):
            d['queue'].put_nowait(event_queue_list.pop())
    self.__dict__.update(d)
    self.__dict__['v_put'] = vectorize(self.queue.put_nowait)
    # d['v_put'] = vectorize(d['queue'].put_nowait)
    # self.__dict__.update(d)
    self.__dict__['v_get'] = vectorize(self.get_val)
    if self.scale is None:
        self.scale = 0
        self.queue = SimpleQueue(maxsize=self.pre + 1)
        # this is specific to the queue, thus reinit here
        self.v_put = vectorize(self.queue.put_nowait)
        self.draw_fct = no_mut
        self.fillup()
def __init__(self, source, **params):
    # _Graph.__init__(self)
    self.is_static = False
    if isinstance(source, str):
        # it is a file
        self._load(source, **params)
    else:
        # source must be an EventQueue then
        # to do: read from event queue
        # should also get self.starts, ...
        pass
    self.t_start = params.get('t_start', np.min(self.starts))
    self.t_stop = params.get('t_stop', np.max(self.stops))
    # ToDo: Ideally only use self.all_nodes
    self.all_nodes = list(np.union1d(self.node1s, self.node2s))
    all_nodes = list(np.union1d(self.node1s, self.node2s))
    n = len(self.all_nodes)

    def get_id(an_id):
        return all_nodes.index(an_id)

    v_get_id = np.vectorize(get_id)
    self.node1s = v_get_id(self.node1s)
    self.node2s = v_get_id(self.node2s)
    # now we need to remap the node ids
    _Graph.__init__(self, n=n)
def _init_genotypes(self):
    """Construct an array of genotype vectors, one per variant.
    If it is found in cache, use the cached version, otherwise recompute
    it and cache the result. Either way, store a copy in local process
    memory.
    """
    if self.genotypes_key in self.cache:
        # Read cache, store in local memory
        self._gt_types_bit = self._get_genotypes()
    else:
        # Regenerate, cache, and store in local memory
        gt_types = extract_genotypes(db=self.db)
        f = np.vectorize(variant_build_gt_type_bit, otypes=[np.uint8])
        # apply to all array elements
        self._gt_types_bit = f(gt_types)
        self._gt_types_bit.flags.writeable = False  # make it immutable
        self._save_genotypes(self._gt_types_bit)
def game(self, mask=False):
    q = list()
    if mask is True:
        q.append(self.layers.flags)
        q.append(self.layers.masks)
    q.append(self.layers.mines)
    q.append(self.layers.hints)
    __ = self.addLayers(q)
    __[__ == None] = __TOKEN_EMPTY__
    f = np.vectorize(str)
    return f(__).T.tolist()
def _postcompute_biases(self):
    """
    Post-computed biases for non-boundary training examples (support
    vectors) when training is done. This is for estimating sample mean
    and sample std of biases. For a good learning result, sample std of
    biases should be small.
    """
    def _b(i):
        if self.enable_kernel_cache:
            return self.train_y[i] - np.dot(self.alpha*self.train_y, self.kernel_cache[i])
        else:
            return self.train_y[i] - self._f(self.train_X[i])

    I_non_boundary = np.where(np.logical_and(self.alpha > 0, self.alpha < self.C) == True)[0].tolist()
    if len(I_non_boundary):
        biases = np.vectorize(_b)(I_non_boundary)
        self.b_mean = np.mean(biases)
        self.b_std = np.sqrt(np.sum((biases - self.b_mean)**2) / (len(biases) - 1))
        self.postcomputed_biases[I_non_boundary] = biases
def calcr2s(lc1, lc2, reltimeshifts, spline, trace=False):
    """
    I calculate the r2 for an array of relative time shifts.
    To be compared to calcd2 of pycs.pelt.twospec!
    """
    lc2abstimeshifts = reltimeshifts + lc2.timeshift

    def r2(lc2abstimeshift):
        # We work with copies at every trial time delay, to always start
        # from the same position.
        mylc1 = lc1.copy()
        mylc2 = lc2.copy()
        mylc2.timeshift = lc2abstimeshift
        myspline = spline.copy()
        return pycs.spl.multiopt.opt_source([mylc1, mylc2], myspline, verbose=False, trace=trace)

    # We vectorize this before applying it to our abstimeshifts
    vecr2 = np.vectorize(r2, otypes=[np.ndarray])
    r2s = vecr2(lc2abstimeshifts)
    return r2s
def preprocessing_train_data(paras, df, LabelColumnName, ticker,
                             train_tickers_dict, one_hot_label_proc,
                             array_format=True):
    day_list = train_tickers_dict[ticker]
    index_df = np.vectorize(lambda s: s.strftime('%Y-%m-%d'))(df.index.to_pydatetime())
    df.index = index_df
    common_day = list(set(day_list).intersection(set(index_df)))
    df = df.loc[common_day]

    X = df.drop(LabelColumnName, 1)
    y = np.array(df[LabelColumnName])

    # print(X.head())
    # print("ticker", ticker)
    # print(X)

    if one_hot_label_proc == True:
        # generate one hot output
        y_normalized_T = one_hot_processing(y, paras.n_out_class)
    else:
        y_normalized_T = y.astype(int)  # np.repeat(float('nan'), len(y))

    if array_format:
        return X.values, y_normalized_T
    return X, y_normalized_T
def setUp(self):
    # real function
    self.z_values = np.linspace(0, 1, 1000)
    self.real_func = core.Function(lambda x: x)
    self.real_func_handle = np.vectorize(self.real_func)

    # approximation by lag1st
    self.nodes, self.src_test_funcs = shapefunctions.cure_interval(
        shapefunctions.LagrangeFirstOrder, (0, 1), node_count=2)
    register_base("test_funcs", self.src_test_funcs, overwrite=True)
    self.src_weights = core.project_on_base(self.real_func, self.src_test_funcs)
    self.assertTrue(np.allclose(self.src_weights, [0, 1]))  # just to be sure
    self.src_approx_handle = core.back_project_from_base(self.src_weights, self.src_test_funcs)

    # approximation by sin(w*x)
    def trig_factory(freq):
        def func(x):
            return np.sin(freq*x)
        return func

    self.trig_test_funcs = np.array([core.Function(trig_factory(w), domain=(0, 1))
                                     for w in range(1, 3)])
def __init__(self, y0, y1, z0, z1, t0, dt, params):
    SimulationInput.__init__(self)

    # store params
    self._tA = t0
    self._dt = dt
    self._dz = z1 - z0
    self._m = params.m          # []=kg mass at z=0
    self._tau = params.tau      # []=m/s speed of wave translation in string
    self._sigma = params.sigma  # []=kgm/s**2 pretension of string

    # construct trajectory generator for yd
    ts = max(t0, self._dz * self._tau)  # never too early
    self.trajectory_gen = SmoothTransition((y0, y1), (ts, ts + dt),
                                           method="poly", differential_order=2)

    # create vectorized functions
    self.control_input = np.vectorize(self._control_input, otypes=[np.float])
    self.system_state = np.vectorize(self._system_sate, otypes=[np.float])
def __new__(cls, array):
    array = asarray(array)
    assert numpy.prod(array.shape)

    # Handle children with shape
    child_shape = array.flat[0].shape
    assert all(elem.shape == child_shape for elem in array.flat)
    if child_shape:
        # Destroy structure
        direct_array = numpy.empty(array.shape + child_shape, dtype=object)
        for alpha in numpy.ndindex(array.shape):
            for beta in numpy.ndindex(child_shape):
                direct_array[alpha + beta] = Indexed(array[alpha], beta)
        array = direct_array

    # Constant folding
    if all(isinstance(elem, Constant) for elem in array.flat):
        return Literal(numpy.vectorize(attrgetter('value'))(array))

    self = super(ListTensor, cls).__new__(cls)
    self.array = array
    return self
def _power_level_from_power_spectrogram(spectrogram: ndarray) -> ndarray:
    # default value for min_decibel found by experiment (all values except
    # for 0s were above this bound)
    def power_to_decibel(x, min_decibel: float = -150) -> float:
        if x == 0:
            return min_decibel
        l = 10 * math.log10(x)
        return min_decibel if l < min_decibel else l

    return vectorize(power_to_decibel)(spectrogram)
def _is_enum(feature_values, enum_threshold):
    are_all_ints = np.vectorize(lambda val: float(val).is_integer())
    return (
        len(np.unique(feature_values)) <= enum_threshold
        and np.all(are_all_ints(feature_values))
    )
def __init__(self, M):
    oprint(3, "Initializing bond objective")
    self.M = M
    self.matrix_function = self.get_matrix_function()
    self.evaluate_score_matrix_vect = np.vectorize(
        self.evaluate_score_matrix, otypes=[np.float], excluded=['match'])
def normalize_to_zero(self, y_coordinates):
    vertical_offset = y_coordinates[0]
    if vertical_offset == 0:
        return y_coordinates
    fix_offset = np.vectorize(lambda x: x - vertical_offset)
    y_coordinates = fix_offset(y_coordinates)
    return y_coordinates
def calculate_target_coordinates(self):
    def simple_function(x):
        y = 0
        x = float(x)
        for i, c in enumerate(reversed(self.constants)):
            y += c * (x / 2000) ** i
        return y * self.scaling

    calculate = np.vectorize(simple_function)
    x_variables = np.arange(self.major_axis_span, dtype=float)
    y_variables = calculate(x_variables)
    return self.prepare_coordinates(x_variables, y_variables)
def calculate_target_coordinates(self):
    amplitude = self.amplitude
    coef = (2 * pi) / self.period
    phase = radians(self.phase)

    def simple_sine(x):
        return amplitude * sin(x * coef - phase)

    calculate = np.vectorize(simple_sine)
    x_variables = np.arange(self.major_axis_span, dtype=float)
    y_variables = calculate(x_variables)
    y_variables = self.normalize_to_zero(y_variables)
    return self.prepare_coordinates(x_variables, y_variables)
def test_mem_vectorise(self, level=rlevel):
    # Ticket #325
    vt = np.vectorize(lambda *args: args)
    vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
    vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)),
       np.zeros((2, 2)))
def test_refcount_vectorize(self, level=rlevel):
    # Ticket #378
    def p(x, y):
        return 123
    v = np.vectorize(p)
    _assert_valid_refcount(v)
def test_simple(self):
    def addsubtract(a, b):
        if a > b:
            return a - b
        else:
            return a + b
    f = vectorize(addsubtract)
    r = f([0, 3, 6, 9], [1, 3, 5, 7])
    assert_array_equal(r, [1, 6, 1, 2])
def test_scalar(self):
    def addsubtract(a, b):
        if a > b:
            return a - b
        else:
            return a + b
    f = vectorize(addsubtract)
    r = f([0, 3, 6, 9], 5)
    assert_array_equal(r, [5, 8, 1, 4])
def test_large(self):
    x = np.linspace(-3, 2, 10000)
    f = vectorize(lambda x: x)
    y = f(x)
    assert_array_equal(y, x)
def test_keywords(self):
    def foo(a, b=1):
        return a + b
    f = vectorize(foo)
    args = np.array([1, 2, 3])
    r1 = f(args)
    r2 = np.array([2, 3, 4])
    assert_array_equal(r1, r2)
    r1 = f(args, 2)
    r2 = np.array([3, 4, 5])
    assert_array_equal(r1, r2)
def test_keywords_no_func_code(self):
    # This needs to test a function that has keywords but
    # no func_code attribute, since otherwise vectorize will
    # inspect the func_code.
    import random
    try:
        vectorize(random.randrange)  # Should succeed
    except Exception:
        raise AssertionError()
def test_keywords3_ticket_2100(self):
    # Test excluded with mixed positional and kwargs: ticket 2100
    def mypolyval(x, p):
        _p = list(p)
        res = _p.pop(0)
        while _p:
            res = res * x + _p.pop(0)
        return res

    vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
    ans = [3, 6]
    assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
    assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
    assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
def test_keywords4_ticket_2100(self):
    # Test vectorizing function with no positional args.
    @vectorize
    def f(**kw):
        res = 1.0
        for _k in kw:
            res *= kw[_k]
        return res
    assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
def test_coverage1_ticket_2100(self):
    def foo():
        return 1
    f = vectorize(foo)
    assert_array_equal(f(), 1)
def test_assigning_docstring(self):
    def foo(x):
        return x
    doc = "Provided documentation"
    f = vectorize(foo, doc=doc)
    assert_equal(f.__doc__, doc)