The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.random.uniform().
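For orientation before the project examples, here is a minimal sketch of the basic call: numpy.random.uniform(low=0.0, high=1.0, size=None) draws samples from the half-open interval [low, high). The variable names and shapes below are illustrative, not taken from any of the projects that follow.

    import numpy as np

    # One draw from the default interval [0.0, 1.0)
    x = np.random.uniform()

    # One draw from [low, high)
    y = np.random.uniform(low=-1.0, high=1.0)

    # `size` may be an int or a shape tuple; this returns a 3x4 array
    samples = np.random.uniform(low=0.0, high=5.0, size=(3, 4))
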
def __call__(self, image, boxes, labels):
    # `random` here is numpy.random; randint(2) returns 0 or 1,
    # so the expansion is skipped about half the time.
    if random.randint(2):
        return image, boxes, labels

    height, width, depth = image.shape
    ratio = random.uniform(1, 4)
    left = random.uniform(0, width * ratio - width)
    top = random.uniform(0, height * ratio - height)

    # Place the original image at a random offset on a larger
    # mean-filled canvas, then shift the boxes accordingly.
    expand_image = np.zeros(
        (int(height * ratio), int(width * ratio), depth),
        dtype=image.dtype)
    expand_image[:, :, :] = self.mean
    expand_image[int(top):int(top + height),
                 int(left):int(left + width)] = image
    image = expand_image

    boxes = boxes.copy()
    boxes[:, :2] += (int(left), int(top))
    boxes[:, 2:] += (int(left), int(top))

    return image, boxes, labels

def test_ln_sample_parameters(self):
    sampling.set_seed(3242)
    for rep in range(10):
        mu = random.uniform(-1, 1)
        sd = random.uniform(0.5, 1.25)
        print(rep, mu, sd)
        f = distributions.LogNormal(mu, sd)
        x = f.sample(1000)
        print(numpy.mean(numpy.log(x)))
        params = [f.sample_parameters(x) for i in range(10000)]
        mu1 = [p[0] for p in params]
        sd1 = [p[1] for p in params]
        print(numpy.mean(mu1))
        print(numpy.mean(sd1))
        self.assertTrue(abs(mu - numpy.mean(mu1)) < 0.1,
                        "Mismatch: MU %s params %s" % (mu, numpy.mean(mu1)))
        self.assertTrue(abs(sd - numpy.mean(sd1)) < 0.1,
                        "Mismatch: std %s params %s" % (sd, numpy.mean(sd1)))

def test_gamma_sample_parameters(self):
    sampling.set_seed(3242)
    for rep in range(1):
        shape = random.uniform(0.5, 3.0)
        scale = random.uniform(0.0, 10.0)
        print("REP", rep, shape, scale)
        f = distributions.Gamma(shape, scale)
        x = f.sample(1000)
        params = [f.sample_parameters(x) for i in range(1000)]
        shape1 = [p[0] for p in params]
        scale1 = [p[1] for p in params]
        for p in params:
            print("P", " ".join(map(str, p)), p[0] * p[1])
        self.assertTrue(abs(scale - numpy.mean(scale1)) < 0.03,
                        "Mismatch: SCALE %s params %s" % (scale, numpy.mean(scale1)))
        self.assertTrue(abs(shape - numpy.mean(shape1)) < 0.03,
                        "Mismatch: SHAPE %s params %s" % (shape, numpy.mean(shape1)))

def pollute_forever(self):
    if self.verbose:
        print("""Display format:
Downloading: website.com; NNNNN links [in library], H(domain)= B bits [entropy]
Downloaded: website.com: +LLL/NNNNN links [added], H(domain)= B bits [entropy]
""")
    self.open_driver()
    self.seed_links()
    self.clear_driver()
    if self.quit_driver_every_call:
        self.quit_driver()
    while True:  # pollute forever, pausing only to meet the bandwidth requirement
        try:
            if (not self.diurnal_flag) or self.diurnal_cycle_test():
                self.pollute()
            else:
                time.sleep(self.chi2_mean_std(3., 1.))
            if npr.uniform() < 0.005:
                self.set_user_agent()  # reset the user agent occasionally
            self.elapsed_time = time.time() - self.start_time
            self.exceeded_bandwidth_tasks()
            self.random_interval_tasks()
            self.every_hour_tasks()
            time.sleep(self.chi2_mean_std(0.5, 0.2))
        except Exception as e:
            if self.debug:
                print('.pollute() exception:\n{}'.format(e))

def null_model(num_samples, dimension=1, rho=0):
    data_z = np.reshape(uniform(0, 5, num_samples * dimension),
                        (num_samples, dimension))
    coin_flip_x = np.random.choice([0, 1], replace=True, size=num_samples)
    coin_flip_y = np.random.choice([0, 1], replace=True, size=num_samples)
    mean_noise = [0, 0]
    cov_noise = [[1, 0], [0, 1]]
    noise_x, noise_y = multivariate_normal(mean_noise, cov_noise, num_samples).T
    data_x = zeros(num_samples)
    data_x[coin_flip_x == 0] = 1.7 * data_z[coin_flip_x == 0, 0]
    data_x[coin_flip_x == 1] = -1.7 * data_z[coin_flip_x == 1, 0]
    data_x = data_x + noise_x
    data_y = zeros(num_samples)
    data_y[coin_flip_y == 0] = (data_z[coin_flip_y == 0, 0] - 2.7) ** 2
    data_y[coin_flip_y == 1] = -(data_z[coin_flip_y == 1, 0] - 2.7) ** 2 + 13
    data_y = data_y + noise_y
    data_x = np.reshape(data_x, (num_samples, 1))
    data_y = np.reshape(data_y, (num_samples, 1))
    return data_x, data_y, data_z

def alternative_model(num_samples, dimension=1, rho=0.15):
    data_z = np.reshape(uniform(0, 5, num_samples * dimension),
                        (num_samples, dimension))
    rr = uniform(0, 1, num_samples)
    idx_rr = np.where(rr < rho)
    coin_flip_x = np.random.choice([0, 1], replace=True, size=num_samples)
    coin_flip_y = np.random.choice([0, 1], replace=True, size=num_samples)
    coin_flip_y[idx_rr] = coin_flip_x[idx_rr]
    mean_noise = [0, 0]
    cov_noise = [[1, 0], [0, 1]]
    noise_x, noise_y = multivariate_normal(mean_noise, cov_noise, num_samples).T
    data_x = zeros(num_samples)
    data_x[coin_flip_x == 0] = 1.7 * data_z[coin_flip_x == 0, 0]
    data_x[coin_flip_x == 1] = -1.7 * data_z[coin_flip_x == 1, 0]
    data_x = data_x + noise_x
    data_y = zeros(num_samples)
    data_y[coin_flip_y == 0] = (data_z[coin_flip_y == 0, 0] - 2.7) ** 2
    data_y[coin_flip_y == 1] = -(data_z[coin_flip_y == 1, 0] - 2.7) ** 2 + 13
    data_y = data_y + noise_y
    data_x = np.reshape(data_x, (num_samples, 1))
    data_y = np.reshape(data_y, (num_samples, 1))
    return data_x, data_y, data_z

def genGroundTruth(numN, numM, numK):
    '''Generate a numN-by-numM ground-truth matrix with numK nonzero
    entries per column, drawn from uniform(1, 2) and placed away from
    the top and bottom 10% of rows.'''
    arrX = np.zeros((numN, numM))
    for ii in range(numM):
        arrInd = int(0.1 * numN) + npr.choice(
            range(int(numN - 0.2 * numN)), numK, replace=False)
        arrX[arrInd, ii] = npr.uniform(1, 2, numK)
    return arrX

################################################################################
# CALCULATION SECTION
################################################################################

def move(my_history, their_history, my_score, their_score):
    '''
    Arguments accepted: my_history, their_history are strings.
    my_score, their_score are ints.

    Make my move.
    Returns 'c' or 'b'.
    '''
    # my_history: a string with one letter (c or b) per round that has
    # been played with this opponent.
    # their_history: a string of the same length as history, possibly empty.
    # The first round between these two players is my_history[0] and
    # their_history[0]. The most recent round is my_history[-1] and
    # their_history[-1].

    # Analyze my_history and their_history and/or my_score and their_score.
    # Decide whether to return 'c' or 'b'.
    if random.uniform() < 0.7:
        return 'c'
    else:
        return 'b'

def update(self, index, weight, grad, state):
    # assert(isinstance(weight, NDArray))
    # assert(isinstance(grad, NDArray))
    self._update_count(index)
    lr = self._get_lr(index)
    wd = self._get_wd(index)

    # preprocess grad
    grad *= self.rescale_grad
    if self.clip_gradient is not None:
        grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
    grad += wd * weight

    w_nadam = self._update_nadam(index, weight, grad, state, lr, wd)
    w_sgd = self._update_sgd(index, weight, grad, state, lr, wd)
    if uniform(0, 1) < 1.0 / 3.0:
        weight[:] += w_nadam * 0.1
    else:
        weight[:] += w_sgd

def __init__(self, sand, colors):
    self.x = 0  # X position on grid
    self.y = 0  # Y position on grid
    self.t = 0  # Direction of travel
    self.w = WIDTH
    self.h = HEIGHT
    self.g = uniform(0.01, 0.1)
    self.grains = 64
    self.xs = SimpleLinearScale(domain=array([0, self.w]), range=array([0, 1]))
    self.ys = SimpleLinearScale(domain=array([0, self.h]), range=array([0, 1]))
    self.painter = SandPainter(sand=sand, xs=self.xs, ys=self.ys, colors=colors)
    self.find_start()

def find_start(self):
    global cgrid
    px = 0
    py = 0
    timeout = 0
    found = False
    while not found:
        px = randint(self.w)
        py = randint(self.h)
        if cgrid[py * self.w + px] < 10000:
            found = True
    if found:
        a = cgrid[py * self.w + px]
        if randint(100) < 50:
            a -= 90 + int(uniform(-2, 2.1))
        else:
            a += 90 + int(uniform(-2, 2.1))
        self.start_crack(px, py, a)

def transform(image):
    # translate, shear, stretch, flips?
    rows, cols = image.shape
    angle = random.uniform(-1.5, 1.5)
    center = (rows / 2 - 0.5 + random.uniform(-50, 50),
              cols / 2 - 0.5 + random.uniform(-50, 50))
    def_image = tf.rotate(image, angle=angle, center=center, clip=True,
                          preserve_range=True, order=5)
    alpha = random.uniform(0, 5)
    sigma = random.exponential(scale=5) + 2 + alpha ** 2
    def_image = elastic_transform(def_image, alpha, sigma)
    def_image = def_image[10:-10, 10:-10]
    return def_image

# sigma: variance of filter, fixes homogeneity of transformation
# (close to zero: random, big: translation)

def get_random_params(self, num=1):
    """Generate random sets of model parameters in the default bounds.

    Samples num values for each model parameter from a uniform
    distribution between the default bounds.

    Args:
        num: (optional) Integer specifying the number of parameter sets
            that will be generated. Default is 1.

    Returns:
        A numpy array of the model's custom data type, containing the
        randomly generated parameters.
    """
    params = np.zeros(num, dtype=self._dtype)
    # sample one value for each parameter
    for param in self._param_list:
        values = uniform(low=self._default_bounds[param][0],
                         high=self._default_bounds[param][1],
                         size=num)
        params[param] = values
    return params

def init_board_gauss(N, k):
    from numpy import random
    n = float(N) / k
    X = []
    for i in range(k):
        c = (random.uniform(-1, 1), random.uniform(-1, 1))
        s = random.uniform(0.05, 0.5)
        x = []
        while len(x) < n:
            a, b = np.array([np.random.normal(c[0], s),
                             np.random.normal(c[1], s)])
            # Continue drawing points from the distribution in the range [-1, 1]
            if abs(a) < 1 and abs(b) < 1:
                x.append([a, b])
        X.extend(x)
    X = np.array(X)[:N]
    return X

def irl(feature_matrix, n_actions, discount, transition_probability,
        trajectories, epochs, learning_rate):
    """
    Find the reward function for the given trajectories.

    feature_matrix: Matrix with the nth row representing the nth state.
        NumPy array with shape (N, D) where N is the number of states and
        D is the dimensionality of the state.
    n_actions: Number of actions A. int.
    discount: Discount factor of the MDP. float.
    transition_probability: NumPy array mapping (state_i, action, state_k)
        to the probability of transitioning from state_i to state_k under
        action. Shape (N, A, N).
    trajectories: 3D array of state/action pairs. States are ints, actions
        are ints. NumPy array with shape (T, L, 2) where T is the number of
        trajectories and L is the trajectory length.
    epochs: Number of gradient descent steps. int.
    learning_rate: Gradient descent learning rate. float.
    -> Reward vector with shape (N,).
    """
    n_states, d_states = feature_matrix.shape

    # Initialise weights.
    alpha = rn.uniform(size=(d_states,))

    # Calculate the feature expectations \tilde{phi}.
    feature_expectations = find_feature_expectations(feature_matrix,
                                                     trajectories)

    # Gradient descent on alpha.
    for i in range(epochs):
        # print("i: {}".format(i))
        r = feature_matrix.dot(alpha)
        expected_svf = find_expected_svf(n_states, r, n_actions, discount,
                                         transition_probability, trajectories)
        grad = feature_expectations - feature_matrix.T.dot(expected_svf)
        alpha += learning_rate * grad

    return feature_matrix.dot(alpha).reshape((n_states,))

def __call__(self, image, boxes=None, labels=None):
    # Scale the saturation channel (HSV) by a random factor half the time.
    if random.randint(2):
        image[:, :, 1] *= random.uniform(self.lower, self.upper)
    return image, boxes, labels

def __call__(self, image, boxes=None, labels=None):
    # Shift the hue channel by a random delta and wrap it into [0, 360).
    if random.randint(2):
        image[:, :, 0] += random.uniform(-self.delta, self.delta)
        image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
        image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
    return image, boxes, labels

def __call__(self, image, boxes=None, labels=None):
    # Multiply the image by a random contrast factor half the time.
    if random.randint(2):
        alpha = random.uniform(self.lower, self.upper)
        image *= alpha
    return image, boxes, labels

def __call__(self, image, boxes=None, labels=None):
    # Add a random brightness offset half the time.
    if random.randint(2):
        delta = random.uniform(-self.delta, self.delta)
        image += delta
    return image, boxes, labels

def init_unif(sz):
    """
    Uniform initialization

    Heuristic commonly used to initialize deep neural networks
    """
    bnd = 1 / sqrt(sz[0])
    p = uniform(low=-bnd, high=bnd, size=sz)
    return squeeze(p)

def init_nunif(sz):
    """
    Normalized uniform initialization

    See Glorot X., Bengio Y.: "Understanding the difficulty of training
    deep feedforward neural networks". AISTATS, 2010
    """
    bnd = sqrt(6) / sqrt(sz[0] + sz[1])
    p = uniform(low=-bnd, high=bnd, size=sz)
    return squeeze(p)

def test_image_conversion(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [3, 4, 128, 128]))
    cir = convert_stokes_to_circular(stokes)
    st = convert_circular_to_stokes(cir)
    assert_array_almost_equal(st.real, stokes, 15)

def test_image_auto_conversion_circular(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [3, 4, 128, 128]))
    ipf = PolarisationFrame('stokesIQUV')
    opf = PolarisationFrame('circular')
    cir = convert_pol_frame(stokes, ipf, opf)
    st = convert_pol_frame(cir, opf, ipf)
    assert_array_almost_equal(st.real, stokes, 15)

def test_image_auto_conversion_linear(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [3, 4, 128, 128]))
    ipf = PolarisationFrame('stokesIQUV')
    opf = PolarisationFrame('linear')
    cir = convert_pol_frame(stokes, ipf, opf)
    st = convert_pol_frame(cir, opf, ipf)
    assert_array_almost_equal(st.real, stokes, 15)

def test_image_auto_conversion_I(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [3, 4, 128, 128]))
    ipf = PolarisationFrame('stokesI')
    opf = PolarisationFrame('stokesI')
    cir = convert_pol_frame(stokes, ipf, opf)
    st = convert_pol_frame(cir, opf, ipf)
    assert_array_almost_equal(st.real, stokes, 15)

def test_vis_conversion(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [1000, 3, 4]))
    cir = convert_stokes_to_circular(stokes, polaxis=2)
    st = convert_circular_to_stokes(cir, polaxis=2)
    assert_array_almost_equal(st.real, stokes, 15)

def test_vis_auto_conversion_I(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [1000, 3, 1]))
    ipf = PolarisationFrame('stokesI')
    opf = PolarisationFrame('stokesI')
    cir = convert_pol_frame(stokes, ipf, opf, polaxis=2)
    st = convert_pol_frame(cir, opf, ipf, polaxis=2)
    assert_array_almost_equal(st.real, stokes, 15)

def test_circular_to_linear(self):
    stokes = numpy.array(random.uniform(-1.0, 1.0, [3, 4, 128, 128]))
    ipf = PolarisationFrame('stokesIQUV')
    opf = PolarisationFrame('circular')
    cir = convert_pol_frame(stokes, ipf, opf)
    wrong_pf = PolarisationFrame('linear')
    with self.assertRaises(ValueError):
        convert_pol_frame(cir, opf, wrong_pf)

def _clear(self):
    """
    Resets the variables that are altered on a per-run basis of the algorithm

    :return: None
    """
    self.pos = uniform(self.lower_bound, self.upper_bound,
                       size=(self.swarm_size, self.member_size))
    self.vel = uniform(self.lower_bound - self.upper_bound,
                       self.upper_bound - self.lower_bound,
                       size=(self.swarm_size, self.member_size))
    self.scores = self._score(self.pos)
    self.best = copy(self.pos)
    self.cur_steps = 0
    self._global_best()

def seed_links(self):
    # bias with non-random seed links
    self.bias_links()
    if self.link_count() < self.max_links_cached:
        num_words = max(1, npr.poisson(1.33) + 1)  # mean of 1.33 words per search
        if num_words == 1:
            word = ' '.join(random.sample(self.words, num_words))
        else:
            if npr.uniform() < 0.5:
                word = ' '.join(random.sample(self.words, num_words))
            else:  # quote the first two words together
                word = ' '.join(['"{}"'.format(' '.join(random.sample(self.words, 2))),
                                 ' '.join(random.sample(self.words, num_words - 2))])
        if self.debug:
            print('Seeding with search for \'{}\'…'.format(word))
        self.get_websearch(word)

def diurnal_cycle_test(self):
    now = dt.datetime.now()
    tmhr = now.hour + now.minute / 60.
    phase = npr.normal(14., 1.)
    exponent = min(0.667, self.chi2_mean_std(0.333, 0.1))

    def cospow(x, e):  # flattened cosine with e < 1
        c = np.cos(x)
        return np.sign(c) * np.power(np.abs(c), e)

    diurn = max(0., 0.5 * (1. + cospow((tmhr - phase) * (2. * np.pi / 24.), exponent)))
    flr = min(0.1, self.chi2_mean_std(0.02, 0.002))
    val = flr + (1. - flr) * diurn
    return npr.uniform() < val

def pop_link(self, remove_link_fraction=0.95, current_preferred_domain_fraction=0.1):
    """
    Pop a link from the collected list.
    If `self.current_preferred_domain` is defined, then a link from this
    domain is drawn a fraction of the time.
    """
    url = None
    if hasattr(self, 'current_preferred_domain') \
            and npr.uniform() < current_preferred_domain_fraction:
        # loop until `self.current_preferred_domain` yields a url
        while url is None and len(self.domain_links) > 0:
            url = self.draw_link_from_domain(self.current_preferred_domain)
            if url is None:
                self.current_preferred_domain = self.draw_domain()
    if url is None:
        url = self.draw_link()
    if npr.uniform() < remove_link_fraction:  # 95% 1 GET, ~5% 2 GETs, 0.2% three GETs
        self.remove_link(url)  # pop a random item from the stack
    return url

def noisify(sound, noise):
    loudness = random.uniform(0.0, 10.0)
    start = random.randint(0, int((noise.duration_seconds - sound.duration_seconds) * 1000))
    speed = 1 + abs(numpy.random.normal(0.0, 0.1))
    noisy = sound.speedup(playback_speed=speed).overlay(noise[start:] + loudness)
    return noisy

# Delta and acceleration

def _init_random_nodes(self):
    '''
    Initialize uniformly random node coordinates
    '''
    s = self.n * 5  # number of random samples
    # generate random points
    self.nodes = np.array(1. * rnd.uniform(0, 1, size=(s, 2)),
                          dtype=np.float32)

def test_update_data():
    ds = fake_random_ds(64, nprocs=8)
    ds.index
    dims = (32, 32, 32)
    grid_data = [{"temperature": uniform(size=dims)}
                 for i in range(ds.index.num_grids)]
    ds.index.update_data(grid_data)
    prj = ds.proj("temperature", 2)
    prj["temperature"]
    dd = ds.all_data()
    profile = create_profile(dd, "density", "temperature", 10)
    profile["temperature"]