The following 6 code examples, extracted from open-source Python projects, illustrate how to use numpy.random.random_integers().
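Note that random_integers() draws from the closed interval [low, high] (or [1, low] when high is omitted) and has been deprecated since NumPy 1.11. A minimal sketch of equivalent calls with the currently supported APIs:

import numpy as np

a = np.random.random_integers(1, 6, size=10)                # deprecated: inclusive upper bound
b = np.random.randint(1, 6 + 1, size=10)                    # replacement: half-open interval, so add 1
c = np.random.default_rng().integers(1, 6, size=10, endpoint=True)  # Generator API, inclusive via endpoint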
import numpy as np
from numpy.random import random_sample, random_integers

def corrupt_image(img, MAR_prob=0, min_rects=0, max_rects=0, min_width=0, max_width=0):
    new_img = img.copy()
    mask = np.zeros(img.shape[0:2], dtype=bool)  # np.bool was removed in NumPy 1.24
    if MAR_prob > 0:
        # missing-at-random corruption: drop each pixel with probability MAR_prob
        mask[(random_sample(mask.shape) < MAR_prob)] = True
    if max_rects > 0 and max_width > 0:
        h, w = mask.shape
        num_rects = random_integers(min_rects, max_rects)
        for i in range(num_rects):
            px1 = random_integers(0, w - min(max(min_width, 1), w))
            py1 = random_integers(0, h - min(max(min_width, 1), h))
            px2 = px1 + (min_width - 1) + random_integers(0, max(min(w - px1 - min_width, max_width - min_width), 0))
            py2 = py1 + (min_width - 1) + random_integers(0, max(min(h - py1 - min_width, max_width - min_width), 0))
            if px1 <= px2 and py1 <= py2:
                mask[py1:py2, px1:px2] = True
            else:
                # One of the sides has length 0, so we shouldn't remove any pixels.
                pass
    if len(new_img.shape) == 2:
        new_img[mask] = 0
    else:
        new_img[mask, :] = 0
    return (new_img, 1.0 * mask)
import numpy as np
from numpy.random import random_sample, random_integers

def corrupt_image(img, MAR_prob=0, min_rects=0, max_rects=0, min_width=0, max_width=0, apply_to_all_channels=False):
    def generate_channel_mask():
        mask = np.zeros(img.shape[0:2], dtype=bool)
        if MAR_prob > 0:
            mask[(random_sample(mask.shape) < MAR_prob)] = True
        if max_rects > 0 and max_width > 0:
            h, w = mask.shape
            num_rects = random_integers(min_rects, max_rects)
            for i in range(num_rects):
                px1 = random_integers(0, w - min(max(min_width, 1), w))
                py1 = random_integers(0, h - min(max(min_width, 1), h))
                px2 = px1 + min_width + random_integers(0, max(min(w - px1 - min_width, max_width - min_width), 0))
                py2 = py1 + min_width + random_integers(0, max(min(h - py1 - min_width, max_width - min_width), 0))
                if px1 <= px2 and py1 <= py2:
                    mask[py1:py2, px1:px2] = True
                else:
                    # One of the sides has length 0, so we shouldn't remove any pixels.
                    pass
        return mask

    new_img = img.copy()
    channels = 1 if len(new_img.shape) == 2 else new_img.shape[-1]
    global_mask = np.zeros(img.shape, dtype=bool)
    if channels == 1 or apply_to_all_channels:
        # one mask shared across all channels
        mask = generate_channel_mask()
        if channels == 1:
            global_mask[:, :] = mask
        else:
            for i in range(channels):  # xrange in the Python 2 original
                global_mask[:, :, i] = mask
    else:
        # an independent mask per channel
        for i in range(channels):
            global_mask[:, :, i] = generate_channel_mask()
    new_img[global_mask] = 0
    return (new_img, 1.0 * global_mask)
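A minimal usage sketch for either corrupt_image variant above (the test image is a hypothetical random array, not from the source project):

import numpy as np

img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # hypothetical test image
corrupted, mask = corrupt_image(img, MAR_prob=0.05, min_rects=1, max_rects=3,
                                min_width=4, max_width=12)
print(corrupted.shape, mask.mean())  # mask.mean() is the fraction of entries zeroed out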
from numpy import random

def SaltAndPepper(src, percentage):
    # Note: this modifies src in place; pass src.copy() to keep the original.
    NoiseImg = src
    NoiseNum = int(percentage * src.shape[0] * src.shape[1])
    for i in range(NoiseNum):
        randX = random.random_integers(0, src.shape[0] - 1)
        randY = random.random_integers(0, src.shape[1] - 1)
        if random.random_integers(0, 1) == 0:
            NoiseImg[randX, randY] = 0    # pepper
        else:
            NoiseImg[randX, randY] = 255  # salt
    return NoiseImg
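A minimal usage sketch, assuming OpenCV is available (the file names are hypothetical placeholders):

import cv2
from numpy import random

img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
noisy = SaltAndPepper(img, 0.02)                     # corrupt roughly 2% of the pixels
cv2.imwrite('noisy.png', noisy)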
import numpy as np

def RGB_PCA(images):
    '''
    Source: https://github.com/Thrandis/ift6266h15/blob/1cc3fc6164dc6c54936971935027cd447e2cd81f/dataset_augmentation.py
    RGB PCA and variations from Alex's paper
    '''
    pixels = images.reshape(-1, images.shape[-1])
    # sample one million pixels; the upper bound of random_integers is
    # inclusive, so it must be shape[0] - 1 to stay in range
    idx = np.random.random_integers(0, pixels.shape[0] - 1, 1000000)
    pixels = [pixels[i] for i in idx]
    pixels = np.array(pixels, dtype=np.uint8).T
    m = np.mean(pixels) / 256.
    C = np.cov(pixels) / (256. * 256.)
    l, v = np.linalg.eig(C)
    return l, v, m
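The companion step from Krizhevsky et al.'s paper perturbs every pixel along the principal components returned above; the helper below is a hypothetical reconstruction, not part of the extract:

import numpy as np

def RGB_variations(image, eig_val, eig_vec):
    # Hypothetical sketch: add a random multiple of the RGB principal
    # components to the whole image, as in Krizhevsky et al. (2012).
    alpha = np.random.randn(3) * 0.1      # per-channel scale, sigma = 0.1 (assumed)
    offset = eig_vec @ (alpha * eig_val)  # offset in RGB space
    return image + offset                 # broadcasts over an H x W x 3 image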
import numpy as np
from numpy import random

def get_median(cls, item_set, axis):
    """Computes the median of the given item_set, either exactly or
    approximately (by taking the median of a random sample of constant
    size _median_sample).

    Args:
        item_set: list of items whose median is to be computed
        axis: axis on which to compute the median (0 or 1)

    Returns:
        left_item_set, right_item_set, median
    """
    key = cls._axis_keys[axis]
    if cls.use_approx_median and len(item_set) >= cls._median_sample * 5:
        # take a random sample of the items of fixed size
        rand_indices = random.random_integers(0, len(item_set) - 1, size=cls._median_sample)
        sample_item_set = [item_set[ind] for ind in rand_indices]
        del rand_indices
        # get the points whose median is to be computed
        points = [item[key] for item in sample_item_set]
        args_sorted = np.argsort(points)
        median_index = len(points) // 2
        median = sample_item_set[args_sorted[median_index]]
        key_value = median[key]
        # partition the full item_set around the sampled median
        left_item_set = []
        right_item_set = []
        for item in item_set:
            if item['id'] != median['id']:
                if item[key] <= key_value:
                    left_item_set.append(item)
                else:
                    right_item_set.append(item)
        return left_item_set, right_item_set, median
    else:
        sample_item_set = item_set
        # get the points whose median is to be computed
        points = [item[key] for item in sample_item_set]
        args_sorted = np.argsort(points)
        median_index = len(points) // 2
        left_item_set = [sample_item_set[i] for i in args_sorted[:median_index]]
        right_item_set = [sample_item_set[i] for i in args_sorted[median_index + 1:]]
        median = sample_item_set[args_sorted[median_index]]
        return left_item_set, right_item_set, median
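A minimal sketch of exercising get_median (the host class and its attributes are hypothetical stand-ins for whatever spatial-index class this method was extracted from):

import numpy as np
from numpy import random

class Tree:
    _axis_keys = ('x', 'y')   # hypothetical mapping from axis index to item key
    use_approx_median = True
    _median_sample = 100
    get_median = classmethod(get_median)  # reuse the function defined above

items = [{'id': i, 'x': random.random_integers(0, 999), 'y': random.random_integers(0, 999)}
         for i in range(1000)]
left, right, median = Tree.get_median(items, axis=0)
print(len(left), len(right), median['x'])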
from collections import defaultdict
from time import time

import numpy.random as nr
from sklearn.cluster import KMeans, MiniBatchKMeans

def compute_bench(samples_range, features_range):
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            data = nr.random_integers(-50, 50, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
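A minimal sketch of driving the benchmark (the ranges below are illustrative, not from the source project):

results = compute_bench(samples_range=[500, 1000], features_range=[10, 50])
for name, values in results.items():
    print('%s: %s' % (name, values))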