The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.dstack().
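Before the project examples, a minimal standalone sketch (mine, not from any of the projects below) of what numpy.dstack() actually does: it stacks arrays along a third axis, so k arrays of shape (m, n) become one array of shape (m, n, k), and 1-D inputs are first promoted to shape (1, n, 1).

import numpy as np

a = np.array([[1, 2], [3, 4]])   # shape (2, 2)
b = np.array([[5, 6], [7, 8]])   # shape (2, 2)

stacked = np.dstack((a, b))      # shape (2, 2, 2)
print(stacked[0, 0])             # [1 5] -- one "pixel" across both layers

# 1-D inputs are promoted, so the result keeps a leading length-1 axis:
print(np.dstack(([1, 2], [3, 4])).shape)  # (1, 2, 2)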
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if self.dataset['type'] == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
            else:
                # mask
                mask = COCO.decodeMask(ann['segmentation'])
                img = np.ones((mask.shape[0], mask.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, mask * 0.5)))
        p = PatchCollection(polygons, facecolors=color,
                            edgecolors=(0, 0, 0, 1), linewidths=3, alpha=0.4)
        ax.add_collection(p)
    if self.dataset['type'] == 'captions':
        for ann in anns:
            print(ann['caption'])
def compute_eng_color(img, rgb_weights):
    """
    Computes the energy of an image using its color properties

    Args:
        img (n,m,4 numpy matrix): RGB image with an additional mask layer.
        rgb_weights (indexable by channel): per-channel weights for the R, G
            and B values

    Returns:
        n,m numpy matrix: color energy map of the provided image
    """
    eng = np.dstack((
        img[:, :, 0] * rgb_weights[0],
        img[:, :, 1] * rgb_weights[1],
        img[:, :, 2] * rgb_weights[2]
    ))
    eng = np.sum(eng, axis=2)
    return eng
def read_naip(file_path, bands_to_use):
    """
    Read in a NAIP, based on www.machinalis.com/blog/python-for-geospatial-data-processing.

    bands_to_use is an array like [0, 0, 0, 1], designating whether to use
    each band (R, G, B, IR).
    """
    raster_dataset = gdal.Open(file_path, gdal.GA_ReadOnly)
    bands_data = []
    index = 0
    for b in range(1, raster_dataset.RasterCount + 1):
        band = raster_dataset.GetRasterBand(b)
        if bands_to_use[index] == 1:
            bands_data.append(band.ReadAsArray())
        index += 1
    bands_data = numpy.dstack(bands_data)
    return raster_dataset, bands_data
def renderRelighting(renderer, albedo, spec, roughness, normal):
    renderer.SetPointLight(0, 0.27, -0.25, 1, 0, 0.6, 0.6, 0.6)
    renderer.SetAlbedoMap(albedo)
    renderer.SetSpecValue(spec)
    renderer.SetRoughnessValue(roughness)
    normal = normal * 2.0 - 1.0
    normal[0] = normal[0] * 2.5
    norm = np.linalg.norm(normal, axis=2)  # renamed from `len`, which shadowed the builtin
    normal = normal / np.dstack((norm, norm, norm))
    normal = 0.5 * (normal + 1.0)
    renderer.SetNormalMap(normal * 2.0 - 1.0)
    img = renderer.Render()

    renderer.SetEnvLightByID(43, 30, -10.0)
    renderer.SetAlbedoMap(albedo)
    renderer.SetSpecValue(spec)
    renderer.SetRoughnessValue(roughness)
    renderer.SetNormalMap(normal * 2.0 - 1.0)
    img_1 = renderer.Render()
    return 1.2 * img + 0.8 * img_1
def new_image(self, image, diag=False):
    if isinstance(image, str):
        self.image_file = image
        self.image = np.array(PIL.Image.open(image))
    else:
        self.image_file = None
        self.image = image
    # Get the image into the right format.
    if self.image.dtype != np.uint8:
        raise TypeError('Image %s dtype is not unsigned 8 bit integer, image.dtype is %s.' % (
            '"%s"' % self.image_file if self.image_file is not None else 'argument',
            self.image.dtype))
    self.image = np.squeeze(self.image)
    if len(self.image.shape) == 2:
        self.image = np.dstack([self.image] * 3)
    self.preprocess_edges()
    self.randomize_view()
    if diag:
        plt.figure('Image')
        plt.title('Image')
        plt.imshow(self.image, interpolation='nearest')
        plt.show()
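An aside of mine on the idiom above: np.dstack([gray] * 3) replicates a single-channel image into three identical channels, which is the usual way these examples turn grayscale data into an RGB-shaped array.

import numpy as np

gray = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)  # (H, W)
rgb = np.dstack([gray] * 3)                                    # (H, W, 3)
assert rgb.shape == (4, 4, 3)
assert np.array_equal(rgb[..., 1], gray)  # every channel is the same plane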
def _fix_alpha_channel(self):
    # This is a fix for a bug where the alpha channel was dropped.
    colors3to4 = [(c[:3], c[3]) for c in self.names.keys()]
    colors3to4 = dict(colors3to4)
    assert len(colors3to4) == len(self.names)  # Dropped alpha channel causes colors to collide :(
    for lbl in self.labels:
        if lbl is None:
            continue  # No label file created yet.
        img = Image.open(lbl)
        size = img.size
        img = np.array(img)
        if img.shape[2] == 4:
            continue  # Image has alpha channel, good.
        elif img.shape[2] == 3:
            # Lookup each (partial) color and find what its alpha should be.
            alpha = np.apply_along_axis(lambda c: colors3to4[tuple(c)], 2, img)
            data = np.dstack([img, np.array(alpha, dtype=np.uint8)])
            new_img = Image.frombuffer("RGBA", size, data, "raw", "RGBA", 0, 1)
            new_img.save(lbl)
            print("FIXED", lbl)
def plot_img_with_mask(img, mask, mask2=None, line_size=2):
    kernel = np.ones((line_size, line_size), dtype=np.uint8)
    if np.max(img) <= 1.0:
        img = np.array(img * 255, dtype=np.uint8)
        mask = np.array(mask * 255, dtype=np.uint8)
    color_img = np.dstack((img, img, img))
    edges = binary_dilation(canny(mask, sigma=1.0), kernel)
    color_img[edges, 0] = 255
    color_img[edges, 1] = 0
    color_img[edges, 2] = 0
    if mask2 is not None:
        mask2 = np.array(mask2 * 255, dtype=np.uint8)
        edges2 = binary_dilation(canny(mask2, sigma=1.0), kernel)
        color_img[edges2, 2] = 255
        color_img[edges2, 0:2] = 0
    plt.imshow(color_img)
def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
    x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
    y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
    X, Y = np.meshgrid(x, y)
    Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))

    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                           cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
    ax.set_xlabel('Position')
    ax.set_ylabel('Velocity')
    ax.set_zlabel('Value')
    ax.set_title("Mountain \"Cost To Go\" Function")
    fig.colorbar(surf)
    plt.show()
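The np.dstack([X, Y]) call above pairs the meshgrid outputs so that each grid cell holds one (x, y) coordinate, which np.apply_along_axis then feeds to the estimator. A standalone sketch of mine showing the same pattern, with a placeholder function standing in for estimator.predict:

import numpy as np

x = np.linspace(-1, 1, 5)
y = np.linspace(-1, 1, 4)
X, Y = np.meshgrid(x, y)              # each of shape (4, 5)
XY = np.dstack([X, Y])                # (4, 5, 2): one (x, y) pair per cell

f = lambda p: p[0] ** 2 + p[1] ** 2   # placeholder for estimator.predict
Z = np.apply_along_axis(f, 2, XY)     # (4, 5) surface values
assert Z.shape == X.shape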
def test_encode_data_roundtrip():
    minrand, maxrand = np.sort(np.random.randint(-427, 8848, 2))

    testdata = np.round((np.sum(
        np.dstack(np.indices((512, 512), dtype=np.float64)),
        axis=2) / (511. + 511.)) * maxrand, 2) + minrand

    baseval = -1000
    interval = 0.1

    rtripped = _decode(data_to_rgb(testdata.copy(), baseval, interval), baseval, interval)

    assert testdata.min() == rtripped.min()
    assert testdata.max() == rtripped.max()
def process_one(image_dir, page_dir, output_dir, basename, colormap, color_labels):
    image_filename = os.path.join(image_dir, "{}.jpg".format(basename))
    page_filename = os.path.join(page_dir, "{}.xml".format(basename))
    page = PAGE.parse_file(page_filename)
    text_lines = [tl for tr in page.text_regions for tl in tr.text_lines]
    graphic_regions = page.graphic_regions
    img = imread(image_filename, mode='RGB')

    gt = np.zeros_like(img[:, :, 0])
    mask1 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if 'comment' in tl.id], 1)
    mask2 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if 'comment' not in tl.id], 1)
    mask3 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in graphic_regions], 1)
    arr = np.dstack([mask1, mask2, mask3])

    gt_img = convert_array_masks(arr, colormap, color_labels)
    save_and_resize(img, os.path.join(output_dir, 'images', '{}.jpg'.format(basename)))
    save_and_resize(gt_img, os.path.join(output_dir, 'labels', '{}.png'.format(basename)),
                    nearest=True)
def __init__(self, m):
    k = 4 * m + 3
    self.degree = k
    theta = 2 * numpy.pi * numpy.arange(1, k + 2) / (k + 1)
    p, w = numpy.polynomial.legendre.leggauss(m + 1)
    # scale points to [r0, r1] (where r0 = 0, r1 = 1 for now)
    p = numpy.sqrt(0.5 * (p + 1.0))
    p_theta = numpy.dstack(numpy.meshgrid(p, theta)).reshape(-1, 2).T
    self.points = numpy.column_stack([
        p_theta[0] * numpy.cos(p_theta[1]),
        p_theta[0] * numpy.sin(p_theta[1]),
    ])
    # When integrating between 0 and 1, the weights are exactly the
    # Gauss-Legendre weights, scaled according to the disk area.
    self.weights = numpy.tile(0.5 * numpy.pi / (k + 1) * w, k + 1)
    return
def _process(self, img, key=None):
    if self.p.fast:
        return self._fast_process(img, key)
    proj = self.p.projection
    if proj == img.crs:
        return img
    x0, x1 = img.range(0)
    y0, y1 = img.range(1)
    xn, yn = img.interface.shape(img, gridded=True)[:2]
    px0, py0, px1, py1 = project_extents((x0, y0, x1, y1), img.crs, proj)
    src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)
    arrays = []
    for vd in img.vdims:
        arr = img.dimension_values(vd, flat=False)
        projected, extents = warp_array(arr, proj, img.crs, (xn, yn), src_ext, trgt_ext)
        arrays.append(projected)
    projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]
    data = np.flipud(projected)
    bounds = (extents[0], extents[2], extents[1], extents[3])
    return img.clone(data, bounds=bounds, kdims=img.kdims, vdims=img.vdims, crs=proj)
def geo_mesh(element):
    """
    Get mesh data from a 2D Element, ensuring that if the data lies on a
    cylindrical coordinate system and wraps globally, the data actually
    wraps around.
    """
    if len(element.vdims) > 1:
        xs, ys = (element.dimension_values(i, False, False) for i in range(2))
        zs = np.dstack([element.dimension_values(i, False, False)
                        for i in range(2, 2 + len(element.vdims))])
    else:
        xs, ys, zs = (element.dimension_values(i, False, False) for i in range(3))
    lon0, lon1 = element.range(0)
    if isinstance(element.crs, ccrs._CylindricalProjection) and (lon1 - lon0) == 360:
        xs = np.append(xs, xs[0:1] + 360, axis=0)
        zs = np.ma.concatenate([zs, zs[:, 0:1]], axis=1)
    return xs, ys, zs
def test_concat(make_data):
    """Test concatenation layer."""
    x, _, X = make_data
    # This replicates the input layer behaviour
    f = ab.InputLayer('X', n_samples=3)
    g = ab.InputLayer('Y', n_samples=3)

    catlayer = ab.Concat(f, g)
    F, KL = catlayer(X=x, Y=x)

    tc = tf.test.TestCase()
    with tc.test_session():
        forked = F.eval()
        orig = X.eval()
        assert forked.shape == orig.shape[0:2] + (2 * orig.shape[2],)
        assert np.all(forked == np.dstack((orig, orig)))
        assert KL.eval() == 0.0
def test_100_inputs(self):
    """Test that 100 input rings work"""
    def dstack_handler(*args):
        """Stack all input arrays"""
        return np.dstack(tuple(args))
    number_inputs = 100
    connections = {'in_1': 0, 'out_1': 1}
    for index in range(number_inputs):
        self.blocks.append([
            NumpyBlock(function=np.copy),
            {'in_1': 0, 'out_1': index + 2}])
        connections['in_' + str(index + 2)] = index + 2
    self.blocks.append([
        NumpyBlock(function=dstack_handler, inputs=len(connections) - 1),
        connections])
    self.expected_result = np.dstack((self.test_array,) * (len(connections) - 1)).ravel()
def find_intersections(A, B):
    arrayMinimum = lambda x1, x2: np.where(x1 < x2, x1, x2)
    arrayMaximum = lambda x1, x2: np.where(x1 > x2, x1, x2)
    arrayAll = lambda abools: np.dstack(abools).all(axis=2)
    slope = lambda line: (lambda d: d[:, 1] / d[:, 0])(np.diff(line, axis=0))

    x11, x21 = np.meshgrid(A[:-1, 0], B[:-1, 0])
    x12, x22 = np.meshgrid(A[1:, 0], B[1:, 0])
    y11, y21 = np.meshgrid(A[:-1, 1], B[:-1, 1])
    y12, y22 = np.meshgrid(A[1:, 1], B[1:, 1])
    m1, m2 = np.meshgrid(slope(A), slope(B))

    # Here we use masked arrays to properly treat the rare case where a
    # line segment is perfectly vertical
    _m1 = np.ma.masked_array(m1, m1 == -np.inf)
    _m2 = np.ma.masked_array(m2, m2 == -np.inf)

    yi = (_m1 * (x21 - x11 - y21 / _m2) + y11) / (1 - _m1 / _m2)
    xi = (yi - y21) / _m2 + x21

    xconds = (arrayMinimum(x11, x12) < xi, xi <= arrayMaximum(x11, x12),
              arrayMinimum(x21, x22) < xi, xi <= arrayMaximum(x21, x22))
    yconds = (arrayMinimum(y11, y12) < yi, yi <= arrayMaximum(y11, y12),
              arrayMinimum(y21, y22) < yi, yi <= arrayMaximum(y21, y22))

    return xi[arrayAll(xconds)], yi[arrayAll(yconds)]
def test_iou():
    # 3 x 5 x 2
    grid = np.dstack(np.meshgrid(10 * np.arange(5), 10 * np.arange(3)))
    boxes = np.tile(
        np.expand_dims(np.expand_dims(np.array([10, 10]), 0), 0),
        [3, 5, 1]
    )
    proposals = np.reshape(np.concatenate([grid, boxes], axis=2), (-1, 4))
    proposals = tf.constant(proposals, tf.float32)

    ground_truth = tf.constant(np.array([
        [4, 4, 10, 10],
        [10, 10, 10, 10]
    ]), tf.float32)

    iou_metric = sess.run(model.iou(ground_truth, 2, proposals, 15))
    assert equal(iou_metric[0, 0], 0.2195)
    assert equal(iou_metric[1, 0], 0.1363)
    assert equal(iou_metric[5, 0], 0.1363)
    assert equal(iou_metric[6, 0], 0.0869)
    assert equal(iou_metric[6, 1], 1.0)

    for (boxes, count) in [(proposals, 15), (ground_truth, 2)]:
        iou_metric = sess.run(model.iou(boxes, count, boxes, count))
        assert np.all(np.diag(iou_metric) == 1)
def _init_random_maze(self):
    # init goal position
    goal = np.zeros_like(self._level)
    while True:
        row_idx = np.random.randint(0, self._level.shape[0])
        col_idx = np.random.randint(0, self._level.shape[1])
        if self._level[row_idx, col_idx] == 0:
            goal[row_idx, col_idx] = 1
            self._goal_pos = np.array([row_idx, col_idx])
            break

    # init player position
    player = np.zeros_like(self._level)
    while True:
        row_idx = np.random.randint(0, self._level.shape[0])
        col_idx = np.random.randint(0, self._level.shape[1])
        if self._level[row_idx, col_idx] == 0 and goal[row_idx, col_idx] == 0:
            player[row_idx, col_idx] = 1
            self._player_pos = np.array([row_idx, col_idx])
            break

    # stack all together in depth (along third axis)
    self._maze = np.dstack((self._level, goal, player))
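A minimal sketch of mine (the 5x5 shape is an assumption) of the layout _init_random_maze builds: three same-shaped 2-D planes stacked into one (H, W, 3) observation, one channel per semantic layer.

import numpy as np

level = np.zeros((5, 5))
goal = np.zeros_like(level)
goal[1, 3] = 1
player = np.zeros_like(level)
player[4, 0] = 1

maze = np.dstack((level, goal, player))    # (5, 5, 3)
assert np.array_equal(maze[..., 1], goal)  # channel 1 is the goal plane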
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    Draw the given lines onto the image; the line points are assumed to
    have been averaged & extrapolated already.
    """
    if len(img.shape) == 2:
        # grayscale image -> make a "color" image out of it
        img = np.dstack((img, img, img))
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 >= 0 and x1 < img.shape[1] and \
               y1 >= 0 and y1 < img.shape[0] and \
               x2 >= 0 and x2 < img.shape[1] and \
               y2 >= 0 and y2 < img.shape[0]:
                cv2.line(img, (x1, y1), (x2, y2), color, thickness)
            else:
                print('BAD LINE (%d, %d, %d, %d)' % (x1, y1, x2, y2))
def sort_eigensystem(parameters_dict):
    eigenvectors = np.stack(tensor_spherical_to_cartesian(np.squeeze(parameters_dict['theta']),
                                                          np.squeeze(parameters_dict['phi']),
                                                          np.squeeze(parameters_dict['psi'])),
                            axis=0)
    eigenvalues = np.atleast_2d(np.squeeze(np.dstack([parameters_dict['d'],
                                                      parameters_dict['dperp0'],
                                                      parameters_dict['dperp1']])))

    ranking = np.atleast_2d(np.squeeze(np.argsort(eigenvalues, axis=1, kind='mergesort')[:, ::-1]))
    voxels_range = np.arange(ranking.shape[0])
    sorted_eigenvalues = np.concatenate([eigenvalues[voxels_range, ranking[:, ind], None]
                                         for ind in range(ranking.shape[1])], axis=1)
    sorted_eigenvectors = np.stack([eigenvectors[ranking[:, ind], voxels_range, :]
                                    for ind in range(ranking.shape[1])])

    return sorted_eigenvalues, sorted_eigenvectors, ranking
def draw_matches(self, im1, pos1, im2, pos2, matches, filename="matches.jpg"):
    self._log("drawing matches into '%s'..." % filename)
    row1, col1 = im1.shape
    row2, col2 = im2.shape
    im_out = np.zeros((max(row1, row2), col1 + col2, 3), dtype=np.uint8)
    im_out[:row1, :col1] = np.dstack([im1] * 3)
    im_out[:row2, col1:] = np.dstack([im2] * 3)
    l = len(matches)
    for ind, (i, j, d) in list(enumerate(matches))[::-1]:
        d /= para.descr_match_threshold  # map to [0, 1]
        _pos1, _pos2 = pos1[i], pos2[j]
        color = hsv_to_rgb(int(d * 120 - 120), 1, 1 - d / 3)
        color = [int(c * 255) for c in color]
        cv2.line(im_out, (_pos1[1], _pos1[0]), (_pos2[1] + col1, _pos2[0]), color, 1)
    cv2.imwrite(filename, im_out)

##########################
# Utility
##########################
def get_cells_for_tile(self, tile_h, tile_v):
    """
    Returns the list of cells covered by the given MODIS tile.
    The tile is identified by its MODIS grid coordinates.
    """
    range_x = np.arange(tile_h * self.n_cells_per_tile_x,
                        (tile_h + 1) * self.n_cells_per_tile_x)
    range_y = np.arange(tile_v * self.n_cells_per_tile_y,
                        (tile_v + 1) * self.n_cells_per_tile_y)
    cells_ij = np.dstack(
        np.meshgrid(range_y, range_x, indexing='ij')).reshape(-1, 2)
    cells = np.ravel_multi_index(
        (cells_ij[:, 0], cells_ij[:, 1]),
        (self.n_cells_y, self.n_cells_x)
    )
    # sanity check
    assert len(cells) == self.n_cells_per_tile_x * self.n_cells_per_tile_y
    return cells
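The np.dstack(np.meshgrid(..., indexing='ij')).reshape(-1, 2) idiom above turns two index ranges into a flat list of (row, col) pairs. A standalone sketch of mine:

import numpy as np

rows = np.arange(2)   # 0, 1
cols = np.arange(3)   # 0, 1, 2
pairs = np.dstack(np.meshgrid(rows, cols, indexing='ij')).reshape(-1, 2)
# array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
assert pairs.shape == (2 * 3, 2)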
def test_nonrectangular_add(self):
    rgba1 = np.ones((64, 1, 4))
    z1 = np.expand_dims(np.arange(64.), 1)

    rgba2 = np.zeros((64, 1, 4))
    z2 = np.expand_dims(np.arange(63., -1., -1.), 1)

    exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
    exact_rgba = np.expand_dims(exact_rgba, 1)
    exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))

    exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
    exact_z = np.expand_dims(exact_z, 1)

    buff1 = ZBuffer(rgba1, z1)
    buff2 = ZBuffer(rgba2, z2)
    buff = buff1 + buff2

    assert_almost_equal(buff.rgba, exact_rgba)
    assert_almost_equal(buff.z, exact_z)
def log_mat(x, n, g_coeff, c_1, const):
    with np.errstate(divide='ignore', invalid='ignore'):
        K = g_coeff.shape[0] - 1
        thres = 2 * c_1 * math.log(n) / n
        [T, X] = np.meshgrid(thres, x)
        ratio = np.clip(2 * X / T - 1, 0, 1)
        # force MATLAB-esque behavior with NaN, inf
        ratio[T == 0] = 1.0
        ratio[X == 0] = 0.0
        q = np.reshape(np.arange(K), [1, 1, K])
        g = np.tile(np.reshape(g_coeff, [1, 1, K + 1]), [c_1.shape[1], 1])
        g[:, :, 0] = g[:, :, 0] + np.log(thres)
        MLE = np.log(X) + (1 - X) / (2 * X * n)
        MLE[X == 0] = -np.log(n) - const
        tmp = (n * X[:, :, np.newaxis] - q) / (T[:, :, np.newaxis] * (n - q))
        polyApp = np.sum(np.cumprod(np.dstack([np.ones(T.shape + (1,)), tmp]),
                                    axis=2) * g, axis=2)
        polyFail = np.logical_or(np.isnan(polyApp), np.isinf(polyApp))
        polyApp[polyFail] = MLE[polyFail]
        return ratio * MLE + (1 - ratio) * polyApp
def getNeighborsSorted(self, id, objType):
    if id not in self.objectDict:
        return None
    distColumn, idColumn = self.kdTrees[objType][1].query(
        self.get(id).pos, k=self.maxSearch[objType])
    indexToID = self.kdTrees[objType][0]
    for i in range(0, idColumn.size):
        if distColumn[i] == float('inf'):
            idColumn = idColumn[:i]
            distColumn = distColumn[:i]
            break
        idColumn[i] = indexToID[idColumn[i]]
    return np.dstack([idColumn, distColumn])[0]
def MakePaddedSequenceTensorFromListArray(in_arr, pad_value=0., doWhitening=False,
                                          maxlen=None, padding='pre'):
    seq_list = numpy.array([])
    arr = in_arr
    if len(in_arr.shape) == 1:
        arr = numpy.array([in_arr])
    for i in range(arr.shape[1]):
        current = convertSequencesFromListArray(arr[:, i], dopad=True,
                                                doWhitening=doWhitening,
                                                maxlen=maxlen, padding=padding)
        if len(seq_list) == 0:
            seq_list = current
        else:
            seq_list = numpy.dstack((seq_list, current))
    return seq_list
def MakePaddedSequenceTensor(filename_list, doWhitening=False, maxlen=None):
    seq_list = numpy.array([])
    for fn in filename_list:
        current = convertSequences(fn, dopad=True, doWhitening=doWhitening, maxlen=maxlen)
        if len(seq_list) == 0:
            seq_list = current
        else:
            seq_list = numpy.dstack((seq_list, current))
    return seq_list

###################################################################################################
# manipulation functions
###################################################################################################
def ensurebuf(self, invalidate=True):
    if self.dbuf is None:
        if self.dpil is not None:
            self.dbuf = self.dpil.tostring("raw", "RGBX", 0, 1)
        elif self.darr is not None:
            data = self.scaledpixelarray(0, 255.999)
            self.dbuf = np.dstack((
                np.flipud(np.rollaxis(data, 1)).astype(np.uint8),
                np.zeros(self.shape[::-1], np.uint8)
            )).tostring()
        else:
            raise ValueError("No source data for conversion to buffer")
    if invalidate:
        self.dpil = None
        self.darr = None
        self.rangearr = None

## This private function ensures that there is a valid numpy array representation, converting from
#  one of the other representations if necessary, and invalidating the other representations if requested.
def asarray(self, axis=3):
    """
    This function returns the frames of this image stacked into a single
    numpy array along the specified axis.
    """
    # Get a list that contains the frames
    frame_list = self.frames.as_list()

    # Stack the frames into a 3D numpy array
    if axis == 3:
        return np.dstack(frame_list)
    elif axis == 2:
        return np.hstack(frame_list)
    elif axis == 1:
        return np.vstack(frame_list)
    elif axis == 0:
        return np.stack(frame_list)
    else:
        raise ValueError("'axis' parameter should be integer 0-3")

# -----------------------------------------------------------------
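A note of mine on the axis mapping above: for 2-D frames, np.dstack is equivalent to np.stack(frame_list, axis=-1), whereas the axis=0 branch puts the frame index first.

import numpy as np

frames = [np.random.random((8, 8)) for _ in range(4)]
assert np.array_equal(np.dstack(frames), np.stack(frames, axis=-1))  # (8, 8, 4)
assert np.stack(frames).shape == (4, 8, 8)                           # axis=0 variant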
def flowList(xFileNames, yFileNames):
    '''
    (x/y)FileNames: lists of the file names, in order, to get the flows from
    '''
    frameList = []
    if len(xFileNames) != len(yFileNames):
        print('XFILE!=YFILE ERROR: In', xFileNames[0])
    for i in range(0, min(len(xFileNames), len(yFileNames))):
        imgX = io.imread(xFileNames[i])
        imgY = io.imread(yFileNames[i])
        frameList.append(np.dstack((imgX, imgY)))
    frameList = np.array(frameList)
    return frameList
def vstack(tup):
    """Stacks arrays vertically.

    If an input array has one dimension, then the array is treated as a
    horizontal vector and stacked along the additional axis at the head.
    Otherwise, the array is stacked along the first axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked. Each array is
            converted by :func:`cupy.atleast_2d` before stacking.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.vstack`

    """
    return concatenate(cupy.atleast_2d(*tup), 0)
def disp_to_flowfile(disp, filename):
    """
    Write a KITTI disparity map to a flow file in .flo format
    :param disp: disparity matrix
    :param filename: the flow file name to save
    :return: None
    """
    f = open(filename, 'wb')
    magic = np.array([202021.25], dtype=np.float32)
    (height, width) = disp.shape[0:2]
    w = np.array([width], dtype=np.int32)
    h = np.array([height], dtype=np.int32)
    empty_map = np.zeros((height, width), dtype=np.float32)
    data = np.dstack((disp, empty_map))
    magic.tofile(f)
    w.tofile(f)
    h.tofile(f)
    data.tofile(f)
    f.close()
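disp_to_flowfile writes a Middlebury-style .flo file, with np.dstack pairing the disparity with an all-zero vertical component. To the best of my knowledge the layout is the float32 magic number 202021.25, then width and height as int32, then interleaved (u, v) float32 values; a hedged reader sketch of mine for round-tripping:

import numpy as np

def flowfile_to_uv(filename):
    """Read a .flo file back into a (height, width, 2) float32 array.
    Assumes the Middlebury layout written by disp_to_flowfile above."""
    with open(filename, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        assert magic[0] == 202021.25, 'invalid .flo magic number'
        width = int(np.fromfile(f, np.int32, count=1)[0])
        height = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * width * height)
    return data.reshape(height, width, 2)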
def _gen_centroids():
    a = np.arange(SSIZE / 18, SSIZE, SSIZE / 9)
    x, y = np.meshgrid(a, a)
    return np.dstack((y, x)).reshape((81, 2))
def build_data_auto_encoder(data, step, win_size):
    count = data.shape[1] // step  # integer window count, so it can size the array
    docX = np.zeros((count, 3, win_size))

    for i in range(0, data.shape[1] - win_size, step):
        c = i // step
        docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
        docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
        docX[c][2] = np.pad(
            (data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) *
            (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
            (0, 1), 'constant', constant_values=0)
    data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(
        docX.shape[0], docX.shape[1] * docX.shape[2])

    return data
def test_weighted_average(self):
    """ Test results of weighted average against numpy.average """
    stream = [np.random.random(size=(16, 16)) for _ in range(5)]

    with self.subTest('float weights'):
        weights = [random() for _ in stream]
        from_iaverage = last(iaverage(stream, weights=weights))
        from_numpy = np.average(np.dstack(stream), axis=2, weights=np.array(weights))
        self.assertTrue(np.allclose(from_iaverage, from_numpy))

    with self.subTest('array weights'):
        weights = [np.random.random(size=stream[0].shape) for _ in stream]
        from_iaverage = last(iaverage(stream, weights=weights))
        from_numpy = np.average(np.dstack(stream), axis=2, weights=np.dstack(weights))
        self.assertTrue(np.allclose(from_iaverage, from_numpy))
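These tests hinge on one detail of np.average: weights must be either a 1-D array whose length matches the reduced axis, or an array of the same shape as the stack, which is why per-pixel weights get dstacked too. A minimal check of mine:

import numpy as np

stream = [np.random.random((4, 4)) for _ in range(3)]
stack = np.dstack(stream)                       # (4, 4, 3)

# Scalar per-image weights: 1-D, length matching axis 2.
avg1 = np.average(stack, axis=2, weights=np.array([0.2, 0.3, 0.5]))

# Per-pixel weights: must be dstacked to the same (4, 4, 3) shape.
w_maps = [np.random.random((4, 4)) for _ in stream]
avg2 = np.average(stack, axis=2, weights=np.dstack(w_maps))
assert avg1.shape == avg2.shape == (4, 4)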
def test_ignore_nan(self):
    """ Test that NaNs are handled correctly """
    stream = [np.random.random(size=(16, 12)) for _ in range(5)]
    for s in stream:
        s[randint(0, 15), randint(0, 11)] = np.nan

    with catch_warnings():
        simplefilter('ignore')
        from_iaverage = last(iaverage(stream, ignore_nan=True))
    from_numpy = np.nanmean(np.dstack(stream), axis=2)
    self.assertTrue(np.allclose(from_iaverage, from_numpy))
def test_avg_no_weights(self):
    stream = [np.random.random(size=(16, 16)) for _ in range(5)]
    from_caverage = caverage(stream)
    from_numpy = np.average(np.dstack(stream), axis=2)
    self.assertTrue(np.allclose(from_caverage, from_numpy))
def test_weighted_average(self):
    """ Test results of weighted average against numpy.average """
    stream = [np.random.random(size=(16, 16)) for _ in range(5)]
    weights = [np.random.random(size=stream[0].shape) for _ in stream]
    from_caverage = caverage(stream, weights=weights)
    from_numpy = np.average(np.dstack(stream), axis=2, weights=np.dstack(weights))
    self.assertTrue(np.allclose(from_caverage, from_numpy))
def test_mean_random(self):
    """ Test cmean against numpy.mean on random data """
    stream = [np.random.random(size=(16, 16)) for _ in range(5)]
    from_cmean = cmean(stream)
    from_numpy = np.mean(np.dstack(stream), axis=2)
    self.assertTrue(np.allclose(from_cmean, from_numpy))
def test_against_numpy(self):
    """ Test that iprod() returns the same as numpy.prod() for various axis inputs """
    stream = [np.random.random((16, 16)) for _ in range(10)]
    stack = np.dstack(stream)

    for axis in (0, 1, 2, None):
        with self.subTest('axis = {}'.format(axis)):
            from_numpy = np.prod(stack, axis=axis)
            from_stream = last(iprod(stream, axis=axis))
            self.assertTrue(np.allclose(from_stream, from_numpy))
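Why axis=2 is the interesting case here: reducing a dstacked array along axis 2 is the same as reducing elementwise across the stream, as this sketch of mine confirms:

from functools import reduce

import numpy as np

stream = [np.random.random((4, 4)) for _ in range(3)]
prod_stacked = np.prod(np.dstack(stream), axis=2)   # reduce along the stack axis
prod_reduced = reduce(np.multiply, stream)          # elementwise product of the list
assert np.allclose(prod_stacked, prod_reduced)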
def iterate_cifar(shapeInput, batch_size, shuffle=False, train=True):
    # Iterator over patches of the CIFAR-10 data set.
    files = []
    if train:
        for j in range(1, 6):
            files.append('data_batch_' + str(j))
    else:
        for j in range(1, 6):
            files.append('test_batch')  # note: the single test batch is listed five times
    data_idxs = np.random.permutation(len(files))
    data = []
    labels = []
    for j in range(len(files)):
        data_idx = j
        if shuffle:
            data_idx = data_idxs[j]
        fname = files[data_idx]  # renamed from `file`/`dict` below to avoid shadowing builtins
        batch = unpickle('C:\\Paul\\cifar-10-batches-py\\' + fname)
        ls = batch['labels']
        idxs = np.random.permutation(len(batch['data']))
        for i in range(len(batch['data'])):
            if shuffle:
                idx = idxs[i]
            else:
                idx = i
            stackedArray = np.dstack((batch['data'][idx][0:1024].reshape(32, 32),
                                      batch['data'][idx][1024:1024 * 2].reshape(32, 32),
                                      batch['data'][idx][1024 * 2:1024 * 3].reshape(32, 32)))
            patches = image.extract_patches_2d(stackedArray,
                                               (shapeInput[0], shapeInput[1]),
                                               max_patches=1)
            # max = patches.max() + 1.e-6
            patches = patches.astype(np.float32) / 256.0
            data.append(patches)
            labels.append(ls[idx])
            if len(data) >= batch_size:
                array = np.asarray(data).reshape(-1, shapeInput[0] * shapeInput[1] * 3)
                data = []
                labels = []
                # print(len(batch['data']) * len(files) * patches.shape[0])
                yield array
def test_lab_full_gamut(self):
    a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100))
    L = np.ones(a.shape)
    lab = np.dstack((L, a, b))
    for value in [0, 10, 20]:
        lab[:, :, 0] = value
        with expected_warnings(['Color data out of range']):
            lab2xyz(lab)
def test_sun_rgbd():
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv
    from pybot.utils.io_utils import write_video
    from pybot.vision.color_utils import colormap

    directory = '/media/HD1/data/SUNRGBD/'
    dataset = SUNRGBDDataset(directory)

    colors = cv2.imread('data/sun3d/sun.png').astype(np.uint8)
    for (rgb, depth, label) in dataset.segmentationdb(None):
        cout = np.dstack([label, label, label])
        colored = cv2.LUT(cout, colors)
        cdepth = colormap(depth / 64000.0)
        for j in range(5):
            write_video('xtion.avi', np.hstack([rgb, cdepth, colored]))

    # for f in dataset.iteritems(every_k_frames=5):
    #     # vis = rgbd_data_uw.annotate(f)
    #     imshow_cv('frame', f.img, text='Image')
    #     imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
    #     imshow_cv('instance', (f.instance).astype(np.uint8), text='Instance')
    #     imshow_cv('label', (f.label).astype(np.uint8), text='Label')
    #     cv2.waitKey(100)

    return dataset
def valid_pixels(im, valid):
    """ Determine valid pixel (x,y) coords for the image """
    if valid.dtype != np.bool:
        raise ValueError('valid_pixels requires boolean image')
    assert im.shape == valid.shape
    H, W = valid.shape[:2]
    xs, ys = np.meshgrid(np.arange(W), np.arange(H))
    return np.dstack([xs[valid], ys[valid], im[valid]]).reshape(-1, 3)
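The trailing reshape(-1, 3) above compensates for a quirk worth knowing: np.dstack of 1-D arrays yields shape (1, n, k), not (n, k). A quick demonstration of mine:

import numpy as np

a = np.arange(3)
b = np.arange(3) * 10
c = np.arange(3) * 100
out = np.dstack([a, b, c])     # 1-D inputs become (1, 3, 3)
assert out.shape == (1, 3, 3)
records = out.reshape(-1, 3)   # hence the reshape(-1, 3) above
# array([[  0,   0,   0],
#        [  1,  10, 100],
#        [  2,  20, 200]])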
def reconstruct(self, depth):
    s = self.skip
    depth_sampled = depth[::s, ::s]
    assert depth_sampled.shape == self.xs.shape
    return np.dstack([self.xs * depth_sampled,
                      self.ys * depth_sampled,
                      depth_sampled])
def dense_optical_flow(im1, im2, pyr_scale=0.5, levels=3, winsize=5,
                       iterations=3, poly_n=5, poly_sigma=1.2, fb_threshold=-1,
                       mask1=None, mask2=None, flow1=None, flow2=None):
    if flow1 is None:
        fflow = cv2.calcOpticalFlowFarneback(to_gray(im1), to_gray(im2), pyr_scale,
                                             levels, winsize, iterations,
                                             poly_n, poly_sigma, 0)
    else:
        fflow = cv2.calcOpticalFlowFarneback(to_gray(im1), to_gray(im2), pyr_scale,
                                             levels, winsize, iterations,
                                             poly_n, poly_sigma, 0, flow1.copy())

    if mask1 is not None:
        fflow[~mask1.astype(np.bool)] = np.nan

    if fb_threshold > 0:
        H, W = im1.shape[:2]
        xs, ys = np.meshgrid(np.arange(W), np.arange(H))
        xys1 = np.dstack([xs, ys])
        xys2 = xys1 + fflow
        rflow = dense_optical_flow(im2, im1, pyr_scale=pyr_scale, levels=levels,
                                   winsize=winsize, iterations=iterations,
                                   poly_n=poly_n, poly_sigma=poly_sigma,
                                   fb_threshold=-1)
        if mask2 is not None:
            rflow[~mask2.astype(np.bool)] = np.nan
        xys1r = xys2 + rflow
        fb_bad = (np.fabs(xys1r - xys1) > fb_threshold).all(axis=2)
        fflow[fb_bad] = np.nan

    return fflow