We extracted the following 49 code examples from open-source Python projects to illustrate how to use skimage.draw.polygon().
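Before the project-specific examples, here is a minimal, self-contained sketch of the call itself. The image size and vertex coordinates below are arbitrary values chosen for illustration; polygon(r, c, shape=None) takes the row and column coordinates of the vertices and returns the row/column indices of every pixel inside the polygon.

import numpy as np
from skimage.draw import polygon

# A small blank image; the size and the vertices below are arbitrary example values.
img = np.zeros((10, 10), dtype=np.uint8)
r = np.array([1, 2, 8])   # row coordinates of the polygon vertices
c = np.array([1, 7, 4])   # column coordinates of the polygon vertices

# polygon() returns the indices of all pixels inside the polygon;
# passing shape= clips the returned indices to the image bounds.
rr, cc = polygon(r, c, shape=img.shape)
img[rr, cc] = 1

Most of the extracted examples below follow this same pattern: build row/column vertex arrays, call polygon(), then assign into a mask or label image at the returned indices.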
def get_labels(contours, shape, slices):
    z = [np.around(s.ImagePositionPatient[2], 1) for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]
    label_map = np.zeros(shape, dtype=np.float32)
    for con in contours:
        num = ROI_ORDER.index(con['name']) + 1
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            z_index = z.index(np.around(nodes[0, 2], 1))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label_map[z_index, rr, cc] = num
    return label_map
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if self.dataset['type'] == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
            else:
                # mask
                mask = COCO.decodeMask(ann['segmentation'])
                img = np.ones((mask.shape[0], mask.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, mask * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1), linewidths=3, alpha=0.4)
        ax.add_collection(p)
    if self.dataset['type'] == 'captions':
        for ann in anns:
            print(ann['caption'])
def annToRLE(self, ann):
    """
    Convert annotation which can be polygons, uncompressed RLE to RLE.
    :return: binary mask (numpy 2D array)
    """
    t = self.imgs[ann['image_id']]
    h, w = t['height'], t['width']
    segm = ann['segmentation']
    if type(segm) == list:
        # polygon -- a single object might consist of multiple parts
        # we merge all parts into one mask rle code
        rles = mask.frPyObjects(segm, h, w)
        rle = mask.merge(rles)
    elif type(segm['counts']) == list:
        # uncompressed RLE
        rle = mask.frPyObjects(segm, h, w)
    else:
        # rle
        rle = ann['segmentation']
    return rle
def create_polygon_mask(image, xcorners, ycorners):
    '''
    Give image and x/y corners to create a polygon mask
    image: 2d array
    xcorners: list, points of x corners
    ycorners: list, points of y corners
    Return:
    the polygon mask: 2d array, the polygon pixels with values 1 and others with 0
    Example:
    '''
    from skimage.draw import line_aa, line, polygon, circle
    imy, imx = image.shape
    bst_mask = np.zeros_like(image, dtype=bool)
    rr, cc = polygon(ycorners, xcorners)
    bst_mask[rr, cc] = 1
    # full_mask = ~bst_mask
    return bst_mask
def create_rectangle_mask(image, xcorners, ycorners):
    '''
    Give image and x/y corners to create a rectangle mask
    image: 2d array
    xcorners: list, points of x corners
    ycorners: list, points of y corners
    Return:
    the polygon mask: 2d array, the polygon pixels with values 1 and others with 0
    Example:
    '''
    from skimage.draw import line_aa, line, polygon, circle
    imy, imx = image.shape
    bst_mask = np.zeros_like(image, dtype=bool)
    rr, cc = polygon(ycorners, xcorners)
    bst_mask[rr, cc] = 1
    # full_mask = ~bst_mask
    return bst_mask
def nf_read_roi_zip(fname, dims):
    import zipfile
    with zipfile.ZipFile(fname) as zf:
        coords = [nf_read_roi(zf.open(n)) for n in zf.namelist()]

    def tomask(coords):
        mask = np.zeros(dims)
        coords = np.array(coords)
        # rr, cc = polygon(coords[:,0]+1, coords[:,1]+1)
        rr, cc = polygon(coords[:, 0] + 1, coords[:, 1] + 1)
        mask[rr, cc] = 1
        # mask[zip(*coords)] = 1
        return mask

    masks = np.array([tomask(s - 1) for s in coords])
    return masks
#%%
def segToMask(S, h, w):
    """
    Convert polygon segmentation to binary mask.
    :param S (float array): polygon segmentation mask
    :param h (int): target mask height
    :param w (int): target mask width
    :return: M (bool 2D array): binary mask
    """
    M = np.zeros((h, w), dtype=np.bool)
    for s in S:
        N = len(s)
        rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2]))  # (y, x)
        M[rr, cc] = 1
    return M
def cell_rect(self, x, y, areaf=1):
    padx = self.cellw * areaf
    pady = self.cellh * areaf
    fromx = round(x * self.cellw + padx)
    fromy = round(y * self.cellh + pady)
    tox = round(fromx + self.cellw - 2 * padx)
    toy = round(fromy + self.cellh - 2 * pady)
    r = np.array([fromy, fromy, toy, toy])
    c = np.array([fromx, tox, tox, fromx])
    return polygon(r, c)
def segToMask(S, h, w):
    """
    Convert polygon segmentation to binary mask.
    :param S (float array): polygon segmentation mask
    :param h (int): target mask height
    :param w (int): target mask width
    :return: M (bool 2D array): binary mask
    """
    M = np.zeros((h, w), dtype=np.bool)
    for s in S:
        N = len(s)
        rr, cc = polygon(np.array(s[1:N:2]).clip(max=h - 1),
                         np.array(s[0:N:2]).clip(max=w - 1))  # (y, x)
        M[rr, cc] = 1
    return M
def create_scribble_mask(x, y, frame, pct_area_shrink=0.1, pct_area_grow=0.1, winsz=3):
    """
    Arguments:
        x = x-coordinates of bbox
        y = y-coordinates of bbox
        frame = image to create scribble mask for
        pct_area_shrink = fraction of bbox area to shrink by:
            new area = old area * (1 - pct_area_shrink)
        pct_area_grow = fraction of bbox area to expand by:
            new area = old area * pct_area_grow
        winsz = window size of the alpha matting algorithm
    Output:
        scribble_mask = mask for pixels. Contains 1 for definite foreground,
            -1 for definite background, and 0 for unknown pixels.
    """
    ih, iw = frame.shape[:2]
    # expand bbox by 'pct_area_grow' area
    xe, ye = expand_region(x, y, pct_area_grow, ih, iw, winsz)
    # shrink bbox by 'pct_area_shrink' area
    xs, ys = expand_region(x, y, 1 - pct_area_shrink, ih, iw, winsz)
    # pixel indices (row, column) for expanded/contracted bboxes
    re, ce = polygon(xe, ye)  # pixels for expanded polygon
    rs, cs = polygon(xs, ys)  # pixels for contracted polygon
    # mask for outside of expanded bbox
    expanded_mask = np.zeros((ih, iw), dtype='bool')
    expanded_mask[ce, re] = True  # mark inside expanded bbox
    expanded_mask = ~expanded_mask  # invert to get outside
    # final scribble mask with -1 for outside expanded bbox, 1 inside, 0 otherwise
    scribble_mask = np.zeros((ih, iw), dtype='int')
    scribble_mask.flat[expanded_mask.ravel()] = -1
    scribble_mask[cs, rs] = 1
    return scribble_mask
def mask_coco2voc(coco_masks, im_height, im_width):
    voc_masks = np.zeros((len(coco_masks), im_height, im_width))
    for i, ann in enumerate(coco_masks):
        if type(ann) == list:
            # polygon
            m = segToMask(ann, im_height, im_width)
        else:
            # rle
            m = decodeMask(ann)
        voc_masks[i, :, :] = m
    return voc_masks
def create_box(cx, cy, wx, wy, roi_mask):
    Nmax = np.max(np.unique(roi_mask))
    for i, [cx_, cy_] in enumerate(list(zip(cx, cy))):
        # create boxes
        x = np.array([cx_ - wx, cx_ + wx, cx_ + wx, cx_ - wx])
        y = np.array([cy_ - wy, cy_ - wy, cy_ + wy, cy_ + wy])
        rr, cc = polygon(y, x)
        roi_mask[rr, cc] = i + 1 + Nmax
    return roi_mask
def create_wedge(image, center, radius, wcors, acute_angle=True):
    '''YG develop at June 18, 2017, @CHX
    Create a wedge by a combination of circle and a triangle defined by center and wcors
    wcors: [ [x1,x2,x3...], [y1,y2,y3...] ]
    '''
    from skimage.draw import line_aa, line, polygon, circle
    imy, imx = image.shape
    cy, cx = center
    x = [cx] + list(wcors[0])
    y = [cy] + list(wcors[1])
    maskc = np.zeros_like(image, dtype=bool)
    rr, cc = circle(cy, cx, radius, shape=image.shape)
    maskc[rr, cc] = 1
    maskp = np.zeros_like(image, dtype=bool)
    x = np.array(x)
    y = np.array(y)
    print(x, y)
    rr, cc = polygon(y, x)
    maskp[rr, cc] = 1
    if acute_angle:
        return maskc * maskp
    else:
        return maskc * ~maskp
def pixels(_, shape):
    return draw.polygon([_.x0, _.x0, _.x1, _.x1],
                        [_.y0, _.y1, _.y1, _.y0],
                        shape)
def fill_mask(mask, polygon_array, value, W, H, Xmax, Ymin):
    po = np.array(polygon_array)
    po[:, 0] = po[:, 0] / Xmax * W * W / (W + 1)
    po[:, 1] = po[:, 1] / Ymin * H * H / (H + 1)
    rr, cc = polygon(po[:, 1], po[:, 0], shape=mask.shape)
    mask[rr, cc] = value
    return mask
def gen_mask_image(multi, img, W, H, Xmax, Ymin):
    mask = np.zeros(img.shape, dtype='float')
    if type(multi) == Polygon:
        # Polygon
        mask = fill_polygon(multi, mask, W, H, Xmax, Ymin)
    else:
        # Multi polygon
        for poly in multi:
            mask = fill_polygon(poly, mask, W, H, Xmax, Ymin)
    return mask
def poly_line(self, coordinates, line_width, seg_noise=0):
    # Note that we subtract generation offsets from the curve coordinates
    # before calculating the line segment locations
    x, y = bezier_curve(coordinates[0, :] - self.cropsize[0],
                        coordinates[1, :] - self.cropsize[1],
                        self.n_segments)
    true_line = np.array([x, y])
    # Add some noise to the line so it's harder to over fit
    noise_line = true_line + seg_noise * np.random.randn(2, true_line.shape[1])
    # Create the virtual point path needed to give the line width when drawn by polygon:
    polygon_path = np.zeros((true_line.shape[0], 2 * true_line.shape[1] + 1), dtype=float)
    # Now we offset the noisy line perpendicularly by the line width to give it depth (rhs)
    polygon_path[:, 1:(true_line.shape[1] - 1)] = (
        noise_line[:, 1:true_line.shape[1] - 1]
        + line_width * np.transpose(self.perpendicular(
            np.transpose(noise_line[:, 2:] - noise_line[:, :noise_line.shape[1] - 2]))))
    # Same code but subtracting width and reverse order to produce the lhs of the line
    polygon_path[:, (2 * true_line.shape[1] - 2):(true_line.shape[1]):-1] = (
        noise_line[:, 1:true_line.shape[1] - 1]
        - line_width * np.transpose(self.perpendicular(
            np.transpose(noise_line[:, 2:] - noise_line[:, :noise_line.shape[1] - 2]))))
    # These points determine the bottom end of the line:
    polygon_path[:, true_line.shape[1] - 1] = noise_line[:, true_line.shape[1] - 1] - [line_width, 0]
    polygon_path[:, true_line.shape[1]] = noise_line[:, true_line.shape[1] - 1] + [line_width, 0]
    # Now we set the start and endpoints (they must be the same!)
    polygon_path[:, 0] = noise_line[:, 0] - [line_width, 0]
    polygon_path[:, 2 * true_line.shape[1] - 1] = noise_line[:, 0] + [line_width, 0]
    # This is the last unique point
    polygon_path[:, 2 * true_line.shape[1]] = noise_line[:, 0] - [line_width, 0]
    # Actually draw the polygon
    rr, cc = polygon(polygon_path.astype(int)[1], polygon_path.astype(int)[0],
                     (self.view_res[1], self.view_res[0]))
    return rr, cc

# Draws dashed lines like the one in the center of the road
# FIXME add noise to the dashed line generator to cut down on over-fitting (may be superfluous)
def dashed_line(self, coordinates, dash_length, dash_width):
    # estimate the curve length to generate a segment count which will
    # approximate the desired dash length
    est_curve_len = (self.vector_len(coordinates[:, 2] - coordinates[:, 0])
                     + self.vector_len(coordinates[:, 1] - coordinates[:, 0])
                     + self.vector_len(coordinates[:, 2] - coordinates[:, 1])) / 2
    segments = int(est_curve_len / dash_length)
    x, y = bezier_curve(coordinates[0, :] - self.cropsize[0],
                        coordinates[1, :] - self.cropsize[1],
                        segments)
    dash_line = np.array([x, y])
    # initializing empty indices
    rrr = np.empty(0, dtype=int)
    ccc = np.empty(0, dtype=int)
    for dash in range(int(segments / 2)):
        offset = .5 * dash_width * self.perpendicular(dash_line[:, dash * 2] - dash_line[:, dash * 2 + 1])
        d_path = np.array([dash_line[:, dash * 2] + offset,
                           dash_line[:, dash * 2 + 1] + offset,
                           dash_line[:, dash * 2 + 1] - offset,
                           dash_line[:, dash * 2] - offset,
                           dash_line[:, dash * 2] + offset])
        rr, cc = polygon(d_path.astype(int)[:, 1], d_path.astype(int)[:, 0],
                         (self.view_res[1], self.view_res[0]))
        rrr = np.append(rrr, rr)
        ccc = np.append(ccc, cc)
    return rrr, ccc

# Makes randomly shaped polygon noise to screw with the learning algorithm
def poly_noise(self, origin, max_size=[128, 24], max_verticies=10):
    vert_count = np.random.randint(3, max_verticies)
    verts = np.matmul(np.ones([vert_count + 1, 1]), [origin])
    verts[1:vert_count, 0] = origin[0] + np.random.randint(0, max_size[0], vert_count - 1)
    verts[1:vert_count, 1] = origin[1] + np.random.randint(0, max_size[1], vert_count - 1)
    return polygon(verts[:, 1], verts[:, 0], (self.view_res[1], self.view_res[0]))

# converts coordinates into images with curves on them
def dashed_line(self, coordinates, dash_length, dash_width):
    # estimate the curve length to generate a segment count which will approximate the
    # desired dash length
    est_curve_len = (self.vector_len(coordinates[:, 2] - coordinates[:, 0])
                     + self.vector_len(coordinates[:, 1] - coordinates[:, 0])
                     + self.vector_len(coordinates[:, 2] - coordinates[:, 1])) / 2
    segments = int(est_curve_len / dash_length)
    if self.debug:
        print(est_curve_len)
        print(segments)
    x, y = bezier_curve(coordinates[0, :], coordinates[1, :], segments)
    dash_line = np.array([x, y])
    if self.debug:
        print('dashed line center coordinates')
        print(dash_line[:, :self.debug])
    # initializing empty indices
    rrr = np.empty(0, dtype=int)
    ccc = np.empty(0, dtype=int)
    for dash in range(int(segments / 2)):
        offset = .5 * dash_width * self.perpendicular(dash_line[:, dash * 2] - dash_line[:, dash * 2 + 1])
        d_path = np.array([dash_line[:, dash * 2] + offset,
                           dash_line[:, dash * 2 + 1] + offset,
                           dash_line[:, dash * 2 + 1] - offset,
                           dash_line[:, dash * 2] - offset,
                           dash_line[:, dash * 2] + offset])
        dd_path = self.xz_to_xy(np.array([d_path[:, 0], d_path[:, 1]]))
        rr, cc = polygon(dd_path.astype(int)[1], dd_path.astype(int)[0],
                         (self.view_res[0], self.view_res[1]))
        rrr = np.append(rrr, rr)
        ccc = np.append(ccc, cc)
    return rrr, ccc

# Makes randomly shaped polygon noise to screw with the learning algorithm
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    if datasetType == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
            else:
                # mask
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1), linewidths=3, alpha=0.4)
        ax.add_collection(p)
    elif datasetType == 'captions':
        for ann in anns:
            print(ann['caption'])
def superpixel_image(image, bbox, Nsp_min=100, Nsp_max=500, Nsp_npx=50):
    """
    Superpixels an image using slic0 and labels them 1 if they are 100% inside
    the bounding box, 0 otherwise.
    Arguments:
        image = MxNxD
        bbox =
        Nsp_npx = number of pixels per superpixel (on avg) we're aiming for
        Nsp_min = (approx) min number of superpixels in cropped region
        Nsp_max = (approx) max number of superpixels in cropped region
    Output:
        segments = MxN label image where each pixel's value represents the
            superpixel it belongs to.
        sp_label = boolean vector containing segments.max()+1 entries
            corresponding to the label of the superpixel, with 1 indicating
            100% inside the bounding box, 0 otherwise.
    """
    bbox_aa = bbox_to_axis_aligned_bbox(bbox)
    # calculate number of superpixels to aim for
    n_sp = np.rint(bbox_aa[2] * bbox_aa[3] / Nsp_npx).astype('int')
    n_sp = np.max([Nsp_min, np.min([Nsp_max, n_sp])])
    # segment the image using SLIC0
    segments = slic(image, n_segments=n_sp, slic_zero=True, enforce_connectivity=True)
    n_sp = segments.max() + 1  # actual number of superpixels
    # create mask for outside of bounding box
    bbox_mask = np.zeros(image.shape[:2], dtype='bool')
    x, y = polygon(bbox[::2], bbox[1::2])
    bbox_mask[y, x] = True
    # label superpixels - 0 = 100% outside bbox, 1 = some overlap with bbox
    sp_label = np.zeros((n_sp), dtype='bool')
    for n in range(n_sp):
        # label n'th sp as inside bbox if any of its pixels overlap the bbox
        if np.any((segments == n) & bbox_mask):
            sp_label[n] = True
    return segments, sp_label
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    if datasetType == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
            else:
                # mask
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1), linewidths=3, alpha=0.4)
        ax.add_collection(p)
    elif datasetType == 'captions':
        for ann in anns:
            print(ann['caption'])
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    if datasetType == 'instances':
        ax = plt.gca()
        polygons = []
        color = []
        for ann in anns:
            c = np.random.random((1, 3)).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly, True, alpha=0.4))
                    color.append(c)
            else:
                # mask
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
        p = PatchCollection(polygons, facecolors=color, edgecolors=(0, 0, 0, 1), linewidths=3, alpha=0.4)
        ax.add_collection(p)
    elif datasetType == 'captions':
        n = 0
        cap = [None] * 5
        for ann in anns:
            # print(ann['caption'])
            if n < 5:
                cap[n] = ann['caption']
                # print(cap[n])
                n = n + 1
        print(n)
        print(cap)
        return cap
def showAnns(self, anns):
    """
    Display the specified annotations.
    :param anns (array of object): annotations to display
    :return: None
    """
    if len(anns) == 0:
        return 0
    if 'segmentation' in anns[0]:
        datasetType = 'instances'
    elif 'caption' in anns[0]:
        datasetType = 'captions'
    if datasetType == 'instances':
        ax = plt.gca()
        ax.set_autoscale_on(False)
        polygons = []
        color = []
        for ann in anns:
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            if type(ann['segmentation']) == list:
                # polygon
                for seg in ann['segmentation']:
                    poly = np.array(seg).reshape((len(seg) // 2, 2))
                    polygons.append(Polygon(poly))
                    color.append(c)
            else:
                # mask
                t = self.imgs[ann['image_id']]
                if type(ann['segmentation']['counts']) == list:
                    rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                m = mask.decode(rle)
                img = np.ones((m.shape[0], m.shape[1], 3))
                if ann['iscrowd'] == 1:
                    color_mask = np.array([2.0, 166.0, 101.0]) / 255
                if ann['iscrowd'] == 0:
                    color_mask = np.random.random((1, 3)).tolist()[0]
                for i in range(3):
                    img[:, :, i] = color_mask[i]
                ax.imshow(np.dstack((img, m * 0.5)))
            if 'keypoints' in ann and type(ann['keypoints']) == list:
                # turn skeleton into zero-based index
                sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                kp = np.array(ann['keypoints'])
                x = kp[0::3]
                y = kp[1::3]
                v = kp[2::3]
                for sk in sks:
                    if np.all(v[sk] > 0):
                        plt.plot(x[sk], y[sk], linewidth=3, color=c)
                plt.plot(x[v == 1], y[v == 1], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor='k', markeredgewidth=2)
                plt.plot(x[v == 2], y[v == 2], 'o', markersize=8,
                         markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
        ax.add_collection(p)
        p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
        ax.add_collection(p)
    elif datasetType == 'captions':
        for ann in anns:
            print(ann['caption'])