我们从Python开源项目中,提取了以下39个代码示例,用于说明如何使用cv2.fillConvexPoly()。
def classify(img):
    """Threshold `img`, fill mid-sized contours (area 3000-25000) white, return the mask."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.medianBlur(gray, 13)  # computed but unused; kept for parity
    _, binary = cv2.threshold(gray, 100, 120, cv2.THRESH_BINARY)
    work = copy.copy(binary)
    rows, cols = binary.shape
    mask = np.zeros((rows, cols, 3), np.uint8)
    kept = []
    image, contours, hierarchy = cv2.findContours(work, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 3000 < area < 25000:
            # outline on the input image, solid fill on the mask
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(mask, cnt, [255, 255, 255])
            kept.append(cnt)
    return mask
def fill(img, points):
    """Fill the convex hull of `points` on `img` with 255 (in place) and return `img`."""
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(img, hull, 255)
    return img
def patSiemensStar(s0, n=72, vhigh=255, vlow=0, antiasing=False):
    """Render an s0 x s0 Siemens-star test pattern with 2*n alternating sectors."""
    img = np.full((s0, s0), vlow, dtype=np.uint8)
    centre = int(round(s0 / 2.))
    delta = 2 * np.pi / (2 * n)
    angle = 0
    for k in range(2 * n):
        # triangle from the centre to two far-away rim points one sector apart
        ax = round(centre + np.sin(angle) * 2 * s0)
        ay = round(centre + np.cos(angle) * 2 * s0)
        angle += delta
        bx = round(centre + np.sin(angle) * 2 * s0)
        by = round(centre + np.cos(angle) * 2 * s0)
        tri = np.array(((centre, centre), (ax, ay), (bx, by)), dtype=int)
        cv2.fillConvexPoly(img, tri,
                           color=vhigh if k % 2 else vlow,
                           lineType=cv2.LINE_AA if antiasing else 0)
    # centre pixel belongs to every sector; force it dark
    img[centre, centre] = 0
    return img.astype(float)
def classify(img):
    """Threshold `img`, fill contours in a size band white, show and return the mask."""
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)
    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)
    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        # BUG FIX: the original test was `area > 35000 and area < 15000`,
        # which can never be true, so no contour was ever kept and the
        # returned mask was always black.  The bounds were almost certainly
        # swapped; keep contours whose area lies between the two values.
        if 15000 < area < 35000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
def classify(img):
    """Threshold `img`, fill contours with area in (3600, 25000) white, show and return the mask."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.medianBlur(gray, 13)  # computed but unused; kept for parity
    _, binary = cv2.threshold(gray, 100, 120, cv2.THRESH_BINARY)
    work = copy.copy(binary)
    rows, cols = binary.shape
    mask = np.zeros((rows, cols, 3), np.uint8)
    kept = []
    image, contours, hierarchy = cv2.findContours(work, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 3600 < area < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(mask, cnt, [255, 255, 255])
            kept.append(cnt)
    cv2.imshow('arr', mask)
    k = cv2.waitKey(0)
    return mask
def update_colours(self):
    """Recolour every Voronoi facet with the image colour at its centre and redraw."""
    if self.image is None:
        return None
    (facets, centers) = self.subdiv.getVoronoiFacetList([])
    img = np.zeros(self.image.shape, dtype=self.image.dtype)  # colour image
    for i in range(len(facets)):
        # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin int is
        # what it aliased all along.
        ifacet = np.array([f for f in facets[i]], int)
        # sample the source image at the facet's seed point (y, x order)
        color = self.image[int(centers[i][1])][int(centers[i][0])].tolist()
        cv2.fillConvexPoly(img, ifacet, color)
    # update image data (flipped to match the axes orientation)
    self.im.set_data(np.flipud(np.fliplr(img)))
    # draw image
    self.ax.draw_artist(self.im)
    self.fig.canvas.blit(self.ax.bbox)
    self.fig.canvas.flush_events()
def partial_blur(img, points, kenel_size = 9, type = 1):
    """
    Partial blur within convex hull of points.

    Args:
        type = 0 for Gaussian blur
        type = 1 for average blur
    Returns a copy of `img` with the hull region replaced by blurred pixels.
    """
    points = cv2.convexHull(points)
    copy_img = img.copy()
    black = (0, 0, 0)
    # BUG FIX: cv2.blur / cv2.GaussianBlur return the blurred image rather
    # than working in place; the original discarded the result, so the
    # later reference to `blur_img` raised NameError.
    if type:
        blur_img = cv2.blur(img, (kenel_size, kenel_size))
    else:
        blur_img = cv2.GaussianBlur(img, (kenel_size, kenel_size), 0)
    # mark the hull region black in the copy, then swap in blurred pixels there
    cv2.fillConvexPoly(copy_img, points, color = black)
    for row in range(img.shape[:2][0]):
        for col in range(img.shape[:2][1]):
            if numpy.array_equal(copy_img[row][col], black):
                copy_img[row][col] = blur_img[row][col]
    return copy_img
def get_face_mask(img, img_l):
    """Build a soft-edged 3-channel float mask covering the overlay landmark groups."""
    mask = np.zeros(img.shape[:2], dtype=np.float64)
    for idx in OVERLAY_POINTS_IDX:
        cv2.fillConvexPoly(mask, cv2.convexHull(img_l[idx]), color=1)
    # replicate to 3 channels (H, W, 3)
    mask = np.array([mask, mask, mask]).transpose((1, 2, 0))
    # dilate via blur-and-binarise, then feather the edge
    mask = (cv2.GaussianBlur(mask, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
    mask = cv2.GaussianBlur(mask, (BLUR_AMOUNT, BLUR_AMOUNT), 0)
    return mask
def draw_convex_hull(self, im, points, color):
    """Fill the convex hull of `points` on `im` with `color` (modifies `im` in place)."""
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(im, hull, color=color)
def mask_from_points(size, points):
    """Return a uint8 mask of shape `size` with the convex hull of `points` filled with 255."""
    hull = cv2.convexHull(points)
    mask = np.zeros(size, np.uint8)
    cv2.fillConvexPoly(mask, hull, 255)
    return mask
def draw_quads(self, img, quads, color = (0, 255, 0)):
    """Project 3-D quads through the camera model and fill them on `img`."""
    projected = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec,
                                  self.K, self.dist_coef)[0]
    projected.shape = quads.shape[:2] + (2,)
    for quad in projected:
        # shift=2 means fixed-point coordinates with 2 fractional bits, hence *4
        cv2.fillConvexPoly(img, np.int32(quad * 4), color, cv2.LINE_AA, shift=2)
def draw_convex_hull(im, points, color):
    """Fill the convex hull of `points` on `im` with `color` (modifies `im` in place)."""
    cv2.fillConvexPoly(im, cv2.convexHull(points), color=color)
def warpTriangle(self, img1, img2, t1, t2):
    """Warp triangle t1 of img1 onto triangle t2 of img2 (img2 is modified in place)."""
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    # BUG FIX: `xrange` does not exist on Python 3; `range` behaves
    # identically for this small fixed count on both Python 2 and 3.
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
    # Get mask by filling triangle (1.0 inside, 0.0 outside)
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = self.applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
def smoothen_blush(x, y):
    """Blend the blush polygon outlined by (x, y) softly into the global output image."""
    global imOrg
    # binary polygon mask, then Gaussian-feathered to a soft weight map
    region = zeros((height, width))
    cv2.fillConvexPoly(region, np.array(c_[x, y], dtype='int32'), 1)
    soft = cv2.GaussianBlur(region, (51, 51), 0)
    # replicate the single-channel weight to all three colour channels
    weight = np.ndarray([height, width, 3], dtype='float')
    weight[:, :, 0] = soft
    weight[:, :, 1] = soft
    weight[:, :, 2] = soft
    # weighted blend of the made-up image over the original
    imOrg = (weight * im + (1 - weight) * imOrg).astype('uint8')
def foreground(self, quad=None):
    """Return a boolean mask that is True inside the given quad (default: self.quad)."""
    mask = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8)
    if quad is None:
        quad = self.quad
    else:
        quad = quad.astype(np.int32)
    cv2.fillConvexPoly(mask, quad, 1)
    return mask.astype(bool)
def draw_quads(self, img, quads, color = (0, 255, 0)):
    """Project 3-D quads through the camera model and fill them on `img`."""
    img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec,
                                  self.K, self.dist_coef)[0]
    img_quads.shape = quads.shape[:2] + (2,)
    for q in img_quads:
        # BUG FIX: cv2.CV_AA was removed in OpenCV 3; cv2.LINE_AA is the
        # same value (16) and matches the sibling draw_quads implementation.
        cv2.fillConvexPoly(img, np.int32(q * 4), color, cv2.LINE_AA, shift=2)
def get_face_mask(self, img, landmarks):
    """Blank out (fill with 0) the convex hull of every overlay landmark group on `img`."""
    for group in self.OVERLAY_POINTS:
        cv2.fillConvexPoly(img, cv2.convexHull(landmarks[group]), 0)
def getLabelMask(self, img, xmlroot):
    """Build a 3-channel boolean mask covering every polygon labelled self._labelname."""
    self._img = img
    self._xmlroot = xmlroot
    self._mask_label = np.zeros((self._img.shape[0], self._img.shape[1]))
    for objectElement in self._xmlroot.findall('object'):
        label = objectElement.find('name').text
        xs = objectElement.findall('polygon/pt/x')
        ys = objectElement.findall('polygon/pt/y')
        # collect the polygon's vertices as int32 (x, y) pairs
        points = np.zeros((len(xs), 2), dtype='int32')
        for idx, (xe, ye) in enumerate(zip(xs, ys)):
            points[idx] = np.array([int(xe.text), int(ye.text)])
        poly_mask = np.zeros((self._img.shape[0], self._img.shape[1]))
        cv2.fillConvexPoly(poly_mask, points, 1)
        poly_mask = poly_mask.astype(bool)
        if label == self._labelname:
            # boolean + boolean acts as logical OR here
            self._mask_label = self._mask_label.astype(bool)
            self._mask_label = poly_mask + self._mask_label
    self._mask_label = np.repeat(self._mask_label[:, :, None], 3, axis=2)
    return self._mask_label
def getLabelMask(xml_root, img_src, classname):
    """Build a 3-channel boolean mask covering every polygon labelled `classname`."""
    mask_stems = np.zeros((img_src.shape[0], img_src.shape[1]))
    for objectElement in xml_root.findall('object'):
        label = objectElement.find('name').text
        xs = objectElement.findall('polygon/pt/x')
        ys = objectElement.findall('polygon/pt/y')
        # collect the polygon's vertices as int32 (x, y) pairs
        points = np.zeros((len(xs), 2), dtype='int32')
        for idx, (xe, ye) in enumerate(zip(xs, ys)):
            points[idx] = np.array([int(xe.text), int(ye.text)])
        poly_mask = np.zeros((img_src.shape[0], img_src.shape[1]))
        cv2.fillConvexPoly(poly_mask, points, 1)
        poly_mask = poly_mask.astype(bool)
        if label == classname:
            # boolean + boolean acts as logical OR here
            mask_stems = mask_stems.astype(bool)
            mask_stems = poly_mask + mask_stems
    mask_stems = np.repeat(mask_stems[:, :, None], 3, axis=2)
    return mask_stems
def generate(self):
    """Render the genome: alpha-blend each gene's shape onto a black canvas."""
    img = np.zeros((self.height, self.width, 3), np.uint8)
    overlay = img.copy()
    output = img.copy()
    for i in range(self.size):
        info = self.genes[i].getInfo()
        if self.type == 1:
            # circle: (centre, radius, colour, alpha)
            cv2.circle(overlay, info[0], info[1], info[2], -1)
            cv2.addWeighted(overlay, info[3], output, 1 - info[3], 0, output)
        elif self.type == 2:
            # ellipse: (centre, axes, angle, colour, alpha)
            cv2.ellipse(overlay, info[0], info[1], info[2], 0, 360, info[3], -1)
            cv2.addWeighted(overlay, info[4], output, 1 - info[4], 0, output)
        elif self.type == 3 or self.type == 4:
            # triangle / quadrilateral: (points, colour, alpha) — identical handling
            cv2.fillConvexPoly(overlay, np.asarray(info[0]), info[1])
            cv2.addWeighted(overlay, info[2], output, 1 - info[2], 0, output)
    return output
def draw_convex_hull(im, points, color):
    """Fill the convex hull of `points` on `im` with `color` (modifies `im` in place)."""
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)
    # FIX: removed leftover debug `print(points)` that spammed stdout on every call
def warpTriangle(imSrc, imDst, tSrc, tDst):
    """Warp triangle tSrc of imSrc onto tDst and composite it over imDst; return the blend."""
    # Mask that is 1.0 inside the destination triangle, 0.0 elsewhere
    mask = np.zeros(imDst.shape, dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(tDst), (1.0, 1.0, 1.0), 16, 0)
    warped = applyAffineTransform(imSrc, tSrc, tDst, imDst)
    # Keep imDst outside the triangle and the warped pixels inside it
    imDst = imDst * (1 - mask) + warped * mask
    return imDst
def warpTriangle(img1, img2, t1, t2):
    """Warp triangle t1 of img1 onto triangle t2 of img2 (img2 is modified in place)."""
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    # BUG FIX: `xrange` does not exist on Python 3; `range` behaves
    # identically for this small fixed count on both Python 2 and 3.
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
    # Get mask by filling triangle (1.0 inside, 0.0 outside)
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
def warpTriangle(img1, img2, t1, t2):
    """Warp triangle t1 of img1 onto triangle t2 of img2 (img2 is modified in place)."""
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    # BUG FIX: `xrange` does not exist on Python 3; `range` behaves
    # identically for this small fixed count on both Python 2 and 3.
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
    # Get mask by filling triangle (1.0 inside, 0.0 outside)
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
def getNextFrame(self):
    """Advance the scene by one time step and return the rendered frame."""
    img = self.sceneBg.copy()
    # BUG FIX: `self.foreground != None` performs an element-wise comparison
    # when foreground is a NumPy image (raising "truth value is ambiguous");
    # an identity test is the correct check.
    if self.foreground is not None:
        # paste the foreground sprite at its animated position
        self.currentCenter = (self.center[0] + self.getXOffset(self.time),
                              self.center[1] + self.getYOffset(self.time))
        img[self.currentCenter[0]:self.currentCenter[0] + self.foreground.shape[0],
            self.currentCenter[1]:self.currentCenter[1] + self.foreground.shape[1]] = self.foreground
    else:
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int
        # truncates the same way.
        self.currentRect = self.initialRect + int(
            30 * cos(self.time * self.speed) + 50 * sin(self.time * self.speed))
        if self.deformation:
            self.currentRect[1:3] += self.h / 20 * cos(self.time)
        cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255))
    self.time += self.timeStep
    return img
def get_background(self, vehicle_roll, vehicle_pitch):
    """Render a sky/ground background rotated by roll and shifted vertically by pitch."""
    # start with an image entirely in the sky colour
    image = numpy.zeros((balloon_video.img_height, balloon_video.img_width, 3), numpy.uint8)
    image[:] = self.background_sky_colour_bgr
    # corners of a huge rectangle that will become the ground, rotated by -roll
    top_left = balloon_utils.rotate_xy(balloon_video.img_center_x - 1000, balloon_video.img_center_y, -vehicle_roll)
    top_right = balloon_utils.rotate_xy(balloon_video.img_center_x + 1000, balloon_video.img_center_y, -vehicle_roll)
    bot_left = balloon_utils.rotate_xy(balloon_video.img_center_x - 1000, balloon_video.img_center_y + 1000, -vehicle_roll)
    bot_right = balloon_utils.rotate_xy(balloon_video.img_center_x + 1000, balloon_video.img_center_y + 1000, -vehicle_roll)
    # vertical pixel shift corresponding to the pitch angle
    pitch_pixel_shift = balloon_video.angle_to_pixels_y(vehicle_pitch)
    top_left = balloon_utils.shift_pixels_down(top_left, pitch_pixel_shift)
    top_right = balloon_utils.shift_pixels_down(top_right, pitch_pixel_shift)
    bot_left = balloon_utils.shift_pixels_down(bot_left, pitch_pixel_shift)
    bot_right = balloon_utils.shift_pixels_down(bot_right, pitch_pixel_shift)
    # fill the ground polygon, leaving the sky colour above the horizon
    box = numpy.array([top_left, top_right, bot_right, bot_left], numpy.int32)
    cv2.fillConvexPoly(image, box, self.background_ground_colour_bgr_scalar)
    return image

# draw_fake_balloon - draws fake balloon in the frame at the specified roll, pitch and yaw angle
#   veh_pos : PositionVector holding the vehicle's offset from home
#   balloon_pos : PositionVector holding the balloon's offset from home
#   vehicle roll, pitch and yaw angles should be in radians
def draw_convex_hull(img, points, color):
    """
    Draw the filled convex hull of `points` on `img`.

    Note: `img` is modified in place by this call.
    """
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(img, hull, color=color)
def pasteOne(self, source, dest, source_face, dest_face, mask):
    """
    Warp source_face from `source` onto dest_face in a copy of `dest`.

    Returns (warped_image, mask) where mask is filled white over the
    destination face's convex hull.
    """
    img1Warped = np.copy(dest)
    points1 = self.getPoints(source_face)
    points2 = self.getPoints(dest_face)
    # Find convex hull of the destination landmarks; map the same vertex
    # indices onto the source landmarks so the hulls correspond.
    hull1 = []
    hull2 = []
    hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
    # BUG FIX (throughout): `xrange` does not exist on Python 3; `range`
    # behaves identically here on both Python 2 and 3.
    for i in range(0, len(hullIndex)):
        hull1.append(points1[hullIndex[i][0]])
        hull2.append(points2[hullIndex[i][0]])
    # Find delanauy traingulation for convex hull points
    sizeImg2 = dest.shape
    rect = (0, 0, sizeImg2[1], sizeImg2[0])
    dt = self.calculateDelaunayTriangles(rect, hull2)
    if len(dt) == 0:
        quit()
    # Apply affine transformation to Delaunay triangles
    for i in range(0, len(dt)):
        t1 = []
        t2 = []
        # get points for img1, img2 corresponding to the triangles
        for j in range(0, 3):
            t1.append(hull1[dt[i][j]])
            t2.append(hull2[dt[i][j]])
        self.warpTriangle(source, img1Warped, t1, t2)
    # Calculate Mask
    hull8U = []
    for i in range(0, len(hull2)):
        hull8U.append((hull2[i][0], hull2[i][1]))
    cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
    # `center` is kept for the (commented-out) seamlessClone variant below
    r = cv2.boundingRect(np.float32([hull2]))
    center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
    return (img1Warped, mask)
    # Clone seamlessly, looks better but much slower
    # output = cv2.seamlessClone(np.uint8(img1Warped), dest, mask, center, cv2.NORMAL_CLONE)
    # return output
def run(self):
    """Iteratively add random shapes, keeping each only if it lowers the fitness error."""
    # Succesively add a new polygon that contributes to reduce overall error
    # until stopping criteria is achieved
    min_err = self.fitness(self.source)
    cur = copy.deepcopy(self.source)
    improvements = 0
    for i in range(self.iterations):
        candidate = copy.deepcopy(cur)
        if self.type == 1:
            c = circle.Circle(self.height, self.width, 0.1)
            info = c.getInfo()
            cv2.circle(cur, info[0], info[1], info[2], -1)
            cv2.addWeighted(cur, info[3], candidate, 1 - info[3], 0, candidate)
        elif self.type == 2:
            e = ellipse.Ellipse(self.height, self.width, 0.1)
            info = e.getInfo()
            cv2.ellipse(cur, info[0], info[1], info[2], 0, 360, info[3], -1)
            cv2.addWeighted(cur, info[4], candidate, 1 - info[4], 0, candidate)
        elif self.type == 3:
            t = triangle.Triangle(self.height, self.width, 0.1)
            info = t.getInfo()
            cv2.fillConvexPoly(cur, np.asarray(info[0]), info[1])
            cv2.addWeighted(cur, info[2], candidate, 1 - info[2], 0, candidate)
        elif self.type == 4:
            r = quadrilateral.Quadrilateral(self.height, self.width, 0.1)
            info = r.getInfo()
            cv2.fillConvexPoly(cur, np.asarray(info[0]), info[1])
            cv2.addWeighted(cur, info[2], candidate, 1 - info[2], 0, candidate)
        # NOTE(review): the shape is drawn directly onto `cur`, so `cur` remains
        # mutated even when the candidate is rejected below — confirm intended.
        # Compute dissimilarity
        err = self.fitness(candidate)
        # Keep the candidate only when it improves fitness
        if err < min_err:
            min_err = err
            improvements = improvements + 1
            cur = copy.deepcopy(candidate)
            cv2.imwrite("Refine_Error_" + str(i) + "_" + str(min_err) + ".jpg", cur)
        if improvements == self.maximprovements:
            break
def warp_image(img, triangulation, base_points, coord):
    """
    Realize the mesh warping phase.

    triangulation : Delaunay triangles (vertex-index triples) of the base points
    base_points   : coordinates of the landmark points of the reference image
    coord         : crop bounds (x0, x1, y0, y1) applied to the result
    code inspired from
    http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for tri in triangulation:
        # source and destination triangles to map one onto the other
        src_tri = np.array([[all_points[v][0], all_points[v][1]] for v in tri]).astype(np.float32)
        dest_tri = np.array([[base_points[v][0], base_points[v][1]] for v in tri]).astype(np.float32)
        # bounding boxes of both triangles
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))
        # triangle vertices relative to their bounding boxes
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]
        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]
        # estimate and apply the affine transform between the two triangles
        mat = cv2.getAffineTransform(np.float32(src_crop_tri), np.float32(dest_crop_tri))
        dest_crop_img = cv2.warpAffine(src_crop_img, mat, (dest_rect[2], dest_rect[3]), None,
                                       flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
        # keep only the triangle's pixels via a filled mask (1.0 inside)
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)
        dest_crop_img = dest_crop_img * mask
        # composite the warped triangle into the output image
        region = img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]]
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            region * ((1.0, 1.0, 1.0) - mask) + dest_crop_img
    return img_out[coord[2]:coord[3], coord[0]:coord[1]]