The following code examples, extracted from open-source Python projects, illustrate how to use cv2.line().
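For reference, the basic call is cv2.line(img, pt1, pt2, color, thickness=1, lineType=..., shift=0); points are (x, y) tuples, color is BGR, and the image is modified in place (and also returned). Below is a minimal self-contained sketch, not taken from any of the projects listed after it:

import cv2
import numpy as np

# Blank black canvas: height 200, width 300, 3 channels (BGR).
canvas = np.zeros((200, 300, 3), dtype=np.uint8)

# Draw a 2-pixel-thick anti-aliased green line from (10, 10) to (290, 190).
cv2.line(canvas, (10, 10), (290, 190), (0, 255, 0), 2, cv2.LINE_AA)

cv2.imwrite("line_demo.png", canvas)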
def estimate_skew(image):
    edges = auto_canny(image)
    lines = cv2.HoughLines(edges, 1, np.pi / 90, 200)
    new = edges.copy()
    thetas = []
    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            if theta > np.pi / 3 and theta < np.pi * 2 / 3:
                thetas.append(theta)
                new = cv2.line(new, (x1, y1), (x2, y2), (255, 255, 255), 1)
    theta_mean = np.mean(thetas)
    theta = rad_to_deg(theta_mean) if len(thetas) > 0 else 0
    return theta
def rgb_callback(self, data):
    try:
        img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ret, corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    ret, corners = cv2.findChessboardCorners(img, (x_num, y_num))
    cv2.imshow('img', img)
    cv2.waitKey(5)
    if ret == True:
        cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        tempimg = img.copy()
        cv2.drawChessboardCorners(tempimg, (x_num, y_num), corners, ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags=cv2.CV_EPNP)
        rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
        print("rvecs:")
        print(rvec)
        print("tvecs:")
        print(tvec)

        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)
        imgpts = np.int32(imgpts).reshape(-1, 2)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]), [255, 0, 0], 4)  # BGR
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]), [0, 255, 0], 4)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]), [0, 0, 255], 4)
        cv2.imshow('img', tempimg)
        cv2.waitKey(5)
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    list = [l1, l2]

    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger, mouse_down

        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x, y)
                cv2.line(im_disp, (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1
        else:
            pass

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):  # press 'r' to resume and return the drawn lines
            return list
    exit()
def find_lines(img):
    edges = cv2.Canny(img, 100, 200)
    threshold = 60
    minLineLength = 10
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold, 0, minLineLength, 20)
    if (lines is None or len(lines) == 0):
        return

    # print lines
    for line in lines[0]:
        # print line
        cv2.line(img, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 2)

    cv2.imwrite("line_edges.jpg", edges)
    cv2.imwrite("lines.jpg", img)
def draw_joints_15(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in xrange(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 1)
    # draw torso in yellow lines
    torso = [[0, 1], [0, 14], [5, 10]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 1)
    # draw left part in pink lines
    lpart = [[14, 13], [13, 12], [12, 11], [13, 10], [10, 9], [9, 8]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 1)
    # draw right part in blue lines
    rpart = [[1, 2], [2, 3], [3, 4], [2, 5], [5, 6], [6, 7]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 1)
    cv2.imwrite(save_image, image)
def draw_joints(test_image, joints, save_image):
    image = cv2.imread(test_image)
    joints = np.vstack((joints, (joints[8, :] + joints[11, :]) / 2))
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in xrange(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw torso in yellow lines
    torso = [[0, 1], [1, 14], [2, 14], [5, 14]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 2)
    # draw left part in pink lines
    lpart = [[1, 5], [5, 6], [6, 7], [5, 14], [14, 11], [11, 12], [12, 13]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 2)
    # draw right part in blue lines
    rpart = [[1, 2], [2, 3], [3, 4], [2, 14], [14, 8], [8, 9], [9, 10]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    cv2.imwrite(save_image, image)
def on_mouse(self, event, x, y, flags, param):
    pt = (x, y)
    if event == cv2.EVENT_LBUTTONDOWN:
        self.prev_pt = pt
    elif event == cv2.EVENT_LBUTTONUP:
        self.prev_pt = None

    if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
        for dst, color in zip(self.dests, self.colors_func()):
            cv2.line(dst, self.prev_pt, pt, color, 5)
        self.dirty = True
        self.prev_pt = pt
        self.show()


# palette data from matplotlib/_cm.py
def decorate_img_for_env(img, env_id, image_scale):
    """
    Args:
        img (numpy array of (width, height, 3)): input image
        env_id (str): the gym env id
        image_scale (float): a scale to resize the image

    Returns:
        an image

    Summary:
        Adds environment specific image decorations.
        Currently used to make it easier to block/label in Pong.
    """
    if env_id is not None and 'Pong' in env_id:
        h, w, _ = img.shape
        est_catastrophe_y = h - 142
        est_block_clearance_y = est_catastrophe_y - int(20 * image_scale)
        # cv2.line(img, (0, est_catastrophe_y), (int(500 * image_scale), est_catastrophe_y), (0, 0, 255))
        cv2.line(img, (250, est_catastrophe_y), (int(500 * image_scale), est_catastrophe_y), (0, 255, 255))
        # cv2.line(img, (0, est_block_clearance_y), (int(500 * image_scale), est_block_clearance_y),
        #          (255, 0, 0))
    return img
def extractOuterGrid(img):
    rows, cols = np.shape(img)
    maxArea = 0
    point = [0, 0]
    imgOriginal = img.copy()
    for i in range(rows):
        for j in range(cols):
            if img[i][j] == 255:
                img, area, dummy = customFloodFill(img, [i, j], 100, 0)
                if area > maxArea:
                    maxArea = area
                    point = [i, j]
    img = imgOriginal
    img, area, dummy = customFloodFill(img, [point[0], point[1]], 100, 0)
    for i in range(rows):
        for j in range(cols):
            if img[i][j] == 100:
                img[i][j] = 255
            else:
                img[i][j] = 0
    return img, point


# Draws a line on the image given its parameters in normal form
def draw(event, x, y, flags, param):
    global ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            cv2.line(img, (ix, iy), (x, y), (0.9, 0.01, 0.9), pen_size)
            ix, iy = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.line(img, (ix, iy), (x, y), (0.9, 0.01, 0.9), pen_size)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once
    you want to average/extrapolate the line segments you detect to map out
    the full extent of the lane (going from the result shown in
    raw-lines-example.mp4 to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
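The docstring above describes, but does not implement, the slope-based averaging/extrapolation step. Below is a minimal sketch of that idea, assuming `lines` has the (N, 1, 4) shape returned by cv2.HoughLinesP; the 0.3 slope cutoff and the 0.6*h extrapolation horizon are illustrative values, not taken from the original project:

import numpy as np
import cv2

def draw_lane_lines(img, lines, color=(255, 0, 0), thickness=8):
    # Split segments by slope sign: image y grows downward, so the left lane
    # line has negative slope and the right lane line has positive slope.
    left, right = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments
            slope = (y2 - y1) / float(x2 - x1)
            if abs(slope) < 0.3:
                continue  # skip near-horizontal noise
            (left if slope < 0 else right).append((x1, y1, x2, y2))

    h = img.shape[0]
    y_top, y_bottom = int(h * 0.6), h  # extrapolation range (tunable)
    for segs in (left, right):
        if not segs:
            continue
        pts = np.array(segs, dtype=np.float64)
        xs = np.concatenate([pts[:, 0], pts[:, 2]])
        ys = np.concatenate([pts[:, 1], pts[:, 3]])
        # Fit x = m*y + b so the line can be evaluated at fixed y values.
        m, b = np.polyfit(ys, xs, 1)
        cv2.line(img,
                 (int(m * y_bottom + b), y_bottom),
                 (int(m * y_top + b), y_top),
                 color, thickness)

For the semi-transparent overlay the docstring mentions, the drawn line image can be blended with the original via cv2.addWeighted.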
def process_image(image):
    # printing out some stats and plotting
    print('This image is:', type(image), 'with dimensions:', image.shape)

    gray = grayscale(image)

    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # plt.imshow(blur_gray, cmap='gray')

    # Define our parameters for Canny and apply
    low_threshold = 45    # 50
    high_threshold = 150  # 150
    edges = canny(blur_gray, low_threshold, high_threshold)

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    # vertices = np.array([[(0, imshape[0]), (475, 310), (475, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    vertices = np.array([[(0, imshape[0]), (450, 330), (490, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)

    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 1               # distance resolution in pixels of the Hough grid
    theta = np.pi / 180   # angular resolution in radians of the Hough grid
    threshold = 15        # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40  # minimum number of pixels making up a line 150 - 40
    max_line_gap = 130    # maximum gap in pixels between connectable line segments 58 - 95
    line_image = np.copy(image) * 0  # creating a blank to draw lines on

    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)

    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, image)
    return lines_edges
def drawBox(self, img):
    axis = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
                       [0, 0, -1], [0, 1, -1], [1, 1, -1], [1, 0, -1]])
    imgpts, jac = cv2.projectPoints(axis, self.RVEC, self.TVEC, self.MTX, self.DIST)
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # draw pillars in blue color
    for i, j in zip(range(4), range(4, 8)):
        img2 = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3)
    # draw top layer in red color
    outImg = cv2.drawContours(img2, [imgpts[4:]], -1, (0, 0, 255), 3)
    return outImg


# Debug Code.
def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
    # initialize the output visualization image
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB

    # loop over the matches
    for ((trainIdx, queryIdx), s) in zip(matches, status):
        # only process the match if the keypoint was successfully matched
        if s == 1:
            # draw the match
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

    # return the visualization
    return vis
def drawGroups(self, groups, image):
    (hI, wI) = image.shape[:2]
    vis = np.zeros((hI, wI, 3), dtype="uint8")
    vis[:, :] = image
    limits = (self.offsetsDataset.maximuns, self.offsetsDataset.minimuns)

    # loop over the matches
    for i, group in enumerate(groups):
        color = [0, 0, 0]
        color[i] = 255
        for offset in group.getCoveredDataset(limits=limits):
            p0 = self.getMercatorCoords(offset[:2])
            p1 = self.getMercatorCoords(offset[:2]) + self.getMercatorCoords(offset[2:])
            ptA = (int(p0[0]), int(p0[1]))
            ptB = (int(p1[0]), int(p1[1]))
            cv2.line(vis, ptA, ptB, color, 1)

    return vis
def build_graph(self):
    # Currently O(n * m) which is sad. Spatial partitioning tree (kdtree or quadtree) on node
    # locations would make O(m * log n). M and N are small enough in most cases that this
    # is fast enough for now.
    for line in self.contour_lines:
        for index, endpoint in enumerate(line.endpoints):
            # Find node with centroid closest to this endpoint.
            closest_node = None
            closest_sq = sys.float_info.max
            for node in self.contour_nodes:
                dx = endpoint[0] - node.centroid[0]
                dy = endpoint[1] - node.centroid[1]
                dist_sq = dx * dx + dy * dy
                if dist_sq < closest_sq:
                    closest_node = node
                    closest_sq = dist_sq

            # Check for root node (closer to top edge of image than to any labeled node)
            edge_dist_sq = endpoint[1] * endpoint[1]
            if edge_dist_sq < closest_sq:
                closest_node = EDGE_NODE

            line.nodes[index] = closest_node
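The comment in build_graph notes that a spatial index on node centroids would cut the per-endpoint search from O(n) to O(log n). A minimal sketch of that idea using scipy.spatial.cKDTree (a library choice assumed here, not used by the original project) might look like this:

import numpy as np
from scipy.spatial import cKDTree

def nearest_nodes(endpoints, node_centroids):
    """Return, for each endpoint, the distance to and index of the nearest centroid.

    endpoints:      (M, 2) array of (x, y) line endpoints
    node_centroids: (N, 2) array of (x, y) node centroids
    """
    tree = cKDTree(node_centroids)          # build once: O(N log N)
    dists, indices = tree.query(endpoints)  # each lookup: O(log N)
    return dists, indices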
def plt_skeleton(self, img, tocopy=True, debug=False, sess=None):
    """ Given an Image, returns Image with plotted limbs (TF VERSION)
    Args:
        img    : Source Image shape = (256,256,3)
        tocopy : (bool) False to write on source image / True to return a new array
        debug  : (bool) for testing purposes
        sess   : KEEP NONE
    """
    joints = self.joints_pred(np.expand_dims(img, axis=0), coord='img', debug=False, sess=sess)
    if tocopy:
        img = np.copy(img)
    for i in range(len(self.links)):
        position = self.givePixel(self.links[i]['link'], joints)
        cv2.line(img, tuple(position[0])[::-1], tuple(position[1])[::-1],
                 self.links[i]['color'][::-1], thickness=2)
    if tocopy:
        return img
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    Averaging & extrapolating of line points is already done upstream;
    this just draws the lines and skips out-of-image endpoints.
    """
    if len(img.shape) == 2:  # grayscale image -> make a "color" image out of it
        img = np.dstack((img, img, img))

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 >= 0 and x1 < img.shape[1] and \
               y1 >= 0 and y1 < img.shape[0] and \
               x2 >= 0 and x2 < img.shape[1] and \
               y2 >= 0 and y2 < img.shape[0]:
                cv2.line(img, (x1, y1), (x2, y2), color, thickness)
            else:
                print('BAD LINE (%d, %d, %d, %d)' % (x1, y1, x2, y2))
def draw_matches(self, im1, pos1, im2, pos2, matches, filename="matches.jpg"):
    self._log("drawing matches into '%s'..." % filename)
    row1, col1 = im1.shape
    row2, col2 = im2.shape
    im_out = np.zeros((max(row1, row2), col1 + col2, 3), dtype=np.uint8)
    im_out[:row1, :col1] = np.dstack([im1] * 3)
    im_out[:row2, col1:] = np.dstack([im2] * 3)
    l = len(matches)
    for ind, (i, j, d) in list(enumerate(matches))[::-1]:
        d /= para.descr_match_threshold  # map to [0, 1]
        _pos1, _pos2 = pos1[i], pos2[j]
        color = hsv_to_rgb(int(d * 120 - 120), 1, 1 - d / 3)
        color = [int(c * 255) for c in color]
        cv2.line(im_out, (_pos1[1], _pos1[0]), (_pos2[1] + col1, _pos2[0]), color, 1)
    cv2.imwrite(filename, im_out)


##########################
# Utility
##########################
def on_mouse(self, event, x, y, flags, param):
    pt = (x, y)
    if event == cv2.EVENT_LBUTTONDOWN:
        self.prev_pt = pt
    elif event == cv2.EVENT_LBUTTONUP:
        self.prev_pt = None

    if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
        for dst, color in zip(self.dests, self.colors_func()):
            cv2.line(dst, self.prev_pt, pt, color, 50)
        self.dirty = True
        self.prev_pt = pt
        self.show()
def side_intersect(self, image, contours, row, markup=True):
    """ Find intersections to both sides along a row """
    if markup:
        cv2.line(image, (0, row), (image.shape[1], row), (0, 0, 255), 1)

    cnt_l, col_l = self.find_intersect(image, contours, row, -1)
    if markup and cnt_l is not None:
        cv2.drawContours(image, [contours[cnt_l]], -1, (0, 255, 255), -1)
        cv2.circle(image, (col_l, row), 4, (0, 255, 0), 2)

    cnt_r, col_r = self.find_intersect(image, contours, row, 1)
    if markup and cnt_r is not None:
        cv2.drawContours(image, [contours[cnt_r]], -1, (255, 255, 0), -1)
        cv2.circle(image, (col_r, row), 4, (0, 255, 0), 2)

    return (cnt_l, col_l), (cnt_r, col_r)
def draw_humans(img, human_list):
    img_copied = np.copy(img)
    image_h, image_w = img_copied.shape[:2]
    centers = {}
    for human in human_list:
        part_idxs = human.keys()

        # draw point
        for i in range(CocoPart.Background.value):
            if i not in part_idxs:
                continue
            part_coord = human[i][1]
            center = (int(part_coord[0] * image_w + 0.5), int(part_coord[1] * image_h + 0.5))
            centers[i] = center
            cv2.circle(img_copied, center, 3, CocoColors[i], thickness=3, lineType=8, shift=0)

        # draw line
        for pair_order, pair in enumerate(CocoPairsRender):
            if pair[0] not in part_idxs or pair[1] not in part_idxs:
                continue
            img_copied = cv2.line(img_copied, centers[pair[0]], centers[pair[1]], CocoColors[pair_order], 3)

    return img_copied
def _show(self, path, inpmat, heatmat, pafmat, humans):
    image = cv2.imread(path)
    # CocoPoseLMDB.display_image(inpmat, heatmat, pafmat)

    image_h, image_w = image.shape[:2]
    heat_h, heat_w = heatmat.shape[:2]
    for _, human in humans.items():
        for part in human:
            if part['partIdx'] not in common.CocoPairsRender:
                continue
            center1 = (int((part['c1'][0] + 0.5) * image_w / heat_w),
                       int((part['c1'][1] + 0.5) * image_h / heat_h))
            center2 = (int((part['c2'][0] + 0.5) * image_w / heat_w),
                       int((part['c2'][1] + 0.5) * image_h / heat_h))
            cv2.circle(image, center1, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.circle(image, center2, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.putText(image, str(part['partIdx'][1]), center2, cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0), 1)
            image = cv2.line(image, center1, center2, (255, 0, 0), 1)

    cv2.imshow('result', image)
    cv2.waitKey(0)
def draw_tracks(self, frame, debug=False):
    """Draw tracks

    Parameters
    ----------
    frame : np.array
        Image frame
    debug : bool
        Debug mode (Default value = False)

    """
    if debug is False:
        return

    # Create a mask image and color for drawing purposes
    mask = np.zeros_like(frame)
    color = [0, 0, 255]

    # Draw tracks
    for i, (new, old) in enumerate(zip(self.kp_cur, self.kp_ref)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (a, b), (c, d), color, 1)
    img = cv2.add(frame, mask)

    cv2.imshow("Feature Tracks", img)
def _mkConvKernel(ksize, orientations):
    # create line shaped kernels, like [ | / - \ ] for 4 orientations
    assert ksize[0] % 2 and ksize[1] % 2
    k0, k1 = ksize
    mx, my = (k0 // 2) + 1, (k1 // 2) + 1
    kernel = np.empty((orientations, k0, k1))
    for i, a in enumerate(_angles(orientations)):
        # make line kernel
        x = int(round(4 * np.cos(a) * k0))
        y = int(round(4 * np.sin(a) * k1))
        k = np.zeros((2 * k0, 2 * k1), dtype=np.uint8)
        cv2.line(k, (-x + k0, -y + k1), (x + k0, y + k1), 255,
                 thickness=1, lineType=cv2.LINE_AA)
        # resize and scale 0-1:
        ki = k[mx:mx + k0, my:my + k1].astype(float) / 255
        kernel[i] = ki / ki.sum()
    return kernel
def draw(event, x, y, flags, param):
    global drawing, ix, iy, shape, canvas, brush
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if shape == 1:
                cv2.circle(canvas, (x, y), pencil, color, -1)
            elif shape == 2:
                cv2.circle(canvas, (x, y), brush, color, -1)
            elif shape == 3:
                cv2.circle(canvas, (x, y), eraser, (255, 255, 255), -1)
            elif shape == 5:
                cv2.rectangle(canvas, (ix, iy), (x, y), color, -1)
            elif shape == 6:
                cv2.circle(canvas, (x, y), calc_radius(x, y), color, -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if shape == 1:
            cv2.circle(canvas, (x, y), pencil, color, -1)
        elif shape == 2:
            cv2.circle(canvas, (x, y), brush, color, -1)
        elif shape == 3:
            cv2.circle(canvas, (x, y), eraser, (255, 255, 255), -1)
        elif shape == 4:
            cv2.line(canvas, (ix, iy), (x, y), color, pencil)
        elif shape == 5:
            cv2.rectangle(canvas, (ix, iy), (x, y), color, -1)
        elif shape == 6:
            cv2.circle(canvas, (x, y), calc_radius(x, y), color, -1)
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)

        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0, 0, 255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255, 0, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255, 0, 0), 2, cv2.LINE_AA)
def drawBox(camera_parameters, markers, frame):
    objpts = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
                         [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(objpts, rvec, tvec, mtx, dist)

        cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[1].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[2].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[3].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[0].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[0 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[1 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[2 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[3 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[0 + 4].ravel()), tuple(imgpts[1 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[1 + 4].ravel()), tuple(imgpts[2 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[2 + 4].ravel()), tuple(imgpts[3 + 4].ravel()), (0, 0, 255), 2)
        cv2.line(frame, tuple(imgpts[3 + 4].ravel()), tuple(imgpts[0 + 4].ravel()), (0, 0, 255), 2)
def draw_debug(img, pose, gt_pose, tracker, alpha, debug_info):
    if debug_info is not None:
        img_render, bb, _ = debug_info
        img_render = cv2.resize(img_render, (bb[2, 1] - bb[0, 1], bb[1, 0] - bb[0, 0]))
        crop = img[bb[0, 0]:bb[1, 0], bb[0, 1]:bb[2, 1], :]
        h, w, c = crop.shape
        blend = image_blend(img_render[:h, :w, ::-1], crop)
        img[bb[0, 0]:bb[1, 0], bb[0, 1]:bb[2, 1], :] = cv2.addWeighted(
            img[bb[0, 0]:bb[1, 0], bb[0, 1]:bb[2, 1], :], 1 - alpha, blend, alpha, 1)
    else:
        axis = compute_axis(pose, tracker.camera, tracker.object_width, scale=(1000, -1000, -1000))
        axis_gt = compute_axis(gt_pose, tracker.camera, tracker.object_width, scale=(1000, -1000, -1000))

        cv2.line(img, tuple(axis_gt[0, ::-1]), tuple(axis_gt[1, ::-1]), (0, 0, 155), 3)
        cv2.line(img, tuple(axis_gt[0, ::-1]), tuple(axis_gt[2, ::-1]), (0, 155, 0), 3)
        cv2.line(img, tuple(axis_gt[0, ::-1]), tuple(axis_gt[3, ::-1]), (155, 0, 0), 3)

        cv2.line(img, tuple(axis[0, ::-1]), tuple(axis[1, ::-1]), (0, 0, 255), 3)
        cv2.line(img, tuple(axis[0, ::-1]), tuple(axis[2, ::-1]), (0, 255, 0), 3)
        cv2.line(img, tuple(axis[0, ::-1]), tuple(axis[3, ::-1]), (255, 0, 0), 3)
def _process_image(self, image):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv = cv2.medianBlur(hsv, 5)

    draw_col = (0, 0, 255)
    p1 = (self.edges['l'], self.edges['d'])
    p2 = (self.edges['r'], self.edges['u'])
    cv2.rectangle(hsv, p1, p2, draw_col)

    vert_spacing = (self.edges['r'] - self.edges['l']) / float(len(grid))
    for i in range(1, len(grid)):
        x_pos = int(self.edges['l'] + i * vert_spacing)
        p1 = (x_pos, self.edges['d'])
        p2 = (x_pos, self.edges['u'])
        cv2.line(hsv, p1, p2, draw_col)

    horiz_spacing = (self.edges['d'] - self.edges['u']) / float(len(grid[0]))
    for i in range(1, len(grid[0])):
        y_pos = int(self.edges['u'] + i * horiz_spacing)
        p1 = (self.edges['l'], y_pos)
        p2 = (self.edges['r'], y_pos)
        cv2.line(hsv, p1, p2, draw_col)

    return hsv
def draw_correspondences(img, dstpoints, projpts):
    display = img.copy()
    dstpoints = norm2pix(img.shape, dstpoints, True)
    projpts = norm2pix(img.shape, projpts, True)

    for pts, color in [(projpts, (255, 0, 0)),
                       (dstpoints, (0, 0, 255))]:
        for point in pts:
            cv2.circle(display, fltp(point), 3, color, -1, cv2.LINE_AA)

    for point_a, point_b in zip(projpts, dstpoints):
        cv2.line(display, fltp(point_a), fltp(point_b),
                 (255, 255, 255), 1, cv2.LINE_AA)

    return display
def visualize_contours(name, small, cinfo_list):
    regions = np.zeros_like(small)

    for j, cinfo in enumerate(cinfo_list):
        cv2.drawContours(regions, [cinfo.contour], 0,
                         CCOLORS[j % len(CCOLORS)], -1)

    mask = (regions.max(axis=2) != 0)

    display = small.copy()
    display[mask] = (display[mask] / 2) + (regions[mask] / 2)

    for j, cinfo in enumerate(cinfo_list):
        color = CCOLORS[j % len(CCOLORS)]
        color = tuple([c / 4 for c in color])

        cv2.circle(display, fltp(cinfo.center), 3,
                   (255, 255, 255), 1, cv2.LINE_AA)

        cv2.line(display, fltp(cinfo.point0), fltp(cinfo.point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    debug_show(name, 1, 'contours', display)
def visualize_span_points(name, small, span_points, corners):
    display = small.copy()

    for i, points in enumerate(span_points):
        points = norm2pix(small.shape, points, False)

        mean, small_evec = cv2.PCACompute(points.reshape((-1, 2)),
                                          None, maxComponents=1)

        dps = np.dot(points.reshape((-1, 2)), small_evec.reshape((2, 1)))
        dpm = np.dot(mean.flatten(), small_evec.flatten())

        point0 = mean + small_evec * (dps.min() - dpm)
        point1 = mean + small_evec * (dps.max() - dpm)

        for point in points:
            cv2.circle(display, fltp(point), 3,
                       CCOLORS[i % len(CCOLORS)], -1, cv2.LINE_AA)

        cv2.line(display, fltp(point0), fltp(point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    cv2.polylines(display, [norm2pix(small.shape, corners, True)],
                  True, (255, 255, 255))

    debug_show(name, 3, 'span points', display)
def draw_joints_13(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in xrange(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw torso in yellow lines
    torso = [[0, 1], [1, 8]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 2)
    # draw left part in pink lines
    lpart = [[1, 3], [3, 5], [5, 7], [3, 8], [8, 10], [10, 12]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 2)
    # draw right part in blue lines
    rpart = [[1, 2], [2, 4], [4, 6], [2, 8], [8, 9], [9, 11]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    cv2.imwrite(save_image, image)
def draw_joints_17(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in xrange(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw head in yellow lines
    head = [[0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]]
    for item in head:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 2)
    # draw upper part in pink lines
    upart = [[5, 6], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [6, 12], [11, 12]]
    for item in upart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 2)
    # draw lower part in blue lines
    lpart = [[11, 13], [12, 14], [13, 15], [14, 16]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    cv2.imwrite(save_image, image)
def draw_joints_16(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in xrange(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw torso in yellow lines
    torso = [[9, 8], [8, 7], [7, 6]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 2)
    # draw left part in pink lines
    lpart = [[8, 13], [13, 14], [14, 15], [13, 6], [6, 3], [3, 4], [4, 5]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 2)
    # draw right part in blue lines
    rpart = [[8, 12], [12, 11], [11, 10], [12, 6], [6, 2], [2, 1], [1, 0]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]),
                 (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    cv2.imwrite(save_image, image)
def draw_gt_market(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # draw joints in green spots
    for j in xrange(len(joints) / 2):
        cv2.circle(image, (joints[j * 2], joints[j * 2 + 1]), 2, (0, 255, 0), 1)
    # draw torso in yellow lines
    p1 = [0, 0, 5]
    p2 = [1, 14, 10]
    for j in xrange(len(p1)):
        cv2.line(image, (joints[p1[j] * 2], joints[p1[j] * 2 + 1]),
                 (joints[p2[j] * 2], joints[p2[j] * 2 + 1]), (0, 255, 255), 1)
    # draw left part in pink lines
    p1 = [14, 13, 12, 13, 10, 9]
    p2 = [13, 12, 11, 10, 9, 8]
    for j in xrange(len(p1)):
        cv2.line(image, (joints[p1[j] * 2], joints[p1[j] * 2 + 1]),
                 (joints[p2[j] * 2], joints[p2[j] * 2 + 1]), (255, 0, 255), 1)
    # draw right part in blue lines
    p1 = [1, 2, 3, 2, 5, 6]
    p2 = [2, 3, 4, 5, 6, 7]
    for j in xrange(len(p1)):
        cv2.line(image, (joints[p1[j] * 2], joints[p1[j] * 2 + 1]),
                 (joints[p2[j] * 2], joints[p2[j] * 2 + 1]), (255, 0, 0), 1)
    cv2.imwrite(save_image, image)