We extracted the following 50 code examples from open-source Python projects to demonstrate how to use cv2.rectangle().
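Before the project snippets, here is a minimal standalone sketch of the call itself; the canvas size, corner points, and colors below are arbitrary illustration values:

import cv2
import numpy as np

# Blank 300x300 BGR canvas (size and coordinates are arbitrary illustration values).
canvas = np.zeros((300, 300, 3), dtype=np.uint8)

# cv2.rectangle(img, pt1, pt2, color, thickness) draws in place:
# pt1/pt2 are opposite (x, y) corners, color is BGR, and a negative
# thickness (or cv2.FILLED) fills the rectangle instead of outlining it.
cv2.rectangle(canvas, (50, 50), (250, 200), (0, 255, 0), 2)
cv2.rectangle(canvas, (120, 120), (180, 160), (0, 0, 255), cv2.FILLED)

cv2.imshow("rectangles", canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()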
def __bound_contours(roi):
    """
    Returns a modified copy of roi (non-destructive) and the rectangles
    found by the algorithm.
    @roi region of interest to find contours
    @return (roi, rects)
    """
    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)
    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for the detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]  # keep the five largest contour areas

    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:  # if height is enough
            # create bounding rectangle
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x + w, y + h), (0, 255, 0), 1)

    return (roi_copy, rects)
def draw_rects(img, rects):
    """
    Draw a bounding box and a predicted gender label for each detected face.
    :param img:
    :param rects:
    :return:
    """
    for x, y, w, h in rects:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
        face = img[y:y + h, x:x + w]  # crop the face region (rows are y, columns are x)
        face = cv2.resize(face, (224, 224))
        if Gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
def test_image(addr):
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)

    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
        face_crop = im[y:y + h, x:x + w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = np.asarray(face_crop)
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
def visualize(self, vis, colored=True):
    try:
        tids = set(self.ids)
    except Exception:
        return vis

    for hid, hbox in izip(self.ids, self.bboxes):
        cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0, 255, 0), 1)

    vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)

    # for tid, pts in self.tm_.tracks.iteritems():
    #     if tid not in tids: continue
    #     cv2.polylines(vis, [np.vstack(pts.items).astype(np.int32)[-4:]], False,
    #                   (0,255,0), thickness=1)
    #     tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
    #     cv2.rectangle(vis, (tl[0], tl[1]), (br[0], br[1]), (0,255,0), -1)

    # OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
    return vis
def display_detected(self, frame, face_locs, people, confidence):
    """
    - Display ROIs of detected faces with labels
    :param frame:
    :param face_locs:
    :param people: people in image classified
    :param confidence: recognition confidence
    :return:
    """
    if not len(face_locs) == 0:  # otherwise nothing detected
        for (top, right, bottom, left), name, conf in zip(face_locs, people, confidence):
            # Scale back up face locations since the frame we detected in
            # was scaled to 1/4 size (top, right, bottom, left)

            # confidence string
            conf_4f = "%.3f" % conf
            peop_conf = "{} {}%".format(name, float(conf_4f) * 100)

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            # cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.rectangle(frame, (left, top + 20), (right, top), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            # cv2.putText(frame, peop_conf, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
            cv2.putText(frame, peop_conf, (left, top + 15), font, 0.5, (255, 255, 255), 1)
def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    if not len(bboxes):
        return vis

    if not colored:
        cols = np.tile([240, 240, 240], [len(bboxes), 1])
    else:
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])

    texts = [None] * len(bboxes) if texts is None else texts
    for col, b, t in zip(cols, bboxes, texts):
        if ellipse:
            cv2.ellipse(vis, ((b[0] + b[2]) / 2, (b[1] + b[3]) / 2),
                        ((b[2] - b[0]) / 2, (b[3] - b[1]) / 2), 0, 0, 360,
                        color=tuple(col), thickness=1)
        else:
            cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), tuple(col), 2)
        if t:
            annotate_bbox(vis, b, title=t)
    return vis
def mouseInteraction(self, event, x, y, flags, params):
    if self.userInteraction is True:
        if event == cv2.EVENT_LBUTTONDOWN:
            self.refPt = [(x, y)]
            self.workingFrame[y, x] = [0, 0, 255]
            self.showFrame(self.selectionWindow, self.workingFrame)
        elif event == cv2.EVENT_LBUTTONUP:
            self.undoFrames.append(self.workingFrame.copy())
            self.refPt.append((x, y))
            if self.refPt[0][0] != self.refPt[1][0] and self.refPt[0][1] != self.refPt[1][1]:
                area = trackedArea(self.refPt)
                area.setStackSize(30)
                area.setTemplate(self.processedFrame)
                # area.initKalman()
                corn = area.getCorners()
                self.trackedAreasList.append(area)
                cv2.rectangle(self.workingFrame, corn[0], corn[1], (0, 0, 255), 1)
                self.showFrame(self.selectionWindow, self.workingFrame)
def _draw_box(im, box_list, label_list, color=(0, 255, 0), cdict=None, form='center'):
    assert form == 'center' or form == 'diagonal', \
        'bounding box format not accepted: {}.'.format(form)

    for bbox, label in zip(box_list, label_list):
        if form == 'center':
            bbox = bbox_transform(bbox)

        xmin, ymin, xmax, ymax = [int(b) for b in bbox]

        l = label.split(':')[0]  # text before ":" in "CLASS: (PROB)"
        if cdict and l in cdict:
            c = cdict[l]
        else:
            c = color

        # draw box
        cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
        # draw label
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
def draw_tracks(self, out, colored=False, color_type='unique',
                min_track_length=4, max_track_length=4):
    """
    color_type: {age, unique}
    """
    N = 20
    # inds = self.confident_tracks(min_length=min_track_length)
    # if not len(inds):
    #     return
    # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
    # lengths = self.tm_.lengths[inds]

    ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

    if color_type == 'unique':
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
    elif color_type == 'age':
        cols = colormap(lengths)
    else:
        raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

    if not colored:
        cols = np.tile([0, 240, 0], [len(self.tm_.tracks), 1])

    for col, pts in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()):
        cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False,
                      tuple(col), thickness=1)
        tl, br = np.int32(pts.latest_item) - 2, np.int32(pts.latest_item) + 2
        cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
def save_all_detection(im_array, detections, imdb_classes=None, thresh=0.7):
    """
    Save all detections in one image as result.jpg.
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param imdb_classes: list of names in imdb
    :param thresh: threshold for valid detections
    :return:
    """
    import random
    im = image_processing.transform_inverse(im_array, config.PIXEL_MEANS)
    im = im[:, :, ::-1].copy()  # back to b,g,r
    for j in range(1, len(imdb_classes)):
        color = (255 * random.random(), 255 * random.random(), 255 * random.random())  # generate a random color
        dets = detections[j]
        for i in range(dets.shape[0]):
            bbox = dets[i, :4]
            score = dets[i, -1]
            if score > thresh:
                cv2.rectangle(im, (int(round(bbox[0])), int(round(bbox[1]))),
                              (int(round(bbox[2])), int(round(bbox[3]))), color, 2)
                cv2.putText(im, '%s' % imdb_classes[j], (int(bbox[0]), int(bbox[1])),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)  # putText needs integer coordinates
    cv2.imwrite("result.jpg", im)
def draw_markers(img, markers):
    for m in markers:
        centroid = np.array(m['centroid'], dtype=np.float32)
        origin = np.array(m['verts'][0], dtype=np.float32)
        hat = np.array([[[0, 0], [0, 1], [.5, 1.25], [1, 1], [1, 0]]], dtype=np.float32)
        hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
        if m['id_confidence'] > .9:
            cv2.polylines(img, np.int0(hat), color=(0, 0, 255), isClosed=True)
        else:
            cv2.polylines(img, np.int0(hat), color=(0, 255, 0), isClosed=True)
        # cv2.polylines(img, np.int0(centroid), color=(255, 255, int(255*m['id_confidence'])), isClosed=True, thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]), color=(0,0,0), thickness=-1)
        cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'opf_vel' in m:
            m_str = 'opf: {}'.format(m['opf_vel'])  # label fixed from a duplicated 'otf'
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print res
    print res.shape
    cv2.imwrite('image/shape.png', res)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print cv2.minMaxLoc(res)

    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc

    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            # output[2] holds per-component stats: x, y, width, height, area
            if min_thresh <= output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]),
                              (output[2][i][0] + output[2][i][2],
                               output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # waitKey is needed for imshow to refresh; Esc quits
            break
    cap.release()
    cv2.destroyAllWindows()
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
def imgSeg(img):
    approx = imgSeg_contour(img, 4, 4, 4, 0.04)
    himg, wimg, _ = img.shape[:3]
    # h1, h2, w1, w2 = imgSeg_rect(approx, himg, wimg)
    h1, h2, w1, w2 = imgSeg_logo(approx, himg, wimg)

    if (w2 - w1) < 20:
        approx = imgSeg_contour(img, 6, 6, 6, 0.02)
        himg, wimg, _ = img.shape[:3]
        # h1, h2, w1, w2 = imgSeg_rect(approx, himg, wimg)
        h1, h2, w1, w2 = imgSeg_logo(approx, himg, wimg)

    if (h2 - h1) > (w2 - w1):
        approx = imgSeg_contour(img, 2, 2, 2, 0.04)
        himg, wimg, _ = img.shape[:3]
        # h1, h2, w1, w2 = imgSeg_rect(approx, himg, wimg)
        h1, h2, w1, w2 = imgSeg_logo(approx, himg, wimg)

    # cv2.rectangle(img, (w1, h1), (w2, h2), 255, 2)
    return img[h1:h2, w1:w2, :]
def display(self, frame, face_locations):
    """
    - Display results on screen with bboxes
    :param frame: window frame
    :return: window with resulting predictions on faces
    """
    # Display the results
    scale = 1
    if self.resize:
        scale = 4
    if not len(face_locations) == 0:  # otherwise nothing detected
        for (top, right, bottom, left) in face_locations:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= scale
            right *= scale
            bottom *= scale
            left *= scale
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 255), 2)
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)  # -1 for the last argument acts like CV_FILLED
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
    cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
    crop_res = img_src[Rymin:Rymax, Rxmin:Rxmax]
    grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

    _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow('Thresh', thresh1)

    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # draw contours on the threshold image
    if len(contours) > 0:
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

    return contours, crop_res

# Check ConvexHull and Convexity Defects
def DispID(x, y, w, h, NAME, Image):
    # --------------------------------- THE POSITION OF THE ID BOX ---------------------------------------------
    Name_y_pos = y - 10
    Name_X_pos = x + w // 2 - (len(NAME) * 7 // 2)  # integer division keeps coordinates ints for cv2
    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos + 10 + (len(NAME) * 7) > Image.shape[1]):
        Name_X_pos = Name_X_pos - (Name_X_pos + 10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10
    # ------------------------------------ THE DRAWING OF THE BOX AND ID --------------------------------------
    draw_box(Image, x, y, w, h)
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), (0, 0, 0), -2)  # Draw a black rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), WHITE, 1)
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)  # Print the name of the ID
def DispID2(x, y, w, h, NAME, Image):
    # --------------------------------- THE POSITION OF THE ID BOX -------------------------------------------------
    Name_y_pos = y - 40
    Name_X_pos = x + w // 2 - (len(NAME) * 7 // 2)  # integer division keeps coordinates ints for cv2
    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos + 10 + (len(NAME) * 7) > Image.shape[1]):
        Name_X_pos = Name_X_pos - (Name_X_pos + 10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10
    # ------------------------------------ THE DRAWING OF THE BOX AND ID --------------------------------------
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), (0, 0, 0), -2)  # Draw a black rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), WHITE, 1)
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)  # Print the name of the ID

# --------------- THIRD ID BOX ----------------------
def DispID3(x, y, w, h, NAME, Image):
    # --------------------------------- THE POSITION OF THE ID BOX -------------------------------------------------
    Name_y_pos = y - 70
    Name_X_pos = x + w // 2 - (len(NAME) * 7 // 2)  # integer division keeps coordinates ints for cv2
    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos + 10 + (len(NAME) * 7) > Image.shape[1]):
        Name_X_pos = Name_X_pos - (Name_X_pos + 10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10
    # ------------------------------------ THE DRAWING OF THE BOX AND ID --------------------------------------
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), (0, 0, 0), -2)  # Draw a black rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos - 10, Name_y_pos - 25),
                  (Name_X_pos + 10 + (len(NAME) * 7), Name_y_pos - 1), WHITE, 1)
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)  # Print the name of the ID
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])),
                      (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    for i in inds:
        bbox = dets[i, :4].astype(int)  # OpenCV drawing calls need integer coordinates
        score = dets[i, -1]

        # Create rectangle and text using OpenCV
        # print ('ClassName:', class_name, 'bbox:', bbox, 'score:', score)

        # Draw the rectangle
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        # Draw the text
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)

    # Show image
    # cv2.imshow("Detect Result", im)
def contour_to_monitor_coords(screenCnt):
    '''Apply the pyimagesearch algorithm to identify tl, tr, br, bl points from a contour'''
    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour into four points and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = screenCnt.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    return rect
def draw(self, image):
    if len(self.tilesByOrder) == 0:
        cv2.imshow("image", image)
    for tile in self.tilesByOrder:
        cv2.rectangle(image, (tile.wx, tile.wy),
                      (tile.wx + tile.w, tile.wy + tile.h), (0, 255, 0), 1)
        # Left bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy),
                      (tile.wx, tile.wy + tile.h), (40, 255, 40), -1)
        # Top bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy - tile.t),
                      (tile.wx + tile.w, tile.wy), (40, 255, 40), -1)
        # Right bezel
        cv2.rectangle(image, (tile.wx + tile.w, tile.wy - tile.t),
                      (tile.wx + tile.w + tile.r, tile.wy + tile.h), (40, 255, 40), -1)
        # Bottom bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy + tile.h),
                      (tile.wx + tile.w + tile.r, tile.wy + tile.h + tile.b), (40, 255, 40), -1)
    cv2.imshow("image", image)
def update_vis(self):
    ims = self.opt_engine.get_images(self.frame_id)

    if ims is not None:
        self.ims = ims
    if self.ims is None:
        return

    ims_show = []
    n_imgs = self.ims.shape[0]
    for n in range(n_imgs):
        # im = ims[n]
        im_s = cv2.resize(self.ims[n], (self.width, self.width), interpolation=cv2.INTER_CUBIC)
        if n == self.select_id and self.topK > 1:
            t = 3  # thickness
            cv2.rectangle(im_s, (t, t), (self.width - t, self.width - t), (0, 255, 0), t)
        im_s = im_s[np.newaxis, ...]
        ims_show.append(im_s)
    if ims_show:
        ims_show = np.concatenate(ims_show, axis=0)
        g_tmp = utils.grid_vis(ims_show, self.grid_size[1], self.grid_size[0])  # (nh, nw)
        self.vis_results = g_tmp.copy()
        self.update()
def dispact_and_update(img, hack, base_im, x, y, w, h):
    try:
        myurl = "http://facejack.westeurope.cloudapp.azure.com:5001/imsend"
        headers = {
            'content-type': "application/x-www-form-urlencoded",
            'cache-control': "no-cache"
        }
        r = requests.post(url=myurl, data=img, headers=headers, params={'hack': str(hack)}).json()
        reply = 'authentication' in r and r['authentication'] == "ALLOWED"
        disp_face = cv2.resize(base_im[y:y + h, x:x + w], (224, 224), 0, 0, cv2.INTER_LANCZOS4)
        if reply:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 255, 0), 2)
        else:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 0, 255), 2)
        cv2.imshow("Face", disp_face)
    finally:
        myl.release()
def get_annotated_cv_image(cv_image, recognitions):
    """
    Gets an annotated CV image based on recognitions, drawn using cv2.rectangle
    :param cv_image: Original cv image
    :param recognitions: List of recognitions
    :return: Annotated image
    """
    annotated_cv_image = cv_image.copy()

    c_map = color_map(N=len(recognitions), normalized=True)
    for i, recognition in enumerate(recognitions):
        x_min, y_min = recognition.roi.x_offset, recognition.roi.y_offset
        x_max, y_max = x_min + recognition.roi.width, y_min + recognition.roi.height

        cv2.rectangle(annotated_cv_image, (x_min, y_min), (x_max, y_max),
                      (c_map[i, 2] * 255, c_map[i, 1] * 255, c_map[i, 0] * 255), 10)
    return annotated_cv_image
def click_and_crop(event, x, y, flags, param):
    global bbs, x_upper, id
    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            bbs.append([x, y, 0, 0, 0, 0, 0, 0])
        else:
            bbs[-1][4] = x
            bbs[-1][5] = y
    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            bbs[-1][2] = abs(x - bbs[-1][0])
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0], bbs[-1][1]),
                          (bbs[-1][0] + bbs[-1][2], bbs[-1][1] + bbs[-1][3]), (0, 0, 255), 2)
            # cv2.putText(image, 'Upper %d' % id, (bbs[-1][0], bbs[-1][1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255))
        else:
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4], bbs[-1][5]),
                          (bbs[-1][4] + bbs[-1][6], bbs[-1][5] + bbs[-1][7]), (0, 255, 0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4], bbs[-1][5]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0))
        cv2.imshow("image", image)
        x_upper = not x_upper
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3] ** 2 * w
        hh = obj[4] ** 2 * h
        cv2.rectangle(im,
                      (int(centerx - ww / 2), int(centery - hh / 2)),
                      (int(centerx + ww / 2), int(centery + hh / 2)),
                      (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
def findbodies(image, faces):
    bodies = np.zeros_like(faces)
    bodiesindex = 0

    # for each face, estimate a body box
    for (x, y, facewidth, faceheight) in faces:
        # 3*faceheight tall, 7/3*facewidth wide, 0.5*faceheight below the face.
        bodyheight = 3 * faceheight
        bodywidth = 7 * facewidth // 3  # integer math: a plain 7/3 truncates to 2 under Python 2
        y_body = y + faceheight + faceheight // 2
        x_body = x + facewidth // 2 - bodywidth // 2
        bodies[bodiesindex] = (x_body, y_body, bodywidth, bodyheight)
        bodiesindex = bodiesindex + 1
        # cv2.rectangle(image, (x_body, y_body), (x_body + bodywidth, y_body + bodyheight), (0, 255, 0), 2)

    return bodies
def verify_sizes(rectangle):
    # print candidate
    # help(cv2.minAreaRect)
    (x, y), (width, height), rect_angle = rectangle

    # Calculate the angle and discard rects that have been rotated more than 15 degrees
    angle = 90 - rect_angle if (width < height) else -rect_angle
    if 15 < abs(angle) < 165:  # 180 degrees is the maximum
        return False

    # We make basic validations of the detected regions based on area and aspect ratio.
    # We only consider that a region can be a plate if its aspect ratio is approximately
    # 520/110 = 4.727272 (plate width divided by plate height) with an error margin of
    # 40 percent, and its area corresponds to a plate height between 15 and 125 pixels.
    # These values depend on the image sizes and camera position:
    area = height * width
    if height == 0 or width == 0:
        return False
    if not satisfy_ratio(area, width, height):
        return False
    return True
def make_mouse_callback(imgs, ref_pt):
    # initialize the list of reference points and a boolean indicating
    # whether cropping is being performed or not
    cropping = [False]
    clone = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        # grab references to the global variables
        # global ref_pt, cropping

        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates and indicate that cropping is being performed
        if event == cv2.EVENT_LBUTTONDOWN:
            ref_pt[0] = (x, y)
            cropping[0] = True
        # check to see if the left mouse button was released
        elif event == cv2.EVENT_LBUTTONUP:
            # record the ending (x, y) coordinates and indicate that
            # the cropping operation is finished
            ref_pt[1] = (x, y)
            cropping[0] = False

            # draw a rectangle around the region of interest
            imgs[1] = image = clone.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
        elif event == cv2.EVENT_MOUSEMOVE and cropping[0]:
            img2 = clone.copy()
            cv2.rectangle(img2, ref_pt[0], (x, y), (0, 255, 0), 2)
            imgs[1] = image = img2
            cv2.imshow("image", image)

    return _click_and_crop
def draw_boxes(im, bboxes, is_display=True, color=None, caption="Image", wait=True):
    """
    bboxes: bounding boxes
    """
    im = im.copy()
    for box in bboxes:
        if color is None:
            if len(box) == 5 or len(box) == 9:
                c = tuple(cm.jet([box[-1]])[0, 2::-1] * 255)
            else:
                c = tuple(np.random.randint(0, 256, 3))
        else:
            c = color
        cv2.rectangle(im, tuple(box[:2]), tuple(box[2:4]), c)
    if is_display:
        cv2.imshow(caption, im)
        if wait:
            cv2.waitKey(0)
    return im
def find_faces(self, image, draw_box=False):
    """Uses a haarcascade to detect faces inside an image.

    Args:
        image: The image.
        draw_box: If True, the image will be marked with a rectangle.

    Return:
        The faces as returned by OpenCV's detectMultiScale method for cascades.
    """
    frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        frame_gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(50, 50),
        flags=0)

    if draw_box:
        for x, y, w, h in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return faces
def cropCircle(img, resize=None):
    if resize:
        if img.shape[0] > img.shape[1]:
            tile_size = (int(img.shape[1] * resize / img.shape[0]), resize)
        else:
            tile_size = (resize, int(img.shape[0] * resize / img.shape[1]))
        img = cv2.resize(img, dsize=tile_size, interpolation=cv2.INTER_CUBIC)
    else:
        tile_size = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]),
                 min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff,
                  (min(rect[1], rect[3]), min(rect[0], rect[2])),
                  (max(rect[1], rect[3]), max(rect[0], rect[2])), 3, 2)

    return [img_crop, rectangle, tile_size]
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    CV_AA = 16
    for cls_id in range(1, 21):
        _cls = clss[:, cls_id][:, np.newaxis]
        _bbx = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((_bbx, _cls))
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        inds = np.where(dets[:, -1] >= conf)[0]
        for i in inds:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
def test_generate_proposals(self):
    self.assertEqual(self.total_anchors,
                     len(self.shifts) * self.anchor_target_layer.anchors.shape[0])
    min_x = self.all_anchors[:, 0].min()
    min_y = self.all_anchors[:, 1].min()
    max_x = self.all_anchors[:, 2].max()
    max_y = self.all_anchors[:, 3].max()
    canvas = np.zeros(
        (int(abs(min_y) + max_y) + 1,
         int(abs(min_x) + max_x) + 1), dtype=np.uint8)
    self.all_anchors[:, 0] -= min_x
    self.all_anchors[:, 1] -= min_y
    self.all_anchors[:, 2] -= min_x
    self.all_anchors[:, 3] -= min_y
    for anchor in self.all_anchors:
        anchor = list(six.moves.map(int, anchor))
        cv.rectangle(
            canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
    cv.imwrite('tests/all_anchors.png', canvas)
def test_keep_inside(self):
    inds_inside, anchors = self.inds_inside, self.anchors

    min_x = anchors[:, 0].min()
    min_y = anchors[:, 1].min()
    max_x = anchors[:, 2].max()
    max_y = anchors[:, 3].max()
    canvas = np.zeros(
        (int(max_y - min_y) + 1,
         int(max_x - min_x) + 1), dtype=np.uint8)
    anchors[:, 0] -= min_x
    anchors[:, 1] -= min_y
    anchors[:, 2] -= min_x
    anchors[:, 3] -= min_y
    for i, anchor in enumerate(anchors):
        anchor = list(six.moves.map(int, anchor))
        _canvas = np.zeros(
            (int(max_y - min_y) + 1,
             int(max_x - min_x) + 1), dtype=np.uint8)
        cv.rectangle(
            _canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
        cv.rectangle(
            canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
        cv.imwrite('tests/anchors_inside_{}.png'.format(i), _canvas)
    cv.imwrite('tests/anchors_inside.png', canvas)  # dropped a stray .format(i) on a plain filename
def test_initial_pass_through_compare(self):
    original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
    against = self.provider.get_img_from_screen_shot()
    wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

    # convert the images to grayscale
    original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
    against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
    wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)

    # compute the structural similarity scores
    (score, diff) = compare_ssim(original, against, full=True)
    diff = (diff * 255).astype("uint8")
    self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
    (score, nothing) = compare_ssim(original, wrong, full=True)
    self.assertTrue(score < .90)

    if self.__debug_pictures__:
        # threshold the difference image, followed by finding contours to
        # obtain the regions of the two input images that differ
        thresh = cv2.threshold(diff, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]

        # loop over the contours
        for c in cnts:
            # compute the bounding box of the contour and then draw the
            # bounding box on both input images to represent where the two
            # images differ
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # show the output images
        diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
        images = ("Original", original), ("Against", against), ("Wrong", wrong)
        self.setup_compare_images(diffs)
        self.setup_compare_images(images)
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)

    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)

    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
def click_and_crop(event, x, y, flags, param):
    # grab references to the global variables
    global refPt, cropping, i

    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being performed
    if event == cv2.EVENT_LBUTTONDOWN:
        if refPt == []:
            refPt = [(x, y)]
        else:
            refPt.append((x, y))
        cropping = True
        i += 1

    if event == cv2.EVENT_MOUSEMOVE and cropping:
        image2 = image.copy()
        cv2.rectangle(image2, refPt[2 * i - 2], (x, y), (0, 255, 0), 2)
        cv2.imshow("image", image2)

    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False

        # draw a rectangle around the region of interest
        cv2.rectangle(image, refPt[2 * i - 2], refPt[2 * i - 1], (0, 255, 0), 2)
        # cv2.rectangle(image2, refPt[2*i-2], refPt[2*i-1], (0, 255, 0), 2)
        cv2.imshow("image", image)

# construct the argument parser and parse the arguments
def showRegions(self):
    output = self.origin_image.copy()
    for r in range(0, np.shape(self.regions)[0]):
        rect = self.regions[r]
        cv2.rectangle(output, (rect[0], rect[1]),
                      (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 2)
        cv2.rectangle(output, (rect[0], rect[1]),
                      (rect[0] + rect[2], rect[1] + rect[3]), (255, 0, 0), 1)
    return output

# --------------------------------------------------------
# --------------------------------------------------------
# Class providing an interface to perform OCR
def draw_rects(img, rects, color):
    """
    Draw a bounding box and a predicted gender label for each detected face.
    :param img:
    :param rects:
    :param color:
    :return:
    """
    for x, y, w, h in rects:
        face = img[y:y + h, x:x + w]  # numpy indexing is row-major: rows are y, columns are x
        face = cv2.resize(face, (224, 224))
        if gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), lineType=cv2.LINE_AA)
def draw_labeled_bboxes(img, labels):
    """
    Draw boxes around detected objects.
    """
    # Iterate through all detected cars
    for car_number in range(1, labels[1] + 1):
        # Find pixels with each car_number label value
        nonzero = (labels[0] == car_number).nonzero()
        # Identify x and y values of those pixels
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Define a bounding box based on min/max x and y
        bbox = ((np.min(nonzerox), np.min(nonzeroy)),
                (np.max(nonzerox), np.max(nonzeroy)))
        # Draw the box on the image
        cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
    return img
def draw_countdown(self, frame):
    # Draw the countdown, e.g. "3..".
    countdown_x_offset = 1 + self.countdown  # Offset from left edge
    countdown_x = int(self.screenwidth - (self.screenwidth / 5) * countdown_x_offset)
    self.overlay = frame.copy()
    countdown_panel_y1 = int(self.screenheight * (4. / 5))
    cv2.rectangle(self.overlay, (0, countdown_panel_y1),
                  (self.screenwidth, self.screenheight), (224, 23, 101), -1)
    cv2.addWeighted(self.overlay, OPACITY, frame, 1 - OPACITY, 0, frame)
    countdown_y_offset = 20
    countdown_y = int((self.screenheight * 7. / 8) + countdown_y_offset)
    countdown_coord = (countdown_x, countdown_y)
    draw_text(countdown_coord, frame, str(self.countdown))
    return frame
def random_augment_image(image, row):
    # start0_max, end0_max, start1_max, end1_max = get_bounding_boxes_positions(image, row)
    # image = cv2.rectangle(image, (int(start1_max), int(start0_max)), (int(end1_max), int(end0_max)), (0, 0, 255), thickness=5)
    if random.randint(0, 1) == 0:
        image = return_random_crop(image, row)
    else:
        image = return_random_perspective(image, row)
    image = random_rotate(image)

    # all possible mirrorings and flips (in total there are only 8 possible configurations)
    mirror = random.randint(0, 1)
    if mirror != 0:
        image = image[::-1, :, :]
    angle = random.randint(0, 3)
    if angle != 0:
        image = np.rot90(image, k=angle)

    image = lightning_change(image)
    image = blur_image(image)

    return image
def grabcutbb(im, bbv):
    mask = np.full(im.shape[:2], cv2.GC_PR_BGD, np.uint8)
    for bb in bbv:
        if bb[4]:
            cv2.rectangle(mask, (bb[0], bb[1]), (bb[2], bb[3]), int(cv2.GC_FGD), -1)
        else:
            cv2.rectangle(mask, (bb[0], bb[1]), (bb[2], bb[3]), int(cv2.GC_BGD), -1)

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)

    h, w = im.shape[:2]
    rect = (0, h // 2, w, h)  # integer division: grabCut expects an int rect
    cv2.grabCut(im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    return mask2