The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.FONT_HERSHEY_SIMPLEX.
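Before the project examples, here is a minimal, self-contained sketch of the call pattern they all share (the file names and all parameter values are illustrative, not taken from any of the projects below):

import cv2

# Load any BGR image from disk (the path is a placeholder).
img = cv2.imread('example.jpg')
font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(image, text, bottom-left origin, fontFace, fontScale, color, thickness, lineType)
cv2.putText(img, 'Hello', (10, 30), font, 1.0, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imwrite('annotated.jpg', img)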
def draw_rects(img, rects):
    """Draw a rectangle around each detected face and label it with the predicted gender.
    :param img:
    :param rects:
    :return:
    """
    for x, y, w, h in rects:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 0), 2)
        face = img
        face = cv2.resize(face, (224, 224))
        if Gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (255, 255, 255), lineType=cv2.LINE_AA)
def test_image(addr):
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2, 5)
        face_crop = im[y:y+h, x:x+w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = np.asarray(face_crop)
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
def extract_checkerboard_and_draw_corners(self, image, chbrd_size):
    image = CvBridge().imgmsg_to_cv2(image, 'mono8')
    image_color = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    ret, corners = cv2.findChessboardCorners(image_color, chbrd_size)
    if not ret:
        cv2.putText(image_color, 'Checkerboard not found', (0, self.res_height - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))

    cv2.drawChessboardCorners(image_color, chbrd_size, corners, ret)

    return ret, corners, image_color
def _stampText(image, text, line):
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.55
    margin = 5
    thickness = 2
    color = (255, 255, 255)

    size = cv2.getTextSize(text, font, font_scale, thickness)

    text_width = size[0][0]
    text_height = size[0][1]
    line_height = text_height + size[1] + margin

    x = image.shape[1] - margin - text_width
    y = margin + size[0][1] + line * line_height

    cv2.putText(image, text, (x, y), font, font_scale, color, thickness)
def _draw_box(im, box_list, label_list, color=(0, 255, 0), cdict=None, form='center'):
    assert form == 'center' or form == 'diagonal', \
        'bounding box format not accepted: {}.'.format(form)

    for bbox, label in zip(box_list, label_list):
        if form == 'center':
            bbox = bbox_transform(bbox)

        xmin, ymin, xmax, ymax = [int(b) for b in bbox]

        l = label.split(':')[0]  # text before "CLASS: (PROB)"
        if cdict and l in cdict:
            c = cdict[l]
        else:
            c = color

        # draw box
        cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
        # draw label
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
def add_text(img, text, text_top, image_scale):
    """
    Args:
        img (numpy array of shape (width, height, 3)): input image
        text (str): text to add to image
        text_top (int): location of top text to add
        image_scale (float): image resize scale

    Summary:
        Add display text to a frame.

    Returns:
        Next available location of top text (allows for chaining this function)
    """
    cv2.putText(
        img=img,
        text=text,
        org=(0, text_top),
        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=0.15 * image_scale,
        color=(255, 255, 255))
    return text_top + int(5 * image_scale)
def save_all_detection(im_array, detections, imdb_classes=None, thresh=0.7):
    """
    save all detections in one image with result.png
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param imdb_classes: list of names in imdb
    :param thresh: threshold for valid detections
    :return:
    """
    import random
    im = image_processing.transform_inverse(im_array, config.PIXEL_MEANS)
    im = im[:, :, ::-1].copy()  # back to b,g,r
    for j in range(1, len(imdb_classes)):
        color = (255*random.random(), 255*random.random(), 255*random.random())  # generate a random color
        dets = detections[j]
        for i in range(dets.shape[0]):
            bbox = dets[i, :4]
            score = dets[i, -1]
            if score > thresh:
                cv2.rectangle(im, (int(round(bbox[0])), int(round(bbox[1]))),
                              (int(round(bbox[2])), int(round(bbox[3]))), color, 2)
                cv2.putText(im, '%s' % imdb_classes[j],
                            (int(round(bbox[0])), int(round(bbox[1]))),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
    cv2.imwrite("result.jpg", im)
def draw_markers(img, markers):
    for m in markers:
        centroid = np.array(m['centroid'], dtype=np.float32)
        origin = np.array(m['verts'][0], dtype=np.float32)
        hat = np.array([[[0, 0], [0, 1], [.5, 1.25], [1, 1], [1, 0]]], dtype=np.float32)
        hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
        if m['id_confidence'] > .9:
            cv2.polylines(img, np.int0(hat), color=(0, 0, 255), isClosed=True)
        else:
            cv2.polylines(img, np.int0(hat), color=(0, 255, 0), isClosed=True)
        # cv2.polylines(img, np.int0(centroid), color=(255, 255, int(255*m['id_confidence'])), isClosed=True, thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]), color=(0,0,0), thickness=-1)
        cv2.putText(img, m_str, tuple(np.int0(org)[0, :]),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
        if 'opf_vel' in m:
            m_str = 'otf: {}'.format(m['opf_vel'])
            org += (0, 12)
            cv2.putText(img, m_str, tuple(np.int0(org)[0, :]),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0, 0, 255))
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        # Create Rectangle and Text using OpenCV
        # print('ClassName:', class_name, 'bbox:', bbox, 'score:', score)
        # Draw the Rectangle
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        # Draw the Text
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)
        # Show Image
        # cv2.imshow("Detect Result", im)
def click_and_crop(event, x, y, flags, param):
    global bbs, x_upper, id
    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            bbs.append([x, y, 0, 0, 0, 0, 0, 0])
        else:
            bbs[-1][4] = x
            bbs[-1][5] = y
    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            bbs[-1][2] = abs(x - bbs[-1][0])
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0], bbs[-1][1]),
                          (bbs[-1][0]+bbs[-1][2], bbs[-1][1]+bbs[-1][3]), (0, 0, 255), 2)
            # cv2.putText(image, 'Upper %d' % id, (bbs[-1][0], bbs[-1][1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255))
        else:
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4], bbs[-1][5]),
                          (bbs[-1][4]+bbs[-1][6], bbs[-1][5]+bbs[-1][7]), (0, 255, 0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4], bbs[-1][5]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0))
        cv2.imshow("image", image)
        x_upper = not x_upper
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    CV_AA = 16
    for cls_id in range(1, 21):
        _cls = clss[:, cls_id][:, np.newaxis]
        _bbx = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((_bbx, _cls))
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        inds = np.where(dets[:, -1] >= conf)[0]
        for i in inds:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
def draw_rects(img, rects, color):
    """Crop each detected face, predict its gender, and draw a labelled rectangle.
    :param img:
    :param rects:
    :param color:
    :return:
    """
    for x, y, w, h in rects:
        face = img[x:x+w, y:y+h]
        face = cv2.resize(face, (224, 224))
        if gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        cv2.rectangle(img, (x, y), (w, h), color, 2)
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 2.0,
                    (255, 255, 255), lineType=cv2.LINE_AA)
def facial_landmark_detection(image, detector, predictor, file):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape
    landmark_faces = detector(gray, 1)

    faces = list()
    area = 0
    face_idx = 0
    bItr = False
    for (idx, landmark_faces) in enumerate(landmark_faces):
        shape = predictor(gray, landmark_faces)
        shape = shape_to_np(shape)
        (x, y, w, h) = rect_to_bb(landmark_faces, img_size, file)
        if (w * h) > area:
            area = w * h
            faces = [x, y, w, h]
            bItr = True
        # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # cv2.putText(image, "Face #{}".format(idx + 1), (x - 10, y - 10), \
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # for (x, y) in shape:
        #     cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    return bItr, faces
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None:
            continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)),
                          colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None:
            continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
                      colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1-2, y2-2), font, 0.5, (255, 0, 255), 1)
    return img
def my_draw_box(im, box_list, label_list, color=(0, 255, 0), cdict=None, label_placement='bottom'):
    assert label_placement == 'bottom' or label_placement == 'top', \
        'label_placement format not accepted: {}.'.format(label_placement)

    for bbox, label in zip(box_list, label_list):
        xmin, ymin, xmax, ymax = [int(b) for b in bbox]

        l = label.split(':')[0]  # text before "CLASS: (PROB)"
        if cdict and l in cdict:
            c = cdict[l]
        else:
            c = color

        # draw box
        cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
        # draw label
        font = cv2.FONT_HERSHEY_SIMPLEX
        if label_placement == 'bottom':
            cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
        else:
            cv2.putText(im, label, (xmin, ymin), font, 0.3, c, 1)
def display_shape():
    global shape
    if shape == 0:
        cv2.putText(obj, 'Off', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 1:
        cv2.putText(obj, 'Pencil', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 2:
        cv2.putText(obj, 'Brush', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 3:
        cv2.putText(obj, 'Eraser', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 4:
        cv2.putText(obj, 'Line', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 5:
        cv2.putText(obj, 'Rectangle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 6:
        cv2.putText(obj, 'Circle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff
    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0, 0, 255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255, 0, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255, 0, 0), 2, cv2.LINE_AA)
def _renderResultOnImage(self, result, arr):
    """Draws boxes and text representing each face's emotion."""
    import operator, cv2
    img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)

    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),
                      (faceRectangle['left'] + faceRectangle['width'],
                       faceRectangle['top'] + faceRectangle['height']),
                      color=(255, 0, 0), thickness=5)

    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        currEmotion = max(iter(currFace['scores'].items()), key=operator.itemgetter(1))[0]
        textToWrite = '{0}'.format(currEmotion)
        cv2.putText(img, textToWrite,
                    (faceRectangle['left'], faceRectangle['top'] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

    return img
def photoRead(filename):
    frame = cv2.imread(filename)
    FaceArray = getFaceArray(frame)
    for r in FaceArray:
        img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
        img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region
        feature = Tools.get_feature(img3)
        name = readFace(feature)
        font = cv2.FONT_HERSHEY_SIMPLEX
        img2 = cv2.putText(img2, name, (r[1], r[3]), font, 1, (255, 255, 255), 2)
    cv2.imshow('frame', frame)
    cv2.waitKey(0)
def start():
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        FaceArray = getFaceArray(frame)
        img2 = frame
        for r in FaceArray:
            img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region
            feature = Tools.get_feature(img3)
            name = readFace(feature)
            font = cv2.FONT_HERSHEY_SIMPLEX
            img2 = cv2.putText(img2, name, (r[1], r[3]), font, 1, (255, 255, 255), 2)
        cv2.imshow('frame', img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def renderResultOnImage(result, img):
    """Display the obtained results onto the input image"""
    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),
                      (faceRectangle['left'] + faceRectangle['width'],
                       faceRectangle['top'] + faceRectangle['height']),
                      color=(255, 0, 0), thickness=5)

    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]
        textToWrite = "%s" % (currEmotion)
        cv2.putText(img, textToWrite,
                    (faceRectangle['left'], faceRectangle['top'] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

# img = os.path.expanduser('~/Development/sentiEdu/learning/confusedImgs/4.jpg')
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    l_bboxes = []
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        print('Det: (x_min,y_min,W,H) = ({},{},{},{}), class_name = {:s}, score = {:.3f}'.format(
            int(bbox[0]), int(bbox[1]), int(bbox[2]-bbox[0]), int(bbox[3]-bbox[1]), class_name, score))
        cv2.rectangle(im, (bbox[0], bbox[3]), (bbox[2], bbox[1]), (0, 255, 0), 2)
        cv2.putText(im, '{:s}:{:.3f}'.format(class_name, score),
                    (int(bbox[0]), int(bbox[1]) - 3),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        l_bboxes.append({'x_min': int(bbox[0]), 'y_min': int(bbox[1]),
                         'x_max': bbox[2], 'y_max': bbox[3],
                         'cls': class_name, 'score': score})

    return l_bboxes
def draw_on_detected(frame, rects, timestamp):
    # Draw the bounding box on the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Status: Open", (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # write the image to temporary file
    # t = TempImage()
    # print('File saved at' + str(t.path))
    # cv2.imwrite(t.path, frame)

    # analyze
    # pi_surveillance_analyze.analyze(t.path)
def archive_with_items(self):
    """Write the full frame to the archive folder, with rectangles drawn around the detected faces."""
    logging.info("Archive l'image avec les items trouvés...")
    # Draw a rectangle around each detected item
    for f in self.items:
        x, y, w, h = f  # [ v for v in f ]
        cv2.rectangle(self.frame, (x, y), (x+w, y+h), (0, 255, 0), 3)

    # Stamp the current date and time on the image
    cv2.putText(self.frame, datetime.datetime.now().strftime("%c"), (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 3)

    # Show the image about to be archived in a preview window
    if self.debug:
        cv2.imshow("preview", self.frame)
        cv2.waitKey()

    # Write the archive file
    archive_full_name = "{0}_full.jpg".format(self.images_prefix)
    logging.info("Archive file is : '{0}'".format(archive_full_name))
    cv2.imwrite(os.path.join(self.archive_folder, archive_full_name), self.frame)
def annotate(self, frame):
    text = "Frame rate: %.1f" % self.frameRate
    textColor = (0, 255, 0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.5
    thickness = 2
    textSize = cv2.getTextSize(text, font, size, thickness)
    height = textSize[1]

    location = (0, frame.shape[0] - 4*height)
    cv2.putText(frame, text, location, font, size, textColor, thickness=thickness)

    text = "Detection rate: %.1f" % self.detectionRate
    location = (0, frame.shape[0] - height)
    cv2.putText(frame, text, location, font, size, textColor, thickness=thickness)
def show_results(self, img, results):
    img_cp = img.copy()
    if self.filewrite_txt:
        ftxt = open(self.tofile_txt, 'w')
    for i in range(len(results)):
        x = int(results[i][1])
        y = int(results[i][2])
        w = int(results[i][3]) // 2
        h = int(results[i][4]) // 2
        if self.disp_console:
            print(' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' +
                  str(int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
        if self.filewrite_img or self.imshow:
            cv2.rectangle(img_cp, (x-w, y-h), (x+w, y+h), (0, 255, 0), 2)
            cv2.rectangle(img_cp, (x-w, y-h-20), (x+w, y-h), (125, 125, 125), -1)
            cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x-w+5, y-h-7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        if self.filewrite_txt:
            ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h) +
                       ',' + str(results[i][5]) + '\n')
    if self.filewrite_img:
        if self.disp_console:
            print(' image file written : ' + self.tofile_img)
        cv2.imwrite(self.tofile_img, img_cp)
    if self.imshow:
        cv2.imshow('YOLO_tiny detection', img_cp)
        cv2.waitKey(1)
    if self.filewrite_txt:
        if self.disp_console:
            print(' txt file written : ' + self.tofile_txt)
        ftxt.close()
def show_results(self, img, results):
    img_cp = img.copy()
    if self.filewrite_txt:
        ftxt = open(self.tofile_txt, 'w')
    for i in range(len(results)):
        x = int(results[i][1])
        y = int(results[i][2])
        w = int(results[i][3]) // 2
        h = int(results[i][4]) // 2
        if self.disp_console:
            print(' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' +
                  str(int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
        if self.filewrite_img or self.imshow:
            cv2.rectangle(img_cp, (x-w, y-h), (x+w, y+h), (0, 255, 0), 2)
            cv2.rectangle(img_cp, (x-w, y-h-20), (x+w, y-h), (125, 125, 125), -1)
            cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x-w+5, y-h-7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        if self.filewrite_txt:
            ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h) +
                       ',' + str(results[i][5]) + '\n')
    if self.filewrite_img:
        if self.disp_console:
            print(' image file written : ' + self.tofile_img)
        cv2.imwrite(self.tofile_img, img_cp)
    if self.imshow:
        cv2.imshow('YOLO_face detection', img_cp)
        cv2.waitKey(1)
    if self.filewrite_txt:
        if self.disp_console:
            print(' txt file written : ' + self.tofile_txt)
        ftxt.close()
def cv2_put_text_to_image(img, text, x, y, font_pix_h=10, color=(255, 0, 0)):
    if font_pix_h < 10:
        font_pix_h = 10
    # print img.shape
    h = img.shape[0]
    if x < 0:
        x = 0
    if y > h - 1:
        y = h - font_pix_h
    if y < 0:
        y = font_pix_h
    font_size = font_pix_h / 30.0
    # print font_size
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, font_size, color, 1)
def cv2_put_text_to_image(img, text, x, y, font_pix_h=10, color=(255, 0, 0)):
    if font_pix_h < 10:
        font_pix_h = 10
    y = y + font_pix_h
    # print img.shape
    h = img.shape[0]
    if x < 0:
        x = 0
    if y > h - 1:
        y = h - font_pix_h
    if y < 0:
        y = font_pix_h
    font_size = font_pix_h / 30.0
    # print font_size
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, font_size, color, 1, -1)
def plot_image_final_label(path, top_class, m):
    original_image = cv2.imread(path)
    # Swap Red and Blue color channels BGR -> RGB
    red = original_image[:, :, 2].copy()
    blue = original_image[:, :, 0].copy()
    original_image[:, :, 0] = red
    original_image[:, :, 2] = blue
    cv2.putText(original_image, "Label: {}".format(top_class), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    plt.figure()
    plt.imshow(original_image)
    plt.savefig('final_solution_' + str(m) + '.jpg')

#########################################################################################
#########################################################################################
# FUNCTION: DRAW BAR GRAPH
#
# GOAL: Draw input image and graph bar with top 5 labels
#
# INPUT: final_top_5, final_top_5_prob, path, m
#
# OUTPUT: Graph bar
#
# RETURN:
#########################################################################################
def analyze_emotions(im, landmarks):
    for landmark in landmarks:
        # Observe eyebrow height for surprise
        standheight = np.absolute(landmark[27, 1] - landmark[30, 1])
        eyebrowheight = np.absolute(landmark[27, 1] - landmark[19, 1])
        if standheight == 0:
            standheight += 0.01
        eyedist = float(eyebrowheight) / float(standheight)
        mouthheight = np.absolute(landmark[50, 1] - landmark[57, 1])
        if float(mouthheight) / float(standheight) > 30:
            cv2.putText(im, "mouthheight: " + str(mouthheight), (screenwidth - 80, 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4,
                        color=(0, 0, 255), thickness=2)
            eyedist += mouthheight / 30
        mouthwidth = np.absolute(landmark[48, 0] - landmark[50, 0])
        nosewidth = np.absolute(landmark[31, 0] - landmark[35, 0])
        mouthdist = float(mouthwidth) / nosewidth
        im = score_emotions(im, eyedist, mouthdist)
    return im
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    CV_AA = 16
    print(clss.shape)
    print(bbox.shape)
    for cls_id in range(1, 21):
        _cls = clss[:, cls_id][:, np.newaxis]
        _bbx = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((_bbx, _cls))
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        inds = np.where(dets[:, -1] >= conf)[0]
        for i in inds:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
def draw_silhouette(self, foreground, bin_mask, tracked_object_stats, centroid):
    contours = cv2.findContours(bin_mask, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)[1]
    for i_contour in range(0, len(contours)):
        cv2.drawContours(foreground, contours, i_contour, (0, 255, 0))
    x1 = tracked_object_stats[cv2.CC_STAT_LEFT]
    x2 = x1 + tracked_object_stats[cv2.CC_STAT_WIDTH] + 1
    y1 = tracked_object_stats[cv2.CC_STAT_TOP]
    y2 = y1 + tracked_object_stats[cv2.CC_STAT_HEIGHT] + 1
    if SilhouetteExtractor.DRAW_BBOX:
        cv2.rectangle(foreground, (x1, y1), (x2, y2), color=(0, 0, 255))
        cv2.drawMarker(foreground, SilhouetteExtractor.__to_int_tuple(centroid),
                       (0, 0, 255), cv2.MARKER_CROSS, 11)
        bbox_w_h_ratio = tracked_object_stats[cv2.CC_STAT_WIDTH] / tracked_object_stats[cv2.CC_STAT_HEIGHT]
        cv2.putText(foreground, "BBOX w/h ratio: {0:.4f}".format(bbox_w_h_ratio), (x1, y1 - 18),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
    if SilhouetteExtractor.SHOW_INTERSECTS:
        if self.intersects_frame_boundary(x1, x2, y1, y2):
            cv2.putText(foreground, "FRAME BORDER INTERSECT DETECTED", (0, 54),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
def contour_match(self, img):
    '''
    Returns 1. Image with bounding boxes added
    '''
    # get filtered contours
    contours = self.get_filtered_contours(img)

    detection = Detection()
    height, width, channel = img.shape
    mean_color = (15, 253, 250)
    for i, (cnt, box) in enumerate(contours):
        # plot box and label around contour
        x, y, w, h = box
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, "cone", (x, y), font, 1, mean_color, 4)
        cv2.rectangle(img, (x, y), (x+w, y+h), mean_color, 2)
        if i == 0:
            detection.x = x
            detection.y = y
            detection.w = w
            detection.h = h
            detection.error_center = 0.5 - (x / float(width))
            detection.error_size = (self.DESIRED_AREA - w*h) / float(width*height)
            cv2.putText(img, "center:%.2f, distance: %.2f" %
                        (detection.error_center, detection.error_size),
                        (x-w, y-h/2), font, 1, mean_color, 4)

    # return the image with boxes around detected contours
    return img, detection
def draw_boxes(im_org, sorted_boxes, classes, block_x, block_y, biases, colors):
    im_marked = im_org.copy()
    im_size = np.shape(im_org)
    im_h = im_size[0]
    im_w = im_size[1]
    for sorted_box in sorted_boxes:
        b, j, class_id, p_class = sorted_box
        print(classes[class_id], np.max(p_class) * 100)
        x = b.x
        y = b.y
        w = b.w
        h = b.h
        x0 = int(np.clip(x - w/2, 0, im_w))
        y0 = int(np.clip(y - h/2, 0, im_h))
        x1 = int(np.clip(x + w/2, 0, im_w))
        y1 = int(np.clip(y + h/2, 0, im_h))
        im_marked = cv2.rectangle(im_marked, (x0, y0), (x1, y1), colors[class_id], thickness=2)
        # im_marked = cv2.rectangle(im_marked, (x0, y0), (x0+100, y0+20), colors[class_id], thickness=-1)
        # cv2.putText(im_marked, classes[class_id], (x0+5, y0+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness=2)
    return im_marked
def draw_rect(I, r, c, thickness=1):
    if abs(sum(r)) < 100:  # conditional to prevent min/max error
        cv2.rectangle(I, (int(r[0] * image_size), int(r[1] * image_size)),
                      (int((r[0] + max(r[2], 0)) * image_size), int((r[1] + max(r[3], 0)) * image_size)),
                      c, thickness)

# def draw_ann(I, r, text, color=(255, 0, 255), confidence=-1):
#     draw_rect(I, r, color, 1)
#     cv2.rectangle(I, (int(r[0] * image_size), int(r[1] * image_size - 15)),
#                   (int(r[0] * image_size + 100), int(r[1] * image_size)),
#                   color, -1)
#     text_ = text
#     if confidence >= 0:
#         text_ += ": %0.2f" % confidence
#     cv2.putText(I, text_, (int(r[0] * image_size), int((r[1]) * image_size)),
#                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
def gui():
    size = 100
    img = np.zeros((1000, 700, 3), np.uint8)
    cv2.namedWindow('GUI')
    xmar = ymar = 50
    for i in range(6):
        for j in range(4):
            img1 = cv2.imread("faces/cara" + str(i+j+1) + ".JPEG")
            img1 = resize(img1, width=size, height=size)
            if img1.shape[0] == 100 and img1.shape[1] == 100:
                img[ymar:ymar+size, xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+size)] = img1
            else:
                img[ymar:ymar+img1.shape[0], xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+img1.shape[1])] = img1
        ymar += 150
    cv2.putText(img, "Presiona Q para salir", (5, 25), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(img, "TFG Lucas Gago", (500, 925), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.putText(img, "Version 3", (500, 950), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
    cv2.imshow('GUI', img)
def proc_image(img, bboxes=None, border_size=1, font_size=0.3, xconfidence=0.5):
    # img = cv2.resize(img, (512, 512))
    if bboxes is not None:
        for bbox in bboxes:
            if bbox.shape[0] == 5:
                c, xmin, ymin, xmax, ymax = [int(x) for x in bbox]
                mx = int((xmin + xmax) / 2)
                my = int((ymin + ymax) / 2)
                cv2.putText(img, "%d: %s" % (c, rcmap[c]), (mx, my),
                            cv2.FONT_HERSHEY_SIMPLEX, font_size, (0, 255, 155), border_size)
                cv2.rectangle(img, (xmin, ymax), (xmax, ymin), (255, 15, 5), border_size)
            else:
                c, xmin, ymin, xmax, ymax = [int(x) for x in bbox[:5]]
                confidence = bbox[5]
                if confidence > xconfidence:
                    mx = int((xmin + xmax) / 2)
                    my = int((ymin + ymax) / 2)
                    cv2.putText(img, str(confidence), (mx, my),
                                cv2.FONT_HERSHEY_SIMPLEX, font_size, (255, 0, 155), border_size)
                    cv2.putText(img, "%d: %s" % (c, rcmap[c]), (mx, my + 15),
                                cv2.FONT_HERSHEY_SIMPLEX, font_size, (255, 0, 155), border_size)
                    cv2.rectangle(img, (xmin, ymax), (xmax, ymin), (0, 255, 155), border_size)
    return img
def camera_recog():
    print("[INFO] camera sensor warming up...")
    vs = cv2.VideoCapture(0)  # get input from webcam
    while True:
        _, frame = vs.read()
        # u can certainly add a roi here but for the sake of a demo i'll just leave it as simple as this
        rects, landmarks = face_detect.detect_face(frame, 80)  # min face size is set to 80x80
        aligns = []
        positions = []
        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = aligner.align(160, frame, landmarks[i])
            aligns.append(aligned_face)
            positions.append(face_pos)
        features_arr = extract_feature.get_features(aligns)
        recog_data = findPeople(features_arr, positions)
        for (i, rect) in enumerate(rects):
            cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]),
                          (255, 0, 0))  # draw bounding box for the face
            cv2.putText(frame, recog_data[i][0] + " - " + str(recog_data[i][1]) + "%",
                        (rect[0], rect[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break