我们从 Python 开源项目中，提取了以下 2 个代码示例，用于说明如何使用 cv2.FILLED。
def display_detected(self, frame, face_locs, people, confidence):
    """Draw a labeled box on *frame* for each recognized face (in place).

    :param frame: BGR image (numpy array) to draw on; modified in place.
    :param face_locs: iterable of (top, right, bottom, left) face boxes,
        in frame coordinates.
    :param people: recognized name for each face box.
    :param confidence: recognition confidence per face — presumably a
        fraction in [0, 1] (it is scaled by 100 for display); TODO confirm.
    :return: None
    """
    if not face_locs:  # nothing detected
        return
    for (top, right, bottom, left), name, conf in zip(face_locs, people, confidence):
        # Format e.g. "Alice 87.3%". Formatting the percentage directly
        # avoids the float-repr artifacts the old round-trip through a
        # "%.3f" string produced (e.g. "87.30000000000001%"); one decimal
        # of a percent keeps the original 0.1% resolution.
        label = "{} {:.1f}%".format(name, conf * 100)
        # Red box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Filled red strip along the top edge of the box for the label
        cv2.rectangle(frame, (left, top + 20), (right, top), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, label, (left, top + 15), font, 0.5, (255, 255, 255), 1)
def render_bboxes_image(bboxes, output_width, output_height, input_width, input_height):
    """Rasterize bounding boxes as filled white rectangles on a black image.

    Each box in *bboxes* is (min_x, min_y, max_x, max_y) in input-space
    coordinates, i.e. within (0, 0)..(input_width, input_height). The
    coordinates are rescaled to an output image of size
    (output_height, output_width).

    :return: uint8 numpy array of shape (output_height, output_width),
        0 for background and 255 inside every box.
    """
    canvas = np.zeros((output_height, output_width), dtype=np.uint8)
    # Per-axis scale factors from input space to output space.
    sx = output_width / input_width
    sy = output_height / input_height
    for x0, y0, x1, y1 in bboxes:
        top_left = (int(x0 * sx), int(y0 * sy))
        bottom_right = (int(x1 * sx), int(y1 * sy))
        cv2.rectangle(canvas, top_left, bottom_right, 255, cv2.FILLED)
    return canvas