# The following 10 code examples, extracted from open-source Python projects, illustrate how to use cv2.FONT_HERSHEY_COMPLEX_SMALL.
def patText(s0):
    """Build a square test pattern of repeated 'helloworld' text.

    Creates an (s0, s0) uint8 canvas, writes 'helloworld' on 10 evenly
    spaced rows (alternating the x offset between 0 and s0/10), and
    returns the result as a float array.
    """
    canvas = np.zeros((s0, s0), dtype=np.uint8)
    scale = int(round(s0 / 100.))       # font scale/thickness grow with the canvas
    x_shift = int(round(s0 / 10.))
    for row, y in enumerate(np.linspace(0, s0, 10)):
        # Alternate the x origin on successive rows: 0, s0/10, 0, ...
        x = x_shift if row % 2 else 0
        cv2.putText(canvas, 'helloworld', (x, int(round(y))),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    fontScale=scale, color=255, thickness=scale,
                    lineType=cv2.LINE_AA)
    return canvas.astype(float)
def drawArrow(image, p, q, color, arrowMagnitude=5, thickness=1, line_type=8, shift=0):
    """Draw an arrow on `image` from point `p` to point `q`.

    Draws the main shaft p->q plus two head segments at +/- 45 degrees
    around the shaft direction, each `arrowMagnitude` pixels long.

    Fix: the original wrote the head endpoints back into the caller's
    `p` sequence (mutating a shared argument and requiring `p` to be a
    mutable list); the endpoints are now computed into locals, so `p`
    may be any 2-element sequence and is left untouched.

    Returns the (modified in place) image, as before.
    """
    # Principal shaft.
    cv2.line(image, tuple(p), tuple(q), color, thickness, line_type, shift)
    # Angle of the shaft, measured from q back toward p.
    angle = numpy.arctan2(p[1] - q[1], p[0] - q[0])
    # Two head segments, +/- 45 degrees around the shaft direction.
    for delta in (numpy.pi / 4.0, -numpy.pi / 4.0):
        hx = int(q[0] + arrowMagnitude * numpy.cos(angle + delta))
        hy = int(q[1] + arrowMagnitude * numpy.sin(angle + delta))
        cv2.line(image, (hx, hy), tuple(q), color, thickness, line_type, shift)
    return image
def write_labels(video, label_dict, secs=1):
    """Play `video` frame by frame, overlaying label text on each frame.

    `label_dict` maps frame numbers to dicts whose keys are label
    strings; when the current frame number has an entry, the overlay
    text switches to the comma-joined keys and persists until the next
    labelled frame. Display stops at end of stream or when 'q' is
    pressed.

    Fix: the original bare `except: pass` swallowed every exception
    (including typos and KeyboardInterrupt); it now ignores only the
    lookup/attribute errors a malformed label entry can raise.

    NOTE(review): uses the legacy cv2.cv.CV_CAP_PROP_* constants
    (OpenCV 2.x API) — confirm the target OpenCV version.
    """
    cap = cv2.VideoCapture(video)
    w = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('output.mp4', -1, 20.0, (w, h))
    f_no = 0
    fps = get_frame_rate(cap)
    inc = int(fps * secs)  # NOTE(review): computed but never used — confirm intent
    f_nos = label_dict.keys()
    lbl = ''
    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:
            if f_no in f_nos:
                try:
                    lbls = label_dict[f_no]
                    lbl = ",".join(lbls.keys())
                except (KeyError, AttributeError):
                    # Malformed entry: keep showing the previous label.
                    pass
            cv2.putText(frame, lbl, (105, 105), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 0, 255))
            #out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
        f_no += 1
    cap.release()
    out.release()
    cv2.destroyAllWindows()

#if __name__ == '__main__' :
#    get_frames_every_x_sec('video.mp4', '.')
def video():
    # Webcam recognition loop: detect faces with a Haar cascade, query
    # FaceAPI on a subset of frames, and label recognised faces with a
    # FreeType font (so non-ASCII names from simhei.ttf render), or
    # "Unknow" otherwise.
    #
    # NOTE(review): Python 2 code (print statements). Relies on module
    # globals `face_token`, `FaceAPI`, `get_detail`. The original was
    # whitespace-mangled; the indentation below is reconstructed — confirm
    # against the upstream project.
    global face_token
    # FreeType renderer so the name text can contain non-ASCII glyphs.
    ft=cv2.freetype.createFreeType2()
    ft.loadFontData(fontFileName='./data/font/simhei.ttf',id =0)
    face_cascade = cv2.CascadeClassifier('./data/cascades/haarcascade_frontalface_alt.xml')
    camera=cv2.VideoCapture(0)
    count = 0
    while(True):
        ret,frame=camera.read()
        gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        faces=face_cascade.detectMultiScale(gray,1.3,5)
        for(x,y,w,h) in faces:
            img =cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
            # Only hit the remote API on 2 of every 5 iterations.
            if count%5<2:
                f=cv2.resize(gray[y:y+h,x:x+w],(200,200))
                cv2.imwrite('./data/temp/temp.pgm',f)
                result=FaceAPI.searchItoI(image_file='./data/temp/temp.pgm')
                # presumably a 4-key payload is an API error response — verify
                if len(result)==4:
                    break
                if result["results"][0]["confidence"] >= 80.00:
                    print result["results"][0]["confidence"]
                    face_token=result["results"][0]["face_token"]
                    detail=get_detail()
                    # shutil.copyfile("./data/temp/temp.pgm","./data/at/%s/%s.pgm"%(detail,time.strftime('%Y%m%d%H%M%S')))
                    print detail
                    # Draw the recognised name above the face box.
                    ft.putText(img=img,text=detail[1], org=(x, y - 10), fontHeight=60,line_type=cv2.LINE_AA, color=(0,255,165), thickness=2, bottomLeftOrigin=True)
                    # count+=1
                else:
                    print"Unknow face"
                    cv2.putText(img,"Unknow", (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,225), 2)
            count +=1
            print count
        cv2.namedWindow("image",cv2.WINDOW_NORMAL)
        cv2.imshow("image",frame)
        # ~12 fps display rate; quit on 'q'.
        if cv2.waitKey(1000 / 12)&0xff==ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
def detect(filename):
    """Detect faces in an image file, identify each via FaceAPI, and
    annotate the image — the recognised name for confident matches,
    "Unknow" otherwise — then write the annotated image back.

    Fixes vs. original:
      * the final imwrite used the undefined name `i` (NameError); the
        annotated image is now written back to `filename`.
      * print statements parenthesised (equivalent output for a single
        argument in both Python 2 and Python 3).

    NOTE(review): relies on module globals `face_token`, `filesdir`,
    `FaceAPI`, `get_detail` — confirm they are set by the caller.
    """
    global face_token
    count = 0
    faces = []
    face_cascade = cv2.CascadeClassifier('./data/cascades/haarcascade_frontalface_alt.xml')
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # FreeType renderer so non-ASCII names render correctly.
    ft = cv2.freetype.createFreeType2()
    ft.loadFontData(fontFileName='./data/font/simhei.ttf', id=0)
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 3)
        f = cv2.resize(gray[y:y + h, x:x + w], (200, 200))
        cv2.imwrite('./data/search/{}/{}.pgm'.format(filesdir, count), f)
        result = FaceAPI.searchItoI(image_file='./data/search/{}/{}.pgm'.format(filesdir, count))
        # presumably a 4-key payload is an API error response — verify
        if len(result) == 4:
            break
        if result["results"][0]["confidence"] >= 80.00:
            print(result["results"][0]["confidence"])
            face_token = result["results"][0]["face_token"]
            print("face_token?{}".format(face_token))
            detail = get_detail()
            ft.putText(img=img, text=detail[1], org=(x, y - 10), fontHeight=30,
                       line_type=cv2.LINE_AA, color=(0, 255, 165), thickness=1,
                       bottomLeftOrigin=True)
        else:
            print("Unknow face")
            cv2.putText(img, "Unknow", (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 225), 2)
        count += 1
    cv2.imwrite(filename, img)
def checkface_offline(filename):
    # Detect faces in `filename`, identify each crop via FaceAPI, record
    # the result in the io_data table, and write the annotated frame to
    # ./img/shutter/<filelist>.jpg.
    #
    # NOTE(review): Python 2 code. Relies on module globals `filelist`,
    # `fileLists`, `cur`, `conn`, `FaceAPI`, `detailface`, `checkbody_n`,
    # `random`. The original was whitespace-mangled; indentation below is
    # reconstructed — confirm against the upstream project.
    face_cascade = cv2.CascadeClassifier('./data/cascades/haarcascade_frontalface_alt.xml')
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces=face_cascade.detectMultiScale(gray, 1.3, 5)
    ft=cv2.freetype.createFreeType2()
    ft.loadFontData(fontFileName='./data/font/simhei.ttf',id =0)
    for (x,y,w,h) in faces:
        img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),3)
        f = cv2.resize(gray[y:y+h, x:x+w], (200, 200))
        # Random suffix keeps crops from the same capture distinct on disk.
        name=filelist+(str(random.randint(0,99)))
        print name
        cv2.imwrite('./img/face/{}.jpg'.format(name),f)
        result=FaceAPI.searchItoI(image_file='./img/face/{}.jpg'.format(name))
        # NOTE(review): confidence is read BEFORE the len(result)==3 error
        # check — an error payload without "results" would raise here.
        confidence=result["results"][0]["confidence"]
        if len(result)==3:
            checkbody_n('./img/shutter/{}.jpg'.format(filelist))
        if confidence >= 80.00:
            face_token=result["results"][0]["face_token"]
            detail=detailface(face_token)
            # NOTE(review): SQL built by string interpolation — injection
            # prone; prefer a parameterized cur.execute(sql, params).
            cur.execute("insert into io_data values('%s',%s,'%s','%s','%s','%s','%s')"%(filelist,detail[0],detail[1],confidence,detail[2],face_token,fileLists))
            conn.commit()
            # checkbody_y('./img/shutter/{}.jpg'.format(filelist))
            ft.putText(img=img,text=detail[1], org=(x, y - 10), fontHeight=30,line_type=cv2.LINE_AA, color=(0,255,165), thickness=1, bottomLeftOrigin=True)
        else:
            print"Unknow face"
            face_token=result["results"][0]["face_token"]
            detail=detailface(face_token)
            # Unrecognised faces get a random placeholder ID in the table.
            random_ID=random.randint(100000000000,100000999999)
            cv2.putText(img,"Unknow", (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,225), 2)
            cur.execute("insert into io_data values('%s',%s,'None','%s','%s','%s','%s')"%(filelist,random_ID,confidence,detail[2],face_token,fileLists))
            conn.commit()
    cv2.imwrite('./img/shutter/{}.jpg'.format(filelist),img)
def tile(cols, rows, imgs, titles=None):
    """Arrange `imgs` into a cols x rows mosaic (column-major fill).

    Each image may optionally get its title drawn in the top-left
    corner (on a copy, so callers' arrays are untouched). Cells beyond
    len(imgs) are filled with a black image shaped like imgs[0].

    Fixes vs. original: removed the unused local `hImg`; renamed the
    accumulator locals (`row`/`col`) which were labelled backwards —
    the inner list is stacked vertically (a column), the outer list is
    concatenated horizontally.

    Returns the mosaic as a single numpy array.
    """
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    fontSize = 1
    fontThickness = 2
    pad = 10
    titleColor = (255, 192, 0)
    blank = None        # lazily-created black filler cell
    i = 0
    columns = []
    for c in range(cols):
        cells = []
        for r in range(rows):
            if i < len(imgs):
                img = imgs[i]
                if titles is not None and i < len(titles):
                    img = img.copy()    # don't draw on the caller's array
                    size = cv2.getTextSize(titles[i], font, fontSize, fontThickness)[0]
                    # Baseline sits size[1]+pad below the top edge.
                    cv2.putText(img, titles[i], (pad, size[1] + pad), font,
                                fontSize, titleColor, thickness=fontThickness)
                cells.append(img)
            else:
                if blank is None:
                    blank = np.zeros_like(imgs[0])
                cells.append(blank)
            i += 1
        columns.append(np.concatenate(cells, axis=0))
    return np.concatenate(columns, axis=1)
def checkface_online(filename):
    # Online variant: send the whole image to FaceAPI.detect, then for
    # each returned face run a token search, log the outcome to the
    # io_data table, and write the annotated frame to
    # ./img/shutter/<filelist>.jpg.
    #
    # NOTE(review): Python 2 code. Relies on module globals `filelist`,
    # `fileLists`, `cur`, `conn`, `FaceAPI`, `detailface`, `checkbody_n`,
    # `checkbody_y`, `time`. The original was whitespace-mangled; the
    # indentation below is reconstructed — confirm against upstream.
    i=0
    img = cv2.imread(filename)
    result_d=FaceAPI.detect(image_file=filename)
    time.sleep(2)   # presumably API rate limiting — confirm
    # presumably a 3-key payload is an API error response — verify
    if len(result_d)==3:
        print result_d
    if len(result_d)>3:
        for i in range(0,len(result_d["faces"])):
            face_token=result_d["faces"][i]["face_token"]
            gender=result_d["faces"][i]["attributes"]["gender"]["value"]
            # NOTE(review): `age` is extracted but never used below.
            age=result_d["faces"][i]["attributes"]["age"]["value"]
            face_rectangle=result_d["faces"][i]["face_rectangle"]
            x=face_rectangle["left"]
            y=face_rectangle["top"]
            w=face_rectangle["width"]
            h=face_rectangle["height"]
            img=cv2.rectangle(img,(x,y),(x+w,y+h),(0,225,225),2)
            # cv2.imwrite('./img/shutter/{}.jpg'.format(filelist),img)
            result_s=FaceAPI.searchTtoI(face_token=face_token)
            time.sleep(1)
            # print result_s
            if len(result_s)==3:
                print "result_s==3"
                if i==0:
                    checkbody_n('./img/shutter/{}.jpg'.format(filelist))
            if len(result_s)>3:
                face_token=result_s["results"][0]["face_token"]
                confidence=result_s["results"][0]["confidence"]
                if confidence >= 80.00:
                    detail=detailface(face_token)
                    # NOTE(review): SQL built by string interpolation —
                    # injection prone; prefer parameterized execute.
                    cur.execute("insert into io_data values('%s',%s,'%s','%s','%s','%s','%s')"%(filelist,detail[0],detail[1],confidence,gender,face_token,fileLists))
                    conn.commit()
                    # FreeType renderer so non-ASCII names render correctly.
                    ft=cv2.freetype.createFreeType2()
                    ft.loadFontData(fontFileName='./data/font/simhei.ttf',id =0)
                    ft.putText(img=img,text=detail[1], org=(x, y - 10), fontHeight=30,line_type=cv2.LINE_AA, color=(0,255,165), thickness=1, bottomLeftOrigin=True)
                    cv2.imwrite('./img/shutter/{}.jpg'.format(filelist),img)
                    if i==1:
                        checkbody_y('./img/shutter/{}.jpg'.format(filelist),face_token)
                else:
                    print"Unknow face"
                    face_token=result_s["results"][0]["face_token"]
                    confidence=result_s["results"][0]["confidence"]
                    # random_ID=random.randint(100000000000,100000999999)
                    cv2.putText(img,"Unknow", (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,225), 2)
                    cur.execute("insert into io_data values('%s','None','None','%s','%s','%s','%s')"%(filelist,confidence,gender,face_token,fileLists))
                    conn.commit()
                    cv2.imwrite('./img/shutter/{}.jpg'.format(filelist),img)
                    if i==1:
                        checkbody_n('./img/shutter/{}.jpg'.format(filelist))
            # NOTE(review): redundant — `i` is reassigned by the for loop.
            i=i+1
def processRawImage(raw):
    """Find the largest hue-matched contour in `raw` and annotate it.

    Draws the bounding box and centre marker on a copy of `raw`,
    overlays the centre's offset from (320, 240) and an estimated
    distance (triangle-similarity with a fixed perceived focal length),
    and publishes {horizDelta, targetDistance} over MQTT. Returns the
    annotated copy (unannotated copy if no contour was found).

    Fix: centre coordinates now use integer division (`//`) — under
    Python 3 `w/2` yields a float, which cv2.circle rejects as a point;
    the value is unchanged under Python 2. Dead commented-out code
    (moments/hull experiments) was removed.

    NOTE(review): relies on module globals `filterHue`,
    `findLargestContour`, `TriangleSimilarityDistanceCalculator`,
    `targetSize`, `client`, `MQTT_TOPIC_TARGETTING`, `json`.
    """
    cv2.imshow("raw", raw)
    mask = filterHue(raw)
    result = raw.copy()
    largestContour = findLargestContour(mask)
    if largestContour is not None:
        x, y, w, h = cv2.boundingRect(largestContour)
        center = (x + w // 2, y + h // 2)
        cv2.rectangle(result, (x, y), (x + w, y + h), (40, 0, 120), 2)
        cv2.circle(result, center, 8, (250, 250, 250), -1)
        # Offsets from the assumed 640x480 frame centre, drawn by the box.
        cv2.putText(result, str(center[0] - 320), (x - 50, y + 15),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
        cv2.putText(result, str(center[1] - 240), (x - 50, y + 45),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
        # Empirically measured perceived focal length for this camera.
        perceivedFocalLength = 652
        calc = TriangleSimilarityDistanceCalculator(targetSize[0], perceivedFocalLength)
        distance = calc.CalcualteDistance(w)  # (sic) project method name
        cv2.putText(result, str(distance) + " inches", (5, 23),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
        payload = {'horizDelta': center[0] - 320, 'targetDistance': int(distance)}
        client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))
    return result
def drawBoxes(img, boxes, categories, names, palette, scores=None, fade=False):
    """Draw labelled bounding boxes on a copy of `img` and return it.

    `boxes` is an (N, 4+) array of [x1, y1, x2, y2, ...] corners;
    `categories` indexes into the palette colour map (None = red);
    `names`/`scores` provide optional per-box label text; with `fade`
    and `scores` set, each box is alpha-blended by its score.

    Fix: the bottom-right corner slice was `[2:5]` — whenever `boxes`
    carries extra columns beyond the 4 coordinates, that silently hands
    cv2.rectangle a 3-element "point". It is now `[2:4]` (identical for
    4-column input).
    """
    def clipCoord(xy):
        # Clamp (x, y) into the image so cv2 never gets out-of-range points.
        return np.minimum(np.maximum(np.array(xy, dtype=np.int32), 0),
                          [img.shape[1] - 1, img.shape[0] - 1]).tolist()

    cmap = palette.getMap(list=True)
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    fontSize = 0.8
    fontThickness = 1
    pad = 5
    img = np.copy(img)
    for box in range(boxes.shape[0]):
        if fade and scores is not None:
            iOrig = img          # keep pre-box image for the alpha blend below
            img = np.copy(img)
        topleft = tuple(clipCoord(boxes[box][0:2]))
        color = tuple(cmap[categories[box]]) if categories is not None else (0, 0, 255)
        cv2.rectangle(img, topleft, tuple(clipCoord(boxes[box][2:4])), color, thickness=4)
        if names:
            title = names[box]
            if scores is not None:
                title += ": %.2f" % scores[box]
            textpos = [topleft[0], topleft[1] - pad]
            size = cv2.getTextSize(title, font, fontSize, fontThickness)[0]
            boxTL = [textpos[0], textpos[1] - size[1]]
            boxBR = [topleft[0] + size[0], topleft[1]]
            # Filled label background plus a matching border.
            cv2.rectangle(img, tuple(boxTL), tuple(boxBR), color, thickness=-1)
            cv2.rectangle(img, tuple(boxTL), tuple(boxBR), color, thickness=4)
            cv2.putText(img, title, tuple(textpos), font, fontSize, (255, 255, 255),
                        thickness=fontThickness)
        if fade and scores is not None:
            img = scores[box] * img + (1.0 - scores[box]) * iOrig
    return img