The following 42 code examples, extracted from open-source Python projects, illustrate how to use the legacy `cv2.cv` module.
def applyTransform(self):
    """Re-encode the extracted frames as an edge-detected, vertically
    flipped H.264 video written to ``changedOutput.mp4``."""
    self.framing(self.path)
    # Output size is taken from the first extracted frame.
    self.height, self.width = cv2.imread("Frames/1.jpg").shape[:2]
    # Legacy cv API: CV_FOURCC builds the codec tag ('avc1').
    writer = cv2.VideoWriter("changedOutput.mp4",
                             cv.CV_FOURCC('a', 'v', 'c', '1'),
                             30.0, (self.width, self.height))
    for index in self.sort_files():
        pic = "Frames/" + str(index) + ".jpg"
        # Canny edges on the grayscale frame, written back over the file.
        edges = cv2.Canny(cv2.imread(pic, 0), 100, 200)
        cv2.imwrite(pic, edges)
        # Re-read the (now single-channel-saved) frame and flip vertically.
        flipped = cv2.flip(cv2.imread(pic), 0)
        writer.write(flipped)
    writer.release()
def recog(md,img): global face_rect src_path='./regist_pic/'+str(md) while True: rects=face_rect if rects: #img????????? if rects[0][2]<rects[0][3]: cv.SetImageROI(img,(rects[0][0]+10, rects[0][1]+10,rects[0][2]-100,rects[0][2]-100)) else: cv.SetImageROI(img,(rects[0][0]+10, rects[0][1]+10,rects[0][3]-100,rects[0][3]-100)) #?img?????? dst=cv.CreateImage((224,224), 8, 3) cv.Resize(img,dst,cv.CV_INTER_LINEAR) cv.SaveImage('./temp.bmp',dst) #??5??????,?????????????,?????????,???scores? scores=[] for i in range(5): res=compar_pic('./temp.bmp',src_path+'/'+str(i)+'.bmp') scores.append(res) print res #?scores??? result=avg(scores) print 'avg is :',avg(scores) return result
def show_img():
    """Continuously grab camera frames, run face detection, publish the
    rectangles through the global *face_rect*, and show the annotated feed."""
    global face_rect
    while True:
        img = cv.QueryFrame(cam)  # grab one frame from the camera
        src = cv.CreateImage((img.width, img.height), 8, 3)
        cv.Resize(img, src, cv.CV_INTER_LINEAR)
        # Grayscale + histogram equalisation before cascade detection.
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.EqualizeHist(gray, gray)
        rects = detect(gray, cascade)
        face_rect = rects  # share the latest detections with other callers
        draw_rects(src, rects, (0, 255, 0))
        cv.ShowImage('DeepFace Wang_jun_qian', src)
        # NOTE(review): the comparison result is discarded, so pressing ESC
        # does not break the loop — preserved exactly as in the original.
        cv2.waitKey(5) == 27
    cv2.destroyAllWindows()
def save_clip_img():
    """Preview a 19x19 grid over clip.jpg, then save each of the 361 cells
    as an individual 33x25 crop under static/ClippedImg/<row>_<col>.jpg."""
    img = cv.LoadImage('static/InterceptedIMG/clip.jpg')
    # Split 640x480 into 19 columns/rows; keep the fractional remainders so
    # per-cell offsets can be accumulated without drifting.
    vertical_distance_decimal, vertical_distance_integer = math.modf(float(640) / 19)
    parallel_distance_decimal, parallel_distance_integer = math.modf(float(480) / 19)
    draw_img = cv2.imread('static/InterceptedIMG/clip.jpg')
    # Preview pass: draw the whole grid, wait for ESC to dismiss it.
    for i in range(19):
        for j in range(19):
            cv2.rectangle(draw_img,
                          (0 + int(33.68 * i), int(25.26 * j)),
                          (int(33.68 * (i + 1)), int(25.26 * (j + 1))),
                          (0, 255, 0), 1)
    cv2.imshow('image', draw_img)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    # Extraction pass: clone, set the ROI to the cell, save it out.
    for i in range(19):
        for j in range(19):
            wn_position = (int(vertical_distance_integer * i) + int(vertical_distance_decimal * i),
                           int(parallel_distance_integer * j) + int(parallel_distance_decimal * j))
            # es_position is computed but unused — kept as in the original.
            es_position = (int(vertical_distance_integer * (i + 1) + int(vertical_distance_decimal * i)),
                           int(parallel_distance_integer * (j + 1)) + int(parallel_distance_decimal * j))
            img_backup = cv.CloneImage(img)
            cv.SetImageROI(img_backup, (wn_position[0], wn_position[1], 33, 25))
            cv.SaveImage('static/ClippedImg/%d_%d.jpg' % (j, i), img_backup)
def read(self,training=False,validation=False): pos=int(self.currpos) ret,image=self.video.read() count=0 while True: if ret: self.currpos+=1 update=False else: self.currpos=0 update=True if update: self.video.set(cv.CV_CAP_PROP_POS_FRAMES,self.currpos) if ret and (validation and is_validation_frame(pos) or training and is_training_frame(pos) or not training and not validation): return ret,image,pos else: pos=int(self.currpos) ret,image=self.video.read() count+=1 if count>=1000: print 'read retry timeout' os._exit(-1)
def read(self,training=False,validation=False): pos=int(self.currpos) ret,image=self.video.read() count=0 while True: if ret: self.currpos+=1 update=False #if shuffle and random.random()<0.1 or not ret: # update=True # self.currpos=self.beginpos+int(random.random()*(self.endpos-self.beginpos)) if self.currpos>=self.endpos: update=True self.currpos=self.beginpos if update: self.video.set(cv.CV_CAP_PROP_POS_FRAMES,self.currpos) if ret and (validation and is_validation_frame(pos) or training and is_training_frame(pos) or not training and not validation): return ret,image,pos else: pos=int(self.currpos) ret,image=self.video.read() count+=1 if count>=1000: print 'read retry timeout' os._exit(-1)
def __init__(self): self.norm_rgb=np.zeros((600,800,3),np.uint8) self.dst=np.zeros((600,800),np.uint8) self.b=0 self.g=0 self.r=0 self.lb=0 self.lg=0 self.lr=0 self.m=np.zeros((600,800),np.uint8) #self.win=cv2.namedWindow("detect") #self.dst=cv.CreateImage((800,600),8,1) #cv2.createTrackbar("blue", "detect",0,255,self.change_b) #cv2.createTrackbar("green","detect",0,255,self.change_g) #cv2.createTrackbar("red","detect",0,255,self.change_r) #cv2.createTrackbar("low_blue", "detect",0,255,self.change_lb) #cv2.createTrackbar("low_green","detect",0,255,self.change_lg) #cv2.createTrackbar("low_red","detect",0,255,self.change_lr)
def detect_shirt(self):
    """Segment the shirt region from self.norm_rgb with a fixed BGR range
    plus watershed refinement; return (mask, contours)."""
    # Fixed colour window replaces the commented-out trackbar-driven range.
    self.dst = cv2.inRange(self.norm_rgb,
                           np.array([20, 20, 20], np.uint8),
                           np.array([255, 110, 80], np.uint8))
    cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # Sure-foreground by erosion, sure-background by dilation+threshold.
    fg = cv2.erode(self.dst, None, iterations=2)
    bg = cv2.dilate(self.dst, None, iterations=3)
    _, bg = cv2.threshold(bg, 1, 128, 1)
    # Combine the markers and refine the boundary with watershed.
    markers = np.int32(cv2.add(fg, bg))
    cv2.watershed(self.norm_rgb, markers)
    self.m = cv2.convertScaleAbs(markers)
    _, self.m = cv2.threshold(self.m, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cntr, h = cv2.findContours(self.m, cv2.cv.CV_RETR_EXTERNAL,
                               cv2.cv.CV_CHAIN_APPROX_SIMPLE)
    return self.m, cntr
def init_camera():
    """Open the configured video source and apply the configured frame size.

    Returns:
        (True, camera) on success, (False, False) when the capture device
        cannot be opened or configured.
    """
    try:
        camera = cv2.VideoCapture(cfg.video_src)
        camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, float(cfg.width))
        camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, float(cfg.height))
        return True, camera
    except Exception:
        # Fix: narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        return False, False
def detect(img, cascade):
    """Run the Haar cascade on *img* and return face boxes as
    (x1, y1, x2, y2) tuples, keeping results only when the first box's
    far corner lies in the 300..500 range (filters tiny/huge detections)."""
    rects = cv.HaarDetectObjects(img, cascade, cv.CreateMemStorage(),
                                 1.1, 2, cv.CV_HAAR_DO_CANNY_PRUNING,
                                 (255, 255))
    if len(rects) == 0:
        return []
    # Convert (x, y, w, h) boxes to corner pairs.
    result = [(r[0][0], r[0][1], r[0][0] + r[0][2], r[0][1] + r[0][3])
              for r in rects]
    # Size gate on the first detection only, as in the original.
    if (result[0][2] > 300 and result[0][3] > 300
            and result[0][2] < 500 and result[0][3] < 500):
        return result
    return []
def draw_rects(img, rects, color):
    """Draw one green rectangle per detected face on *img*.

    *rects* entries are (x1, y1, x2, y2) corner pairs.

    Fix: the original iterated over *rects* but always drew rects[0],
    drawing the first box len(rects) times. Each box is now drawn once.
    NOTE(review): *color* is still ignored in favour of the hard-coded
    green, exactly as before — confirm before routing it through.
    """
    if rects:
        for r in rects:
            cv.Rectangle(img, (int(r[0]), int(r[1])),
                         (int(r[2]), int(r[3])),
                         cv.CV_RGB(0, 255, 0), 1, 8, 0)
def register(path, img, rects):
    """Crop the detected face square from *img*, resize it to 224x224 and
    save it to *path* as a registration sample."""
    if rects:
        # Square crop sized by the shorter rectangle side.
        if rects[0][2] < rects[0][3]:
            cv.SetImageROI(img, (rects[0][0] + 10, rects[0][1] + 10,
                                 rects[0][2] - 50, rects[0][2] - 50))
        else:
            cv.SetImageROI(img, (rects[0][0] + 10, rects[0][1] + 10,
                                 rects[0][3] - 50, rects[0][3] - 50))
        # Normalise to the model's 224x224 input size and persist.
        dst = cv.CreateImage((224, 224), 8, 3)
        cv.Resize(img, dst, cv.CV_INTER_LINEAR)
        cv.SaveImage(path, dst)
def detect(img, cascade):
    """Run the cascade classifier on *img* and return detections as
    (x1, y1, x2, y2) corner boxes (empty list when nothing is found)."""
    boxes = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30),
                                     flags=cv.CV_HAAR_SCALE_IMAGE)
    if len(boxes) == 0:
        return []
    # Convert in place from (x, y, w, h) to (x1, y1, x2, y2).
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def initialFraming(self,path): global cap global success global frame sampleIndex=0 cap = cv2.VideoCapture(path) success,frame=cap.read(cv.CV_IMWRITE_JPEG_QUALITY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #gray = cv2.GaussianBlur(gray, (21, 21), 0) height,width = gray.shape[:2] print "Dimension of the image is: ",height, width, (height*width) samples = np.array([[0 for x in range(0,self.numberOfSamples)] for x in range(0,(height*width))]) tempArray = np.reshape(gray,(height*width)).T samples[:,sampleIndex]= np.copy(tempArray) sampleIndex+=1 while (success and sampleIndex!=(self.numberOfSamples)): success,frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #gray = cv2.GaussianBlur(gray, (21, 21), 0) tempArray = (np.reshape(gray,(height*width))).T samples[:,sampleIndex]= np.copy(tempArray) sampleIndex+=1 return samples
def writeVideo(self):
    """Assemble the frames under Frames/ into a 25 fps Theora video,
    written to ``changedOutput.ogv``."""
    # Output size is taken from the first frame on disk.
    height, width = cv2.imread("Frames/1.jpg").shape[:2]
    writer = cv2.VideoWriter("changedOutput.ogv",
                             cv.CV_FOURCC('t', 'h', 'e', '0'),
                             25.0, (width, height))
    for index in self.sort_files():
        writer.write(cv2.imread("Frames/" + str(index) + ".jpg"))
    writer.release()
def initialFraming(self,path): global cap global success global frame sampleIndex=0 cap = cv2.VideoCapture(path) success,frame=cap.read(cv.CV_IMWRITE_JPEG_QUALITY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) height,width = gray.shape[:2] print "Dimension of the image is: ",height, width, (height*width) samples = np.array([[0 for x in range(0,self.numberOfSamples)] for x in range(0,(height*width))]) tempArray = np.reshape(gray,(height*width)).T samples[:,sampleIndex]= np.copy(tempArray) sampleIndex+=1 while (success and sampleIndex!=(self.numberOfSamples)): success,frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) tempArray = (np.reshape(gray,(height*width))).T samples[:,sampleIndex]= np.copy(tempArray) sampleIndex+=1 return samples
def framing(self, path):
    """Split the video at FilePath+*path* into JPEG frames saved as
    Images/<count>.jpg, advancing the module-level *count*."""
    global FilePath, count
    cap = cv2.VideoCapture(FilePath + path)
    # NOTE(review): CV_IMWRITE_JPEG_QUALITY is an imwrite flag; passing it
    # to read() looks accidental — kept as in the original.
    success, frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY)
    while success:
        cv2.imwrite("Images/%d.jpg" % count, frame)
        count += 1
        success, frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY)
    cap.release()
def framing(self, path):
    """Split the video at *path* into JPEG frames saved as
    Frames/<n>.jpg, numbered from 1."""
    cap = cv2.VideoCapture(path)
    # NOTE(review): CV_IMWRITE_JPEG_QUALITY is an imwrite flag; passing it
    # to read() looks accidental — kept as in the original.
    success, frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY)
    count = 1
    while success:
        cv2.imwrite("Frames/%d.jpg" % count, frame)
        count += 1
        success, frame = cap.read(cv.CV_IMWRITE_JPEG_QUALITY)
    cap.release()
def writeOutputFile(self, output):
    """Assemble the frames under Frames/ into a 30 fps H.264 video written
    to *output*."""
    # Output size is taken from the first frame on disk.
    self.height, self.width = cv2.imread("Frames/1.jpg").shape[:2]
    writer = cv2.VideoWriter(output, cv.CV_FOURCC('a', 'v', 'c', '1'),
                             30.0, (self.width, self.height))
    for index in self.sort_files():
        writer.write(cv2.imread("Frames/" + str(index) + ".jpg"))
    writer.release()
def iplimage_to_string(im, lang=None, psm=None):
    """OCR an IplImage by round-tripping it through a temporary file.

    Returns the recognised text, or -1 when OpenCV is unavailable."""
    if not OPENCV_AVAILABLE:
        print ("OpenCV not Available")
        return -1
    # Persist the image, OCR the file, then clean up.
    cv.SaveImage(TEMP_IMAGE, im)
    txt = image_to_string(TEMP_IMAGE, lang, psm)
    os.remove(TEMP_IMAGE)
    return txt
def __init__(self,vurl,id): print vurl self.vurl=vurl self.id=id self.vcap=cv2.VideoCapture(vurl) self.fps=self.vcap.get(cv.CV_CAP_PROP_FPS) self.begin=time.time() self.last=self.begin self.image=None self.ctime=time.time() self.beginpos=0 self.endpos=MAXFRAMES pos=int(v.get(cv.CV_CAP_PROP_POS_FRAMES)) ret,image=self.vcap.read() if ret: self.image=image.copy() self.localshuffler=LocalShuffler(16,self.image.shape) for i in range(16): pos=int(v.get(cv.CV_CAP_PROP_POS_FRAMES)) ret,image=self.vcap.read() assert ret self.localshuffler.feed(image,0,pos) thread.start_new_thread(self.backend,()) print 'ok' else: print 'skip: '+self.vurl
def backend(self): while True: if self.vcap is not None: pos=int(v.get(cv.CV_CAP_PROP_POS_FRAMES)) ret,image=self.vcap.read() if ret: self.image=image.copy() else: print 'read fail: '+self.vurl self.vcap=None time.sleep(1.0/self.fps)
def read(self, training=False, validation=False):
    """Feed the latest frame through the local shuffler and return
    (True, image, id); returns (False, None) when no frame is pending or
    when a validation frame was requested (this source has none).

    Fix: the frame position was queried on an undefined global ``v``
    (``v.get(...)``) — now queried on ``self.vcap``.
    """
    if self.image is None:
        return False, None
    if validation:
        return False, None
    pos = int(self.vcap.get(cv.CV_CAP_PROP_POS_FRAMES))
    image, ig, id = self.localshuffler.feed(self.image, 0, pos)
    # Mark the frame consumed so the backend thread supplies a fresh one.
    self.image = None
    return True, image, id
def shuffle2(self):
    """Open a random video from self.path and seek to a random frame."""
    entries = os.listdir(self.path)
    chosen = os.path.join(self.path, entries[int(random.random() * len(entries))])
    self.video = cv2.VideoCapture(chosen)
    total = self.video.get(cv.CV_CAP_PROP_FRAME_COUNT)
    self.currpos = int(random.random() * total)
    self.video.set(cv.CV_CAP_PROP_POS_FRAMES, self.currpos)
def shuffle2(self):
    """Seek the open video to a random frame inside [beginpos, endpos)."""
    span = self.endpos - self.beginpos
    self.currpos = self.beginpos + int(random.random() * span)
    self.video.set(cv.CV_CAP_PROP_POS_FRAMES, self.currpos)
def detect_shirt2(self): self.hsv=cv2.cvtColor(self.norm_rgb,cv.CV_BGR2HSV) self.hue,s,_=cv2.split(self.hsv) _,self.dst=cv2.threshold(self.hue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) self.fg=cv2.erode(self.dst,None,iterations=3) self.bg=cv2.dilate(self.dst,None,iterations=1) _,self.bg=cv2.threshold(self.bg,1,128,1) mark=cv2.add(self.fg,self.bg) mark32=np.int32(mark) cv2.watershed(self.norm_rgb,mark32) m=cv2.convertScaleAbs(mark32) _,m=cv2.threshold(m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) cntr,h=cv2.findContours(m,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE) print len(cntr) #print cntr[0].shape #cntr[1].dtype=np.float32 #ret=cv2.contourArea(np.array(cntr[1])) #print ret #cntr[0].dtype=np.uint8 cv2.drawContours(m,cntr,-1,(255,255,255),3) cv2.imshow("mask_fg",self.fg) cv2.imshow("mask_bg",self.bg) cv2.imshow("mark",m)
def getFrames(self,img,mask=None): self.rgb=img self.mask=mask self.scratch=np.zeros((600,800,3),np.uint8) self.scratch=cv2.cvtColor(img,cv2.cv.CV_BGR2HSV) self.hue,self.sat,self.val=cv2.split(self.scratch) print 'hsv conversion completed'
def replace_color(self,col=None): print self.hue[0][0] self.hue_val=col #cv2.imshow("hue",self.hue) if col!=None: cv.Set(cv.fromarray(self.hue),(self.hue_val),cv.fromarray(self.mask)) self.scratch=cv2.merge([self.hue,self.sat,self.val]) self.scratch=cv2.cvtColor(self.scratch,cv2.cv.CV_HSV2BGR) print 'replaced' return self.scratch
def __init__(self):
    """Initialise empty foreground/background slots, a legacy-cv memory
    storage for contour operations, and a scratch buffer."""
    self.__foreground__ = None
    self.__back__ = None
    self.mem = cv.CreateMemStorage()  # legacy cv contour storage
    self.scratch = None
def loadBackground(self):
    """Load the reference background image unchanged and lightly blur it
    to suppress sensor noise before subtraction."""
    self.__back__ = cv2.imread("./train/back_g.jpg",
                               cv2.cv.CV_LOAD_IMAGE_UNCHANGED)
    self.__back__ = cv2.blur(self.__back__, (3, 3))
def subtract_back(self, frm):
    """Background subtraction with watershed refinement: diff the blurred
    foreground against the stored background, build sure-fg/sure-bg
    markers, and store the refined mask in self.final_mask."""
    temp = np.zeros((600, 800), np.uint8)  # kept from original (unused)
    self.__foreground__ = cv2.blur(self.__foreground__, (3, 3))
    diff = cv2.absdiff(self.__back__, self.__foreground__)
    val, diff = cv2.threshold(diff, 0, 255,
                              cv.CV_THRESH_BINARY + cv.CV_THRESH_OTSU)
    # Sure foreground by erosion, sure background by dilation+threshold.
    fg = cv2.erode(diff, None, iterations=1)
    bg = cv2.dilate(diff, None, iterations=4)
    _, bg = cv2.threshold(bg, 1, 128, 1)
    markers = np.int32(cv2.add(fg, bg))
    cv2.watershed(frm, markers)
    self.final_mask = cv2.convertScaleAbs(markers)
def open(self, imagepath):
    """Load *imagepath* unchanged; for 100x100 RGB images, collapse the
    three channels into one by summing the planes.

    Raises:
        Exception: when the file cannot be read.

    Fix: ``im == None`` on a numpy array broadcasts element-wise and is
    ambiguous in a boolean context (raises on non-trivial arrays on
    modern numpy); replaced with the identity test ``im is None``.
    """
    im = cv2.imread(imagepath, cv.CV_LOAD_IMAGE_UNCHANGED)
    if im is None:
        raise Exception('No file found at this path :{0}'.format(imagepath))
    if im.shape == (100, 100, 3):
        # uint8 sum wraps modulo 256 — behaviour kept from the original.
        im = im[:, :, 0] + im[:, :, 1] + im[:, :, 2]
    return im
def run(self):
    """Main display loop: grab frames, overlay age/gender predictions and
    the ROI box, show the window, and react to ESC (quit) / Enter (reset)."""
    while not self.terminated:
        self.grab()
        # In file mode, block until the grabbed frame has been detected.
        if self.inputmode == "file":
            while self.isDetected == False:
                time.sleep(0.1)
        frame = self.getFrame()
        if len(frame.shape) == 2:
            # Promote grayscale frames to 3 channels for colour drawing.
            frame = cv2.cvtColor(frame, cv.CV_GRAY2RGB)
        # Aggregate predictions under the shared-data lock.
        self.dataMutex.acquire()
        if len(self.ages) > 0:
            self.age = sum(self.ages) / len(self.ages)
        if len(self.gender_male) > 0 and len(self.gender_female) > 0:
            male = (sum(self.gender_male) / len(self.gender_male)) * 100
            female = (sum(self.gender_female) / len(self.gender_female)) * 100
            if male > female:
                self.gender_label = "Male"
                self.gender = male
            else:
                self.gender_label = "Female"
                self.gender = female
        self.dataMutex.release()
        # Only draw detections that are fresher than `seconds`.
        boxAge = time.time() - self.lastDetection
        crop = None
        seconds = 2
        if (boxAge < seconds and self.rectangles is not None
                and len(self.rectangles) > 0):
            x, y, w, h = self.rectangles[0]
            frame = draw_detections(frame.astype('uint8'), self.rectangles,
                                    self.age, self.gender, self.gender_label,
                                    self.counter, self.COUNTER_LIMIT, 3)
        if self.ROI is not None:
            # Outline the configured region of interest in blue.
            x, y, w, h = self.ROI
            thickness = 2
            frame = frame.astype('uint8')
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), thickness)
        self.annotate(frame)
        cv2.imshow(self.windowCaption, frame)
        # User input (key codes include modifier bits on some platforms):
        key = cv2.waitKey(1)
        if key == 1048603:    # ESC (27): exit the program
            self.terminated = True
        elif key == 1048586:  # Enter (10): reset predictions
            self.setEventReady(False)
            self.setCounter()
def find_blob():
    """Locate the largest red circular blob in the current camera frame.

    Returns (center, radius); ((0, 0), 0) when nothing usable is found or
    ESC is pressed while the debug windows are open."""
    radius = 0
    # Grab and denoise the input frame.
    _, bgr_image = img.read()
    orig_image = bgr_image
    bgr_image = cv2.medianBlur(bgr_image, 3)
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    # Red wraps around the hue axis, so threshold both ends and combine.
    lower_red_hue_range = cv2.inRange(hsv_image, (0, 100, 100), (10, 255, 255))
    upper_red_hue_range = cv2.inRange(hsv_image, (160, 100, 100), (179, 255, 255))
    red_hue_image = cv2.addWeighted(lower_red_hue_range, 1.0,
                                    upper_red_hue_range, 1.0, 0.0)
    red_hue_image = cv2.GaussianBlur(red_hue_image, (9, 9), 2, 2)
    # Hough transform over the combined threshold image.
    circles = cv2.HoughCircles(red_hue_image, cv.CV_HOUGH_GRADIENT,
                               1, 120, 100, 20, 10, 0)
    all_r = np.array([])
    if circles is not None:
        for c in circles[0]:
            all_r = np.append(all_r, int(round(c[2])))
        # Largest radius is treated as the closest ball.
        closest_ball = all_r.argmax()
        center = (int(round(circles[0][closest_ball][0])),
                  int(round(circles[0][closest_ball][1])))
        radius = int(round(circles[0][closest_ball][2]))
        if draw_circle_enable:
            cv2.circle(orig_image, center, radius, (0, 255, 0), 5)
    # Optional debug windows.
    if show_image_enable:
        cv2.namedWindow("Threshold lower image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Threshold lower image", lower_red_hue_range)
        cv2.namedWindow("Threshold upper image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Threshold upper image", upper_red_hue_range)
        cv2.namedWindow("Combined threshold images", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Combined threshold images", red_hue_image)
        cv2.namedWindow("Detected red circles on the input image", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("Detected red circles on the input image", orig_image)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        return (0, 0), 0
    # Reject tiny detections (noise).
    if radius > 3:
        return center, radius
    return (0, 0), 0
def iterate_one(path,y,width,height,batchsize,xdtype,ydtype): v = cv2.VideoCapture(path) #n = v.get(cv.CV_CAP_PROP_FRAME_COUNT) r = path.split('.')[-2] if r[0]=='[' and r[-1]==']': begin_time,end_time=r[1:-1].split('-') begin_time=float(begin_time) v.set(cv.CV_CAP_PROP_POS_MSEC,int(begin_time*1000)) begin=v.get(cv.CV_CAP_PROP_POS_FRAMES) if end_time != '': end_time=float(end_time) v.set(cv.CV_CAP_PROP_POS_MSEC,int(end_time*1000)) end=v.get(cv.CV_CAP_PROP_POS_FRAMES) else: end = v.get(cv.CV_CAP_PROP_FRAME_COUNT) n=end-begin else: begin=0 n = v.get(cv.CV_CAP_PROP_FRAME_COUNT) end=n v.set(cv.CV_CAP_PROP_POS_FRAMES,begin) X = np.zeros((batchsize,3,width,height),dtype=xdtype) ids = np.zeros((batchsize,),dtype='int64') if type(y)==np.array: Y = np.zeros((batchsize,y.shape),dtype=ydtype) else: Y = np.zeros((batchsize,),dtype=ydtype) batches = int(n//batchsize) print 'batchs',batches for i in range(batches): time0=time.time() for j in range(batchsize): r,image=v.read() assert r image=cv2.resize(image, (width,height), interpolation = cv2.INTER_NEAREST) image=image.transpose(2,1,0) X[j,:,:,:]=image Y[j]=y time1=time.time() #print time1-time0 #yield X,Y #X/=256 res={'image':(X/256.0).astype(xdtype),'target':Y} for h in minibatch_handlers: h(res,ids) yield res,ids