我们从 Python 开源项目中,提取了以下 10 个代码示例,用于说明如何使用 cv2.createBackgroundSubtractorMOG2()。
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Detect moving objects in a video via MOG2 background subtraction and
    draw a bounding box around each connected component whose area falls
    within [min_thresh, max_thresh].

    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        # Opening removes small noise specks from the foreground mask.
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        num_labels, _labels, stats, _centroids = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(num_labels):
            # stats row layout: x, y, width, height, area (CC_STAT_* order).
            x, y, w, h, area = stats[i]
            if min_thresh <= area <= max_thresh:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        # imshow only renders during waitKey; without it the window never
        # updates and the loop cannot be interrupted. Esc (27) exits early.
        if cv2.waitKey(30) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
def subtract_background(self):
    """Scan self.frames for shot transitions using SSIM, collecting a
    denoised foreground mask for each detected transition.

    Appends (1, denoised_foreground) to self.transitions for frames whose
    SSIM against the previous frame drops below self.transition_threshold,
    and (0, None) otherwise.
    """
    fgbg = cv2.createBackgroundSubtractorMOG2()
    prev = self.frames[0]
    # Prime the background model with the first frame; the mask itself
    # is not needed yet.
    fgbg.apply(prev)
    # NOTE: renamed the loop variable from `next` (shadowed the builtin)
    # and dropped the unused enumerate index.
    for cur in self.frames[1:]:
        prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        cur_gray = cv2.cvtColor(cur, cv2.COLOR_BGR2GRAY)
        similarity_metric = compare_ssim(prev_gray, cur_gray)
        print('prev/next similarity measure = %f' % similarity_metric)
        # The model must be updated every frame regardless of the branch,
        # so apply() is hoisted out of the if/else.
        fgmask = fgbg.apply(cur)
        if similarity_metric < self.transition_threshold:
            fgdn = denoise_foreground(cur, fgmask)
            self.transitions.append((1, fgdn))
        else:
            self.transitions.append((0, None))
        prev = cur.copy()
def __init__(self):
    """Set up the MOG2 background subtractor and the morphology elements
    used to clean up its foreground masks."""
    super(TargetFilterBGSub, self).__init__()
    # varThreshold: higher values detect fewer/smaller changed regions.
    # Shadow detection is disabled so shadow pixels do not pollute the mask.
    self._bgs = cv2.createBackgroundSubtractorMOG2(history=0,
                                                   varThreshold=8,
                                                   detectShadows=False)
    # ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.
    # Learning rate for the background subtractor:
    #   0 = never adapts after the initial background is built;
    #   slightly above 0 adapts slowly, which favours detecting slower
    #   movement at the cost of a longer initial learning period.
    self._learning_rate = 0.001
    # Reusable elements for erode/dilate. CROSS eliminates more thin
    # horizontal/vertical lines than RECT and keeps blobs with extent in
    # both axes.
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
def __init__(self):
    """Initialize the Detectors instance.

    Creates the MOG2 background subtractor used for foreground extraction.
    """
    self.fgbg = cv2.createBackgroundSubtractorMOG2()
def __init__(self):
    """Create the underlying ``BackgroundSubtractorMOG2`` instance."""
    self.fgbg = cv2.createBackgroundSubtractorMOG2()
def __init__(self, kernel=5):
    """Store the morphology kernel size and build the MOG2 subtractor.

    Args:
        kernel (int): kernel size used by downstream morphological ops.
    """
    self.fgbg = cv2.createBackgroundSubtractorMOG2()
    self.kernel = kernel
def __init__(self, video_path, out_path, video_shape, filters,
             write_capture_info, subtractor='MOG'):
    """Initializer.

    Args:
        video_path (str): path to video file, or '-1' for the default camera.
        out_path (str): output video destination path.
        video_shape (tuple): default size for frame redimensioning.
        filters (list): list of filter's names to apply in video source.
        write_capture_info (bool): should write frame info when displaying.
        subtractor (str): name of background subtractor ('MOG' or 'GMG').

    Raises:
        ValueError: if `subtractor` is not a supported name.

    Returns:
        None.
    """
    # The sentinel '-1' selects the default capture device instead of a file.
    if video_path == '-1':
        video_path = int(video_path)

    self.source = cv2.VideoCapture(video_path)
    if not self.source.isOpened():
        print('Could not find video file.')
        sys.exit()

    if subtractor == 'MOG':
        self.subtractor = cv2.createBackgroundSubtractorMOG2()
    elif subtractor == 'GMG':
        self.subtractor = cv2.bgsegm.createBackgroundSubtractorGMG()
    else:
        # Fail fast: previously an unknown name silently left
        # `self.subtractor` unset, producing an AttributeError far from
        # the misconfiguration.
        raise ValueError(
            "Unknown subtractor '{}'; expected 'MOG' or 'GMG'.".format(
                subtractor))

    self.current_frame = None
    self.playing = False
    self.video_shape = video_shape
    self.codec = cv2.VideoWriter_fourcc(*'XVID')
    self.out = cv2.VideoWriter('{}tracking-out.avi'.format(out_path),
                               self.codec, 30.0, self.video_shape)
    self.filters = filters
    self.write_capture_info = write_capture_info
    self.start()
def create_background(self):
    """Build the MOG2 background subtractor from the configured variance
    threshold (``self.args.mogvariance``), with shadow detection off."""
    variance = float(self.args.mogvariance)
    self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False,
                                                   varThreshold=variance)
    # Keep the background model dominated by mostly-static pixels.
    self.fgbg.setBackgroundRatio(0.95)
def motionDetecter(blur_to_motiondetector_blurred_Queue, file_Queue):
    """Worker loop: consume batches of pre-blurred frames, detect motion via
    MOG2 foreground fractions and Canny center-of-mass tracking, write a
    `.motion` metadata file, and enqueue clips with motion for upload.

    Args:
        blur_to_motiondetector_blurred_Queue: queue yielding
            (filename, list_of_blurred_frames) tuples from the blurrer.
        file_Queue: queue feeding the S3/Dynamo uploader process.
    """
    # Creating MOG object
    fgbg = cv2.createBackgroundSubtractorMOG2()
    lastMeanCenter = [-1, -1]
    # Start infinite loop here
    while True:
        motionFlag = 0
        FRACTIONS = []
        FOREGROUND = []
        CENTERS = []
        # Receiving FRAMES
        filename, BLURS = blur_to_motiondetector_blurred_Queue.get()
        t1 = time.time()
        while len(BLURS) > 0:
            blurred = BLURS.pop(0)
            edges = cv2.Canny(blurred, 160, 200)
            CENTERS.append(getCenterOfMass(edges))
            fgmask = fgbg.apply(blurred)
            ret, frac = getMotionFromFrame(fgmask)
            motionFlag += ret
            FRACTIONS.append(frac)
        del BLURS
        # Getting max foreground percent for every second (FPS frames).
        # (was `xrange` / `print` statement: Python-2-only syntax)
        for i in range(VIDEO_LENGTH):
            FOREGROUND.append(max(FRACTIONS[FPS * i:FPS * (i + 1)]))
        meanCenters = getMeanCenters(lastMeanCenter, CENTERS)
        lastMeanCenter = meanCenters[-1]
        motionList = getMotionFromCenters(meanCenters)
        # Writing output to file; strip the 'blurrer_' prefix and the
        # extension from the filename to derive the output name.
        with open(filename[8:-4] + '.motion', 'w') as f:
            f.write(str(motionFlag) + '\n')
            # NOTE(review): no newline after FOREGROUND, so motionList is
            # appended on the same line -- confirm the reader expects that.
            f.write(str(FOREGROUND))
            f.write(str(motionList) + '\n')
        # (redundant f.close() removed: `with` already closes the file)
        # Deleting temporary used by Blurrer
        os.remove(filename)
        print("Processed MOG and Center of Mass", time.time() - t1)
        # upload video and metadata to AWS if motion detected
        if motionFlag > 0 and max(motionList) > 5:
            # NOTE(review): `filename` was removed just above; the uploader
            # presumably works from the derived .mp4/.motion paths -- verify.
            file_Queue.put((filename, FOREGROUND))
        # delete the video and motion files otherwise
        else:
            os.remove(filename[8:-4] + '.mp4')
            os.remove(filename[8:-4] + '.motion')
    return  # unreachable: the worker loops forever on the queue
def __init__(self, filename, frame): self.video_frame = frame # Global variable self.start_time = None self.width_frame = 1120 # pixel self.height_frame = 630 # pixel self.init_time = 5 # second /fps (fps 30) -> 24/30 = 0.8 -> 8 second self.frame = 0 self.total_LV = 0 self.total_HV = 0 self.totalVehicle = 0 self.initMOG2 = cv2.createBackgroundSubtractorMOG2() # Mixture of Gaussian initialization self.initMOG = cv2.bgsegm.createBackgroundSubtractorMOG() self.avg = 0 self.currentListVehicle = [] self.tempListVehicle = [] self.pastListVehicle = [] self.currentTrajectory = [] # Start Capture Video self.filename = filename self.cap = cv2.VideoCapture(filename) self.statusNextFrame = True # Initiation vehicle module self.vehicle = vehicleInit.vehicle self.trajectory = trajectoryInit.trajectory # Fps self.firstFrame = 0 self.endFrame = 0 self.processTime = 0 # Initiation to moving average _, PrimImg_frame = self.cap.read() PrimImg_frame = improc.cvtBGR2RGB(PrimImg_frame) PrimImg_frame = cv2.resize(PrimImg_frame, (self.width_frame, self.height_frame)) self.avg = np.float32(PrimImg_frame)