The following 2 code examples, extracted from open source Python projects, illustrate how to use cv2.BackgroundSubtractorMOG2().
def __init__(self):
    super(TargetFilterBGSub, self).__init__()

    # background subtractor
    #self._bgs = cv2.BackgroundSubtractorMOG()
    #self._bgs = cv2.BackgroundSubtractorMOG2()  # not great defaults, and need bShadowDetection to be False
    #self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)

    # varThreshold: higher values detect fewer/smaller changed regions
    self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)

    # ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.

    # Learning rate for background subtractor.
    # 0 = never adapts after initial background creation.
    # A bit above 0 looks good.
    # Lower values are better for detecting slower movement, though it
    # takes a bit of time to learn the background initially.
    self._learning_rate = 0.001

    # elements to reuse in erode/dilate
    # CROSS eliminates more horizontal/vertical lines and leaves more
    # blobs with extent in both axes [than RECT].
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
def __init__(self, video_src):
    self.cam = cv2.VideoCapture(video_src)
    ret, self.frame = self.cam.read()
    cv2.namedWindow('gesture_hci')

    # set channel range of skin detection
    self.mask_lower_yrb = np.array([44, 131, 80])    # [54, 131, 110]
    self.mask_upper_yrb = np.array([163, 157, 155])  # [163, 157, 135]
    # create trackbar for skin calibration
    self.calib_switch = False

    # create background subtractor (OpenCV 2.4 constructor; in OpenCV 3+ the
    # equivalent is cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=50, detectShadows=True))
    self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)

    # define dynamic ROI area
    self.ROIx, self.ROIy = 200, 200
    self.track_switch = False
    # record previous positions of the centroid of ROI
    self.preCX = None
    self.preCY = None

    # A queue to record the last couple of gesture commands
    self.last_cmds = FixedQueue()

    # prepare some data for detecting single-finger gesture
    self.fin1 = cv2.imread('./test_data/index1.jpg')
    self.fin2 = cv2.imread('./test_data/index2.jpg')
    self.fin3 = cv2.imread('./test_data/index3.jpg')

    # switch to turn on mouse input control
    self.cmd_switch = False

    # count loop (frame), for debugging
    self.n_frame = 0

# On-line Calibration for skin detection (bug, not stable)
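Both examples above only construct the subtractor inside an __init__ method; the actual foreground extraction happens later by calling apply() on each frame. The sketch below (not taken from either project) shows a minimal, self-contained usage loop with the OpenCV 3+ factory function cv2.createBackgroundSubtractorMOG2(); the video path 'video.mp4' and the parameter values are placeholders chosen for illustration.

import cv2

# Hypothetical input source; replace with your own file path, or 0 for a webcam.
cap = cv2.VideoCapture('video.mp4')

# OpenCV 3+ factory function; in OpenCV 2.4 the equivalent constructor is
# cv2.BackgroundSubtractorMOG2(history, varThreshold, bShadowDetection).
fgbg = cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=16, detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # learningRate controls how fast the background model adapts:
    # 0 = never update after initialization, -1 = let OpenCV choose automatically.
    fgmask = fgbg.apply(frame, learningRate=0.001)

    # Optional cleanup of the mask before contour detection, mirroring the
    # erode/dilate elements used in the first example.
    fgmask = cv2.erode(fgmask, cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5)))
    fgmask = cv2.dilate(fgmask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)))

    cv2.imshow('foreground mask', fgmask)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()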