The following four code examples, extracted from open-source Python projects, show how to use cv2.HOGDescriptor_getDefaultPeopleDetector().
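Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern: create a HOGDescriptor, load the default people-detector SVM, and run detectMultiScale. The file name "people.jpg" and the parameter values are placeholders chosen for illustration, not taken from the examples below.

import cv2

# Minimal sketch (assumes an image file named "people.jpg" exists).
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

img = cv2.imread("people.jpg")
# rects: bounding boxes as (x, y, w, h); weights: per-box SVM confidences
rects, weights = hog.detectMultiScale(img, winStride=(8, 8),
                                      padding=(8, 8), scale=1.05)
for (x, y, w, h) in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Detections", img)
cv2.waitKey(0)
cv2.destroyAllWindows()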
def HogDescriptor(self, image):
    # Initialise the HOG descriptor with the built-in people detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # Detect people; returns bounding boxes and confidence weights
    (rects, weights) = hog.detectMultiScale(image, winStride=(5, 5),
                                            padding=(16, 16), scale=1.05,
                                            useMeanshiftGrouping=False)
    return rects
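The method above discards the weights array that detectMultiScale also returns (the SVM confidence of each box). A minimal sketch of filtering weak detections by that confidence; the 0.5 threshold and the "crowd.jpg" path are assumptions for illustration, not values from the source project:

import cv2
import numpy as np

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

image = cv2.imread("crowd.jpg")   # placeholder image path
rects, weights = hog.detectMultiScale(image, winStride=(5, 5),
                                      padding=(16, 16), scale=1.05)
# keep only detections whose SVM score exceeds an (assumed) threshold of 0.5
strong = [box for box, score in zip(rects, np.ravel(weights)) if score > 0.5]
print("kept {} of {} detections".format(len(strong), len(rects)))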
def find_people(self, img):
    '''
    Detect people in image
    :param img: numpy.ndarray
    :return: count of rectangles after non-maxima suppression,
             corresponding to number of people detected in picture
    '''
    t = time.time()
    # HOG descriptor/person detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # Chooses whichever size is less
    image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
    # detect people in the image
    (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                          padding=self.PADDING, scale=self.SCALE)
    # apply non-maxima suppression to the bounding boxes but use a fairly large overlap
    # threshold, to try to maintain overlapping boxes that are separate people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)
    print("Elapsed time: {} seconds".format(int((time.time() - t) * 100) / 100.0))
    if self.SHOW_IMAGES:
        # draw the final bounding boxes
        for (xA, yA, xB, yB) in pick:
            # Tighten the rectangle around each person by a small margin
            shrinkW, shrinkH = int(0.05 * xB), int(0.15 * yB)
            cv2.rectangle(image, (xA + shrinkW, yA + shrinkH),
                          (xB - shrinkW, yB - shrinkH), self.BOX_COLOR, 2)
        cv2.imshow("People detection", image)
        cv2.waitKey(self.IMAGE_WAIT_TIME)
        cv2.destroyAllWindows()
    return len(pick)
def find_people(self, img):
    '''
    Detect people in image
    :param img: numpy.ndarray
    :return: count of rectangles after non-maxima suppression,
             corresponding to number of people detected in picture
    '''
    t = time.time()
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # Chooses whichever size is less
    image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
    # detect people in the image
    (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                          padding=self.PADDING, scale=self.SCALE)
    # apply non-maxima suppression to the bounding boxes using a fairly large
    # overlap threshold to try to maintain overlapping boxes that are still people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)
    print("Elapsed time of detection: {} seconds".format(int((time.time() - t) * 100) / 100.0))
    if self.SHOW_IMAGES:
        # draw the final bounding boxes
        for (xA, yA, xB, yB) in pick:
            # Tighten the rectangle around each person by a small margin
            cv2.rectangle(image, (xA + 5, yA + 5), (xB - 5, yB - 10), self.BOX_COLOR, 2)
        cv2.imshow("People detection", image)
        cv2.waitKey(self.IMAGE_WAIT_TIME)
        cv2.destroyAllWindows()
    return len(pick)
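Both find_people variants expect a class that carries the tuning parameters as attributes (MIN_IMAGE_WIDTH, WIN_STRIDE, PADDING, SCALE, OVERLAP_THRESHOLD, SHOW_IMAGES, BOX_COLOR, IMAGE_WAIT_TIME) and they rely on imutils plus its non_max_suppression helper. A hedged sketch of such a wrapper class; the concrete values below are plausible defaults for illustration, not the projects' actual settings:

import time
import cv2
import numpy as np
import imutils
from imutils.object_detection import non_max_suppression

class PeopleDetector:
    """Carrier for the attributes the find_people examples expect.
    The values below are assumed defaults, not taken from the original projects."""
    MIN_IMAGE_WIDTH = 400
    WIN_STRIDE = (4, 4)
    PADDING = (8, 8)
    SCALE = 1.05
    OVERLAP_THRESHOLD = 0.65
    SHOW_IMAGES = False        # set True to pop up the annotated image
    BOX_COLOR = (0, 255, 0)
    IMAGE_WAIT_TIME = 0        # cv2.waitKey delay in ms (0 = wait for a key press)

# Attach either find_people definition from above to the class, then call it:
#   PeopleDetector.find_people = find_people
#   count = PeopleDetector().find_people(cv2.imread("crowd.jpg"))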
def detect():
    move = 0
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # grayscale frame (not used below)
        image = imutils.resize(img, width=min(400, img.shape[1]))
        (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                                padding=(8, 8), scale=1.05)
        # draw the raw detections in red
        for (x, y, w, h) in rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # apply non-maxima suppression and draw the merged boxes in green
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
        for (xA, yA, xB, yB) in pick:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
            # decide which way to move based on where the box sits in the frame
            if (xA / 480) > 0.5:
                print("move to right")
                move = 4
            elif (yA / 640) > 0.5:
                print('move to down')
                move = 3
            elif (xB / 480) < 0.3:
                print('move to left')
                move = 2
            elif (yB / 640) < 0.3:
                print('move to up')
                move = 1
            else:
                print('do nothing')
                move = 0
            mqt.pass_message(move)
            #eyes = eye_cascade.detectMultiScale(roi_gray)
            #for (ex,ey,ew,eh) in eyes:
            #    cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        cv2.imshow('img', image)
        k = cv2.waitKey(1) & 0xff
        if k == 27:       # ESC quits the loop
            break
        elif k == ord('w'):
            mqt.pass_message(1)
        elif k == ord('s'):
            mqt.pass_message(3)
    cap.release()
    cv2.destroyAllWindows()
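The detect() example additionally depends on a webcam and on a project-specific mqt module whose pass_message function is not shown here. To try the loop without that messaging backend, a trivial stand-in can be used; this stub is purely illustrative and not part of the original project:

class _MqtStub:
    """Hypothetical stand-in for the project's mqt module."""
    @staticmethod
    def pass_message(code):
        # In the example above, 0..4 mean: do nothing, up, left, down, right
        print("pass_message:", code)

mqt = _MqtStub()   # lets detect() run without the real messaging backend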