The following 9 code examples, extracted from open source Python projects, illustrate how to use cv2.mean().
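Before the examples, a minimal sketch of the basic behavior may help: cv2.mean() computes the per-channel average of an image (optionally restricted to a mask) and always returns a 4-element scalar, with unused channels filled with zeros. The file name 'sample.jpg' below is only a placeholder.

import cv2
import numpy as np

# Load a BGR image (replace 'sample.jpg' with any image on disk).
img = cv2.imread('sample.jpg')

# cv2.mean() returns a 4-tuple (B, G, R, alpha); the alpha slot is 0
# for a 3-channel image.
b_mean, g_mean, r_mean, _ = cv2.mean(img)
print(b_mean, g_mean, r_mean)

# With a mask, only pixels where the mask is non-zero are averaged.
mask = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.circle(mask, (img.shape[1] // 2, img.shape[0] // 2), 50, 255, thickness=-1)
print(cv2.mean(img, mask=mask))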
def color_picker(rect):
    global img, img_gray2, hsv
    roi = img[rect[0][1]:rect[1][1], rect[0][0]:rect[1][0]]
    # Average BGR color of the selected region
    b, g, r, _ = np.uint8(cv2.mean(roi))
    color = cv2.cvtColor(np.uint8([[[b, g, r]]]), cv2.COLOR_BGR2HSV)
    h = color[0][0][0]
    # Define a hue range around the picked color in HSV
    lower = np.array([h - 10, 50, 50])
    upper = np.array([h + 10, 255, 255])
    # Threshold the HSV image to keep only the picked color
    mask = cv2.inRange(hsv, lower, upper)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(img, img, mask=mask)
    res2 = cv2.bitwise_and(img_gray2, img_gray2, mask=cv2.bitwise_not(mask))
    return res + res2
def is_image_valid(image):
    if type(image) is not np.ndarray:
        return False
    if image.shape[0] == 0:
        return False
    if image.shape[1] == 0:
        return False
    if cv2.mean(image) <= (1, 1, 1, 0):
        return False
    return True

# This function receives paths to images and lines from a file with labels
# and returns only the paths to images that have a corresponding label
def test():
    displace()
    start = time.clock()
    newCalibrate()
    exposure = WebCam.getExposure()
    print time.clock() - start, "TOTAL TIME"
    while display:
        image = WebCam.getImage()
        contours = GripRunner.run(image)
        Printing.drawContours(image, contours)
        Printing.display(image)
        cv2.waitKey(20)
    # Get average value at the end of test to recalibrate targetAverage
    # image = cv2.imread('TestImages/Cancer.jpg')
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # value = cv2.split(image)[2]
    # # value = np.array([image[:,:,2]])
    # average = cv2.mean(value)
    # print average
def modifiedLaplacian(img):
    ''''LAPM' algorithm (Nayar89)'''
    M = np.array([-1, 2, -1])
    G = cv2.getGaussianKernel(ksize=3, sigma=-1)
    Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G)
    Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M)
    FM = np.abs(Lx) + np.abs(Ly)
    return cv2.mean(FM)[0]
def tenengrad(img, ksize=3):
    ''''TENG' algorithm (Krotkov86)'''
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx * Gx + Gy * Gy
    mn = cv2.mean(FM)[0]
    if np.isnan(mn):
        return np.nanmean(FM)
    return mn
def normalizedGraylevelVariance(img):
    ''''GLVN' algorithm (Santos97)'''
    mean, stdev = cv2.meanStdDev(img)
    s = stdev[0]**2 / mean[0]
    return s[0]
def detectCoffee(debug=False):
    stream = io.BytesIO()
    # Get the picture (low resolution, so it should be quite fast)
    # Here you can also specify other parameters (e.g.: rotate the image)
    with picamera.PiCamera() as camera:
        # camera.start_preview()
        camera.resolution = (700, 525)
        # camera.awb_mode = "auto"
        # camera.iso = 800
        camera.capture(stream, format='jpeg')
    buff = numpy.fromstring(stream.getvalue(), dtype=numpy.uint8)
    # Now create an OpenCV image
    img = cv2.imdecode(buff, 1)
    # img = cv2.imread('coffee.jpg')
    face_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/coffeePot.xml')
    eye_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/liquid.xml')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rgb_val = 0
    faces = face_cascade.detectMultiScale(gray, 1.2, 500, minSize=(80, 100))
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        # minPotWidth = w*0.8
        # minPotHeight = minPotWidth
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.2, 10, minSize=(70, 50))
        rgb_val = 0
        numPots = 0
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            roi_liquid_color = img[ey:ey + eh, ex:ex + ew]
            # Average color of the detected liquid region
            mean = cv2.mean(roi_liquid_color)
            rgb_val += mean[0] + mean[1] + mean[2]
            numPots += 1
        if numPots == 0:
            numPots += 1
        try:
            rgb_val /= numPots
        except ZeroDivisionError:
            print("Zero div")
            return 0
        print(rgb_val)
    # cv2.destroyAllWindows()
    # cv2.imshow("thing", img)
    # cv2.waitKey(0)
    # if debug:
    #     cv2.imshow("thing", img)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
    return rgb_val
def _process(self):
    """
    Get a frame from the camera and process it for position, power, and
    frequency, then put those values on the frame
    """
    img = self._camera.acquire_image_data()
    ret, thres = cv2.threshold(
        img, self._threshold, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(
        thres, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
    valid_countors = 0
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w < self._min_size or h < self._min_size:
            continue
        valid_countors += 1
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0))
        # Position Calculation
        self._xpos = x + w / 2
        self._ypos = y + h / 2
        # Power Calculation
        mask = np.zeros(img.shape, np.uint8)
        cv2.circle(mask, (int(self._xpos), int(self._ypos)),
                   self._sample_radius, (255, 255, 255), thickness=-1)
        self._power = cv2.mean(img, mask)[0]
        # Draw power circle
        cv2.circle(img, (int(self._xpos), int(self._ypos)),
                   self._sample_radius, (255, 255, 255), thickness=1)
    # Frequency Calculation
    on = valid_countors > 0
    if on:
        if not self._last_on:
            delta_time = time.time() - self._freq_start
            self._frequency = 1 / delta_time
            self._freq_start = time.time()
    self._last_on = on
    self._last_frame = time.time()
    # Put the measured values in the upper left of the frame
    cv2.putText(img, "Position: ({0}, {1})".format(self._xpos, self._ypos),
                (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                thickness=2)
    cv2.putText(img, "Power: {0}".format(self._power), (5, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2)
    cv2.putText(img, "Frequency: {0}".format(self._frequency), (5, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2)
    print(img.shape)
    print(type(img))
    print(img)
    self.frame_ready.emit(
        [self._xpos, self._ypos, self._power, self._frequency], img)
def _get_frame(self):
    """
    Get a frame from the camera and process it for position, power, and
    frequency, then put those values on the frame
    """
    img = self._camera.acquire_image_data()
    ret, thres = cv2.threshold(
        img, self._widget.threshold.value, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(
        thres, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
    valid_countors = 0
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w < self._widget.min_size.value or h < self._widget.min_size.value:
            continue
        valid_countors += 1
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0))
        # Position Calculation
        self._xpos = x + w / 2
        self._ypos = y + h / 2
        # Power Calculation
        mask = np.zeros(img.shape, np.uint8)
        cv2.circle(mask, (int(self._xpos), int(self._ypos)),
                   self._widget.sample_radius.value, (255, 255, 255),
                   thickness=-1)
        self._power = cv2.mean(img, mask)[0]
        # Draw power circle
        cv2.circle(img, (int(self._xpos), int(self._ypos)),
                   self._widget.sample_radius.value, (255, 255, 255),
                   thickness=1)
    # Frequency Calculation
    on = valid_countors > 0
    if on:
        if not self._last_on:
            delta_time = time.time() - self._freq_start
            self._frequency = 1 / delta_time
            self._freq_start = time.time()
    self._last_on = on
    # Put the measured values in the upper left of the frame
    cv2.putText(img, "Position: ({0}, {1})".format(self._xpos, self._ypos),
                (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                thickness=2)
    cv2.putText(img, "Power: {0}".format(self._power), (5, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2)
    cv2.putText(img, "Frequency: {0}".format(self._frequency), (5, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2)
    # cv2.imwrite("{}.jpeg".format(self._frame), img)
    self._frame += 1
    # Update the GUI with the new frame
    if self._camera_window is not None:
        self._camera_window.update_frame(img)