The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.COLOR_BGR2HSV.
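Before the project examples, here is a minimal usage sketch of the conversion itself (not taken from any project below; the image path and the HSV bounds are placeholders). It shows the one fact most of the examples rely on: for 8-bit images OpenCV stores hue in [0, 179] and saturation/value in [0, 255], which is why the masks below use bounds such as [h-10, 50, 50] to [h+10, 255, 255].

import cv2
import numpy as np

# 'example.jpg' is a placeholder path
img = cv2.imread('example.jpg')              # OpenCV loads images in BGR order
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)   # 8-bit HSV: H in [0,179], S and V in [0,255]

# Typical follow-up: keep only pixels inside an HSV range and mask the original image
lower = np.array([20, 50, 50])               # example bounds for a yellow-ish hue band
upper = np.array([30, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(img, img, mask=mask)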
def image_callback(self, msg):
    image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([18, 120, 200])
    upper_yellow = numpy.array([28, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    h, w, d = image.shape
    # keep only a 20-pixel band near the bottom of the image
    search_top = int(3 * h / 4)
    search_bot = int(3 * h / 4 + 20)
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(image, (cx, cy), 20, (0, 0, 255), -1)
        # BEGIN CONTROL
        err = cx - w / 2
        self.twist.linear.x = 0.2
        self.twist.angular.z = -float(err) / 100
        self.cmd_vel_pub.publish(self.twist)
        # END CONTROL
    cv2.imshow("window", image)
    cv2.waitKey(3)
def color_picker(rect):
    global img, img_gray2, hsv
    roi = img[rect[0][1]:rect[1][1], rect[0][0]:rect[1][0]]
    b, g, r, _ = np.uint8(cv2.mean(roi))
    color = cv2.cvtColor(np.uint8([[[b, g, r]]]), cv2.COLOR_BGR2HSV)
    h = color[0][0][0]
    # define a hue range around the picked color in HSV
    lower = np.array([h - 10, 50, 50])
    upper = np.array([h + 10, 255, 255])
    # Threshold the HSV image to keep only the picked color
    mask = cv2.inRange(hsv, lower, upper)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(img, img, mask=mask)
    res2 = cv2.bitwise_and(img_gray2, img_gray2, mask=cv2.bitwise_not(mask))
    return res + res2
def draw_circle(event, x, y, flags, param):
    # if event == cv2.EVENT_LBUTTONDBLCLK:
    if event == cv2.EVENT_LBUTTONDOWN:
        print('mouse x and y is ')
        print(x, y)
        px = im1[y, x]
        print('RGB Value:')
        print(px)
        px_hsv = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
        H = px_hsv.item(y, x, 0)
        S = px_hsv.item(y, x, 1)
        V = px_hsv.item(y, x, 2)
        print('HSV Value (H, S, V):')
        print(H, S, V)
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
def print_img_array(self):
    img = self.take_screenshot('array')
    # converts image to HSV
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # gets the lower values from the sliders
    low_hue = self.low_hue.get()
    low_sat = self.low_sat.get()
    low_val = self.low_val.get()
    # gets upper values from sliders
    high_hue = self.high_hue.get()
    high_sat = self.high_sat.get()
    high_val = self.high_val.get()
    lower_color = np.array([low_hue, low_sat, low_val])
    upper_color = np.array([high_hue, high_sat, high_val])
    # creates the mask and result
    mask = cv2.inRange(self.hsv_image, lower_color, upper_color)
    mask = np.array(mask)
    mask.view

# Instance of Tkinter
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation with a constant and clips the value between [0,1.0]
    Args:
        img: input image in float32
        label: returns label unchanged
        lower: lower val for sampling
        upper: upper val for sampling
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # saturation should always be within [0,1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel
    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360
    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
def check_image(name):
    expected_data = json.loads(open('./img/' + name + '.json').read())
    if not expected_data['enabled']:
        return
    expected_targets = expected_data['targets']
    img = cv2.imread('./img/' + name + '.jpg', cv2.IMREAD_COLOR)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    args = config.copy()
    args['img'] = hsv
    args['output_images'] = {}
    actual_targets = find(**args)
    # make sure same number of targets are detected
    assert len(expected_targets) == len(actual_targets)
    # targets is a list of 2-tuples with expected and actual results
    targets = zip(expected_targets, actual_targets)
    # compare all the different features of targets to make sure they match
    for pair in targets:
        expected, actual = pair
        # make sure that the targets are close to where they are supposed to be
        assert is_close(expected['pos']['x'], actual['pos']['x'], 0.02)
        assert is_close(expected['pos']['y'], actual['pos']['y'], 0.02)
        # make sure that the targets are close to the size they are supposed to be
        assert is_close(expected['size']['width'], actual['size']['width'], 0.02)
        assert is_close(expected['size']['height'], actual['size']['height'], 0.02)
def handle_image(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_data_condition.acquire()
    state['img'] = hsv
    args = config['target'].copy()
    args['img'] = hsv
    args['draw_output'] = state['draw_output']
    args['output_images'] = {}
    targets = vision.find(**args)
    state['targets'] = targets
    state['output_images'] = args['output_images']
    new_data_condition.notify_all()
    new_data_condition.release()
    fps, processing_time = update_fps()
    state['fps'] = round(fps, 1)
    print('Processed in', processing_time, 'ms, max fps =', round(fps_smoothed, 1))
def execute_ColorSpace(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([max(obj.h1 - obj.h2, 0), max(obj.s1 - obj.s2, 0), max(obj.v1 - obj.v2, 0)])
    upper = np.array([min(obj.h1 + obj.h2, 255), min(obj.s1 + obj.s2, 255), min(obj.v1 + obj.v2, 255)])
    say("ee")
    say(lower)
    say(upper)
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.inRange(img, lower, upper)
    res = cv2.bitwise_and(img, img, mask=mask)
    obj.Proxy.img = res
def execute_HSV(proxy, obj):
    say("hsv ..")
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([obj.valueColor - obj.deltaColor, 0, 0])
    upper = np.array([obj.valueColor + obj.deltaColor, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(hsv, hsv, mask=mask)
    obj.Proxy.img = res
def process_image(self, msg):
    """ Process image messages from ROS and stash them in an attribute
        called cv_image for subsequent processing """
    self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    self.arc_image = np.zeros((480, 640, 3), np.uint8)
    self.draw_arc()
    # Transform the image of our arc from a top down image into the plane of our CV
    self.transform_img()
    # overlay the projected path onto cv_image
    self.overlay_img()
    if self.omega is not None and self.omega == 0.0:
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)
        self.binary_image = cv2.inRange(self.hsv_image, self.hsv_lb, self.hsv_ub)
        self.spot_delineators = self.find_delineators()
        if self.color != (0, 255, 0):
            # This logic makes it such that once the lines turn green, they stay green
            self.color = (0, 0, 255) if not self.check_aligned() else (0, 255, 0)
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
def calDepthMap(I, r):
    hsvI = cv2.cvtColor(I, cv2.COLOR_BGR2HSV)
    s = hsvI[:, :, 1] / 255.0
    v = hsvI[:, :, 2] / 255.0
    # cv2.imshow("hsvI", hsvI)
    # cv2.waitKey()
    sigma = 0.041337
    sigmaMat = np.random.normal(0, sigma, (I.shape[0], I.shape[1]))
    output = 0.121779 + 0.959710 * v - 0.780245 * s + sigmaMat
    outputPixel = output
    output = scipy.ndimage.filters.minimum_filter(output, (r, r))
    outputRegion = output
    cv2.imwrite("data/vsFeature.jpg", outputRegion * 255)
    # cv2.imshow("outputRegion", outputRegion)
    # cv2.waitKey()
    return outputRegion, outputPixel
def draw_circle(event, x, y, flags, param):
    # if event == cv2.EVENT_LBUTTONDBLCLK:
    if event == cv2.EVENT_LBUTTONDOWN:
        print('mouse x and y is ')
        print(x, y)
        px = im1[y, x]
        print('RGB Value:')
        print(px)
        px_hsv = cv2.cvtColor(im1, cv2.COLOR_BGR2HSV)
        H = px_hsv.item(y, x, 0)
        S = px_hsv.item(y, x, 1)
        V = px_hsv.item(y, x, 2)
        print('HSV Value (H, S, V):')
        print(H, S, V)
def _process_image(self, image):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv = cv2.medianBlur(hsv, 5)
    draw_col = (0, 0, 255)
    p1 = (self.edges['l'], self.edges['d'])
    p2 = (self.edges['r'], self.edges['u'])
    cv2.rectangle(hsv, p1, p2, draw_col)
    vert_spacing = (self.edges['r'] - self.edges['l']) / float(len(grid))
    for i in range(1, len(grid)):
        x_pos = int(self.edges['l'] + i * vert_spacing)
        p1 = (x_pos, self.edges['d'])
        p2 = (x_pos, self.edges['u'])
        cv2.line(hsv, p1, p2, draw_col)
    horiz_spacing = (self.edges['d'] - self.edges['u']) / float(len(grid[0]))
    for i in range(1, len(grid[0])):
        y_pos = int(self.edges['u'] + i * horiz_spacing)
        p1 = (self.edges['l'], y_pos)
        p2 = (self.edges['r'], y_pos)
        cv2.line(hsv, p1, p2, draw_col)
    return hsv
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
def color_mask(image, color, tolerance=0):
    """Extract a mask of image according to color under a certain
    tolerance level (defaults to 0)."""
    if tolerance > 100:
        tolerance = 100
    elif tolerance < 0:
        tolerance = 0
    tolerance = int(tolerance * 255 / 100)
    red, green, blue = color
    bgr_color = np.uint8([[[blue, green, red]]])
    hsv_color = cv2.cvtColor(bgr_color, cv2.COLOR_BGR2HSV)[0][0]
    mask_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_range = hsv_color - np.array([tolerance, 0, 0])
    lower_range[lower_range > 255] = 255
    lower_range[lower_range < 0] = 0
    upper_range = hsv_color + np.array([tolerance, 0, 0])
    upper_range[upper_range > 255] = 255
    upper_range[upper_range < 0] = 0
    mask = cv2.inRange(mask_image, lower_range, upper_range)
    return mask
def binary_thresh(img, boundaries, filter):
    if filter == 'RGB':
        frame_to_thresh = img.copy()
    else:
        frame_to_thresh = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    for (lower, upper) in boundaries:
        # create numpy arrays from the boundaries
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        # find the colors within the specified boundaries and apply the mask
        mask = cv2.inRange(frame_to_thresh, lower, upper)
        output = cv2.bitwise_and(frame_to_thresh, frame_to_thresh, mask=mask)
    # Returns the binary mask from the last boundary pair
    return mask
def load(self):
    if self.path is None:
        print("Current path is empty!")
        print("Please set one!")
    else:
        try:
            # Return a 3-channel color image
            self.image = cv2.imread(self.path)
            # cv2.imshow('f', self.image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
        except:
            raise ValueError('Loading error!')
        # convert BGR to HSV
        self.hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        # split image into HSV channels
        self.h, self.s, self.v = cv2.split(self.hsv)

# Apply Gaussian noise and save
def blob__Detec(image):
    img = copy(image)
    height, width, channels = img.shape
    new_img = np.ones((height, width, channels), np.uint8)
    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    Yellow = {'min': (20, 100, 100), 'max': (30, 255, 255)}
    Blue = {'min': (50, 100, 100), 'max': (100, 255, 255)}
    Brown = {'min': (0, 100, 0), 'max': (20, 255, 255)}
    mask_b = cv2.inRange(HSV, Blue['min'], Blue['max'])
    mask_br = cv2.inRange(HSV, Brown['min'], Brown['max'])
    mask_y = cv2.inRange(HSV, Yellow['min'], Yellow['max'])
    blue = cv2.bitwise_and(img, img, mask=mask_b)
    yellow = cv2.bitwise_and(img, img, mask=mask_y)
    brown = cv2.bitwise_and(img, img, mask=mask_br)
    new_img = cv2.add(blue, brown)
    new_img = cv2.add(new_img, yellow)
    return new_img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
    img_BGR = self.img[index]
    img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
    img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)
    lower_red = np.array(hsv_value_B)
    upper_red = np.array(hsv_valueT)
    mask = cv2.inRange(img_HSV, lower_red, upper_red)
    res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
    if self.erosion:
        kernel = np.ones((5, 5), np.uint8)
        res = cv2.erode(res, kernel, iterations=1)
    if self.dilate:
        kernel = np.ones((9, 9), np.uint8)
        res = cv2.dilate(res, kernel, iterations=1)
    return res
def get_bounding_rect(cap, win_cap, win, upper, lower):
    msk = cv2.dilate(
        cv2.erode(
            cv2.inRange(
                cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5)),
                np.array(lower), np.array(upper)),
            None, iterations=3),
        None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # Draw the contours to the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box information about the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # Draw rectangle on the image to represent the bounding box
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def cb_pt(self, msg):
    self.pt = int(msg.y), int(msg.x)
    # print(self.pt)
    if self.image is None:
        print("NO IMAGE")
        return
    img = self.bridge.imgmsg_to_cv2(self.image)
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    vals = hsv_img[self.pt]
    self.pts.append(vals)
    pts = np.array(self.pts)
    min_thresh = np.min(pts, axis=0).tolist()
    max_thresh = np.max(pts, axis=0).tolist()
    print("thresholds: %s, %s " % (min_thresh, max_thresh))

# rospy.init_node("huepicker")
def shoot(x1, y1, x2, y2, *args, **kwargs):
    """Takes a screenshot at the given coordinates as a PIL image, then
    converts it to a cv2 grayscale image (or HSV/RGB, per the extra args) and returns it"""
    # creates width & height for screenshot region
    w = x2 - x1
    h = y2 - y1
    # PIL format as RGB
    img = pyautogui.screenshot(region=(x1, y1, w, h))  # X1,Y1,X2,Y2
    # im.save('screenshot.png')
    # Converts to an array used for OpenCV
    img = np.array(img)
    try:
        for arg in args:
            if arg == 'hsv':
                # Converts to BGR format for OpenCV
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                return hsv_img
            if arg == 'rgb':
                rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                return rgb_img
    except:
        pass
    cv_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv_gray
def testGreenDetection(self):
    # Load image for testing.
    image = cv2.imread(os.path.join(self.path_to_test_data, 'testGreenDetection.jpg'))
    # Check image was loaded correctly
    if image is None:
        raise TypeError
    # Convert image to hsv image.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Declare minimum size.
    MIN_SIZE = 50
    # Find green in image.
    centre_array = cd.detect_green(hsv_image, MIN_SIZE, False)
    # Declare expected values.
    EXPECTED_CX = 381
    EXPECTED_CY = 493
    self.assertEqual(centre_array[0][0], EXPECTED_CX)
    self.assertEqual(centre_array[0][1], EXPECTED_CY)
def testRedDetection(self):
    # Load image for testing.
    image = cv2.imread(os.path.join(self.path_to_test_data, 'testRedDetection.jpg'))
    # Check image was loaded correctly
    if image is None:
        raise TypeError
    # Convert image to hsv image.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Declare minimum size.
    MIN_SIZE = 50
    # Find red in image.
    centre_array = cd.detect_red(hsv_image, MIN_SIZE, False)
    # Declare expected values.
    EXPECTED_CX = 177
    EXPECTED_CY = 443
    self.assertEqual(centre_array[0][0], EXPECTED_CX)
    self.assertEqual(centre_array[0][1], EXPECTED_CY)

# Running this runs all the tests and outputs their results.
def yellowgrayscale(image):
    # enhance yellow then find grayscale
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # define range of yellow color in HSV
    # lower = np.array([40,40,40])
    # upper = np.array([150,255,255])

    # RGB limits
    lower = np.array([80, 80, 40])
    upper = np.array([255, 255, 80])
    # Threshold the image to get only yellow colors
    mask = cv2.inRange(image, lower, upper)
    # show_image('mask',mask)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(image, image, mask=mask)
    res = cv2.addWeighted(res, 1.0, image, 1.0, 0)
    res = grayscale(res)
    return res
def test():
    displace()
    start = time.clock()
    newCalibrate()
    exposure = WebCam.getExposure()
    print(time.clock() - start, "TOTAL TIME")
    while display:
        image = WebCam.getImage()
        contours = GripRunner.run(image)
        Printing.drawContours(image, contours)
        Printing.display(image)
        cv2.waitKey(20)

    # Get average value at the end of test to recalibrate targetAverage
    # image = cv2.imread('TestImages/Cancer.jpg')
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # value = cv2.split(image)[2]
    # # value = np.array([image[:,:,2]])
    # average = cv2.mean(value)
    # print average
def filter_color(self, image, lower_color, upper_color):
    '''
    Masks out the regions of an image that do not lie within the given
    HSV color range.

    Parameters
    ---------
    image : image
    lower_color : tuple >> (h,s,v)
    upper_color : tuple >> (h,s,v)

    Returns
    ---------
    image : image
    '''
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_color = np.array(lower_color, np.uint8)
    upper_color = np.array(upper_color, np.uint8)
    color_mask = cv2.inRange(hsv, lower_color, upper_color)
    return cv2.bitwise_and(image, image, mask=color_mask)
def load_next_pair(self):
    # return the next batch of (image, label) pairs
    # check whether an epoch has been finished
    if self._cur >= len(self.image_paths):
        self._cur = 0
        self.shuffle_dataset()
    # im_PIL = Image.open(self.image_paths[self._cur])
    im = cv2.imread(self.image_paths[self._cur])  # we switch to use OpenCV to load images
    if self.use_HSV:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    # data augmentation
    im = self.data_augment(im)
    label = int(self.labels[self._cur])
    self._cur += 1
    return self.preprocessor(im), label
def get_frame(self):
    ret, frame = self.cap.read(self.camera_id)
    self.frame = cv2.resize(frame, None, fx=self.img_zoomx, fy=self.img_zoomy,
                            interpolation=cv2.INTER_AREA)
    self.frame = cv2.blur(self.frame, (3, 3))
    self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
    self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
    self.colors = []
    if self.escaneando:
        self.draw_osd(self.frame)
    return self.frame
def brightnessFiltering(img):
    # this function filters out the darker pixels
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_bright = np.array([0, 15, 220])     # 0,0,220
    upper_bright = np.array([150, 150, 255])  # 110,5,255
    """cv2.imshow("imago", hsv)
    cv2.waitKey()"""
    mask = cv2.inRange(hsv, lower_bright, upper_bright)
    """cv2.imshow("imagiu", mask)
    cv2.waitKey()"""
    return mask
def brightnessFiltering(img):
    # this function filters out the darker pixels
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_bright = numpy.array([0, 15, 220])     # 0,0,220
    upper_bright = numpy.array([150, 150, 255])  # 110,5,255
    mask = cv2.inRange(hsv, lower_bright, upper_bright)
    return mask
def do_random_brightness(self, img):
    if np.random.rand() > 0.7:
        return img
    # work in a signed integer type so the brightness shift can go negative before clipping
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(int)
    hsv[:, :, 2] += np.random.randint(-40, 70)
    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return img
def extract_color_histogram(image, bins=(8, 8, 8)):
    # extract a 3D color histogram from the HSV color space using
    # the supplied number of `bins` per channel
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, bins, [0, 180, 0, 256, 0, 256])
    # handle normalizing the histogram if we are using OpenCV 2.4.X
    if imutils.is_cv2():
        hist = cv2.normalize(hist)
    # otherwise, perform "in place" normalization in OpenCV 3 (I
    # personally hate the way this is done)
    else:
        cv2.normalize(hist, hist)
    # return the flattened histogram as the feature vector
    return hist.flatten()
def repaint_skin(filename):
    import cv2
    shutil.copy(filename, filename + '.bak')
    frame = cv2.imread(filename)
    HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l = np.array([0, 50, 80], dtype="uint8")
    u = np.array([23, 255, 255], dtype="uint8")
    skin_area = cv2.inRange(HSV, l, u)
    not_skin_area = cv2.bitwise_not(frame, frame, mask=skin_area)
    cv2.imwrite(filename, not_skin_area)
def __call__(self, image, boxes=None, labels=None):
    if self.current == 'BGR' and self.transform == 'HSV':
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    elif self.current == 'HSV' and self.transform == 'BGR':
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    else:
        raise NotImplementedError
    return image, boxes, labels
def tslsr(image):
    """
    Takes an image then returns (mask, circles, rois for each circle)
    """
    image_hsv = cv2.cvtColor(cv2.GaussianBlur(image, (7, 7), 0), cv2.COLOR_BGR2HSV)
    mask = __filterRedColor(image_hsv)
    circles = __findCircles(mask)
    rois = []
    if circles is not None:
        circles = np.round(circles[0, :]).astype("int")
        for (x, y, r) in circles:
            rois.append(__extract_sign_roi(image, (x, y, r)))
    return (mask, circles, rois)
def showImageHSV(image_file):
    image_bgr = cv2.imread(image_file)
    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    H = image_hsv[:, :, 0]
    S = image_hsv[:, :, 1]
    V = image_hsv[:, :, 2]
    plt.subplot(1, 3, 1)
    plt.title('Hue')
    plt.gray()
    plt.imshow(H)
    plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.title('Saturation')
    plt.gray()
    plt.imshow(S)
    plt.axis('off')
    plt.subplot(1, 3, 3)
    plt.title('Value')
    plt.gray()
    plt.imshow(V)
    plt.axis('off')
    plt.show()

# Lab color space
def get_hsv(self):
    cv2.namedWindow('hsv_extractor')
    while True:
        self.grab_frame()

        # Bottom ROI
        cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM - 2),
                      (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2),
                      (0, 250, 0), 2)

        # Top ROI
        cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP - 2),
                      (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2),
                      (0, 250, 0), 2)

        # Object
        cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING),
                      (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING),
                      (238, 130, 238), 2)

        self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

        # Mouse handler
        cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
        cv2.imshow('hsv_extractor', self.img_debug)

        key = cv2.waitKey(0) & 0xFF
        if key == ord('q'):
            break

    self.stop_camera()
    cv2.destroyAllWindows()

# Starts camera (needs to be called before run)
def normalize_img(self):
    # Crop img and convert to hsv
    self.img_roi_bottom = np.copy(self.img[cvsettings.HEIGHT_PADDING_BOTTOM:int(cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT), :])
    self.img_roi_top = np.copy(self.img[cvsettings.HEIGHT_PADDING_TOP:int(cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT), :])
    self.img_roi_bottom_hsv = cv2.cvtColor(self.img_roi_bottom, cv2.COLOR_BGR2HSV).copy()
    self.img_roi_top_hsv = cv2.cvtColor(self.img_roi_top, cv2.COLOR_BGR2HSV).copy()
    # Get our ROI's shape
    # Doesn't matter which one because both of them are the same shape
    self.roi_height, self.roi_width, self.roi_channels = self.img_roi_bottom.shape

# Smooth image and convert to binary image (threshold)
# Filter out colors that are not within the RANGE value
def test_detect():
    dev = AndroidDeviceMinicap()
    dev._adb.start_minitouch()
    time.sleep(3)
    d = SceneDetector('txxscene')
    old, new = None, None
    while True:
        # time.sleep(0.3)
        screen = dev.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w // 2, h // 2))

        # find hsv
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        _, _, V = cv2.split(hsv)
        V[V < 150] = 0
        cv2.imshow('V', V)
        _, _, L = cv2.split(hls)
        L[L < 150] = 0
        cv2.imshow('H', L)

        tic = time.clock()
        new = str(d.detect(img))
        t = time.clock() - tic
        if new != old:
            print('change to', new)
            print('cost time', t)
        old = new

        for _, r in d.current_scene:
            x, y, x1, y1 = r
            cv2.rectangle(img, (x, y), (x1, y1), (0, 255, 0), 2)
        cv2.imshow('test', img)
        cv2.waitKey(1)
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h // 2, w // 2))
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0, 256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()

            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview')
def test_hsv_gradient(img):
    ## gradient test using hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    s = hsv[:, :, 2]
    s = s[:, :, np.newaxis]
    h = np.hstack([np.diff(s, axis=1), (s[:, 0, :] - s[:, -1, :])[:, np.newaxis, :]])
    v = np.vstack([np.diff(s, axis=0), (s[0, :, :] - s[-1, :, :])[np.newaxis, :, :]])
    edge = (h**2 + v**2)**0.5
    edge[edge < 10] = 0
    cv2.imshow('preview', edge)
    cv2.waitKey()
    edge = cv2.GaussianBlur(edge, (3, 3), 1)
    cv2.imshow('preview', edge)
    cv2.waitKey()