The following code examples, extracted from open-source Python projects, illustrate how to use cv2.COLOR_RGB2HLS. Each snippet assumes the usual imports for this kind of code: import cv2, import numpy as np, and, where images are read from disk, import matplotlib.image as mpimg.
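For context, cv2.COLOR_RGB2HLS is the conversion code passed to cv2.cvtColor to turn an RGB image into its HLS (Hue, Lightness, Saturation) representation. A minimal sketch of that call, assuming the input is any H x W x 3 RGB uint8 NumPy array (the zero image below is purely illustrative):

import cv2
import numpy as np

# Hypothetical input: any H x W x 3 RGB image as a uint8 array.
image = np.zeros((720, 1280, 3), dtype=np.uint8)

# Convert RGB -> HLS; the channel order is H (0), L (1), S (2).
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
h_channel = hls[:, :, 0]
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]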
def hls_select(image, thresh=(90, 255)):
    # 1) Convert to HLS color space
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    # 2) Apply a threshold to the S channel
    binary = np.zeros_like(S)
    binary[(S > thresh[0]) & (S <= thresh[1])] = 1
    # 3) Return a binary image of the threshold result
    return binary

# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
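A possible way to call hls_select above, assuming a road image loaded as RGB with matplotlib.image.imread; the file name is hypothetical:

import matplotlib.image as mpimg

img = mpimg.imread('test_image.jpg')      # hypothetical RGB test image (uint8 for JPEG)
s_binary = hls_select(img, thresh=(90, 255))
# s_binary is a single-channel array of 0s and 1s with the same height/width as img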
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note color_binary[:, :, 0] is all 0s, effectively an all-black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
def color_thresh(self, img, thresh=(0, 255)):
    # Convert to HLS color space and separate the S channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    s_channel = hls[:, :, 2]
    # Threshold the saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= thresh[0]) & (s_channel <= thresh[1])] = 1
    return s_binary

# get binary image based on sobel gradient thresholding
def HLS_sobel(img, s_thresh=(120, 255), sx_thresh=(20, 255), l_thresh=(40, 255)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    # h_channel = hls[:, :, 0]
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    # sobelx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255))
    # l_channel_col = np.dstack((l_channel, l_channel, l_channel))
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Threshold lightness channel
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
    channels = 255 * np.dstack((l_binary, sxbinary, s_binary)).astype('uint8')
    binary = np.zeros_like(sxbinary)
    binary[((l_binary == 1) & (s_binary == 1)) | (sxbinary == 1)] = 1
    binary = 255 * np.dstack((binary, binary, binary)).astype('uint8')
    return binary, channels
def random_shadow(image):
    """
    Generates and adds a random shadow.
    """
    # (x1, y1) and (x2, y2) define a line
    # xm, ym give all pixel coordinates of the image
    x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
    x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
    xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]

    # Mathematically speaking, we want to set 1 below the line and zero otherwise.
    # Our coordinate system is upside down, so above the line:
    #   (ym - y1) / (xm - x1) > (y2 - y1) / (x2 - x1)
    # Since x2 == x1 would cause a zero-division problem, we write it as:
    #   (ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0
    mask = np.zeros_like(image[:, :, 1])
    mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1

    # Choose which side should have the shadow and pick a darkening ratio
    cond = mask == np.random.randint(2)
    s_ratio = np.random.uniform(low=0.2, high=0.5)

    # Scale the L (lightness) channel in HLS to darken the shadowed region
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
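A usage sketch for the function above. It assumes IMAGE_HEIGHT and IMAGE_WIDTH are module-level constants matching the input frames, as in the example; the values and the random frame below are illustrative only:

IMAGE_HEIGHT, IMAGE_WIDTH = 160, 320   # assumed frame size
frame = np.random.randint(0, 256, (IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.uint8)
shadowed = random_shadow(frame)        # same shape as frame, with one side of a random line darkened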
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
                     hist_bins=32, orient=9,
                     pix_per_cell=8, cell_per_block=2, hog_channel=0,
                     spatial_feat=True, hist_feat=True, hog_feat=True):
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        file_features = []
        # Read in each one by one
        image = mpimg.imread(file)
        # apply color conversion if other than 'RGB'
        if color_space != 'RGB':
            if color_space == 'HSV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            elif color_space == 'LUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            elif color_space == 'HLS':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            elif color_space == 'YUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            elif color_space == 'YCrCb':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
        else:
            feature_image = np.copy(image)

        if spatial_feat == True:
            spatial_features = bin_spatial(feature_image, size=spatial_size)
            file_features.append(spatial_features)
        if hist_feat == True:
            # Apply color_hist()
            hist_features = color_hist(feature_image, nbins=hist_bins)
            file_features.append(hist_features)
        if hog_feat == True:
            # Call get_hog_features() with vis=False, feature_vec=True
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(feature_image[:, :, channel],
                                                         orient, pix_per_cell, cell_per_block,
                                                         vis=False, feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block,
                                                vis=False, feature_vec=True)
            # Append the new feature vector to the features list
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    # Return list of feature vectors
    return features

# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, hist_range=(0, 256), orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    img_features = []
    # apply color conversion if other than 'RGB'
    if color_space != 'RGB':
        if color_space == 'HSV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)

    if spatial_feat == True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        img_features.append(spatial_features)
    if hist_feat == True:
        # Apply color_hist()
        hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
        img_features.append(hist_features)
    if hog_feat == True:
        # Call get_hog_features() with vis=False, feature_vec=True
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                            pix_per_cell, cell_per_block,
                                            vis=False, feature_vec=True)
        # Append the new feature vector to the features list
        img_features.append(hog_features)
    # Return the concatenated feature vector
    return np.concatenate(img_features)

# Convert windows to heatmap numpy array.