The following 48 code examples, extracted from open-source Python projects, illustrate how to use cv2.COLOR_RGB2HSV.
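For orientation before the project examples, here is a minimal, self-contained usage sketch (the file name "example.jpg" is only a placeholder, not taken from any of the projects below). Note that OpenCV reads images in BGR order, so convert to RGB first if you want cv2.COLOR_RGB2HSV to produce the expected hues.

import cv2

# "example.jpg" is a placeholder path; cv2.imread returns the image in BGR order.
bgr = cv2.imread("example.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

# For 8-bit input, OpenCV scales HSV to H in [0, 179], S and V in [0, 255].
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv)
print(hsv.shape, h.dtype)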
def __bound_contours(roi):
    """
    Returns a modified roi (non-destructive) and the rectangles found by the algorithm.
    @roi region of interest to find contours
    @return (roi, rects)
    """
    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)
    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]  # get largest five contour area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough, create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x + w, y + h), (0, 255, 0), 1)

    return (roi_copy, rects)
def get_color_medio(self, roi, a, b, imprimir=False):
    xl, yl, ch = roi.shape
    roiyuv = cv2.cvtColor(roi, cv2.COLOR_RGB2YUV)
    roihsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    h, s, v = cv2.split(roihsv)
    mask = (h < 5)
    h[mask] = 200
    roihsv = cv2.merge((h, s, v))
    std = np.std(roiyuv.reshape(xl * yl, 3), axis=0)
    media = np.mean(roihsv.reshape(xl * yl, 3), axis=0) - 60
    mediayuv = np.mean(roiyuv.reshape(xl * yl, 3), axis=0)

    if std[0] < 12 and std[1] < 12 and std[2] < 12:
        # if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
        media = np.mean(roihsv.reshape(xl * yl, 3), axis=0)
        # yellow has a saturation of about 65 and a value over 200
        if media[1] < 60:  # and (abs(media[0]-30)>10):
            # white
            return [-10, 0, 0]
        else:
            return media
    else:
        return None
def optical_flow(one, two):
    """
    method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
    """
    one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
    two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))
    # set saturation
    hsv[:, :, 1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:, :, 1]
    # obtain dense optical flow parameters
    flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None, pyr_scale=0.5,
                                        levels=1, winsize=15, iterations=2,
                                        poly_n=5, poly_sigma=1.1, flags=0)
    # convert from cartesian to polar
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # hue corresponds to direction
    hsv[:, :, 0] = ang * (180 / np.pi / 2)
    # value corresponds to magnitude
    hsv[:, :, 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # convert HSV to float32
    hsv = np.asarray(hsv, dtype=np.float32)
    rgb_flow = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return rgb_flow
def dumper(model, kind, fname=None):
    if not fname:
        fname = '{}/models/{}-{}.h5'.format(ROOT, str(datetime.now()).replace(' ', '-'), kind)
    try:
        with open(fname, 'w') as f:
            model.save(fname)
    except IOError:
        raise IOError('Unable to open: {}'.format(fname))
    return fname

# def random_bright_shift(image):
#     print np.asarray(image, dtype=np.uint8)
#     image = np.asarray(image, dtype=np.uint8)
#     image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
#     random_bright = .25 + np.random.uniform()
#     image1[:,:,2] = image1[:,:,2] * random_bright
#     image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
#     return image1
def image_brightness_adjust(image, brightness):
    """
    Convert an image to HSV colour space, apply brightness adjustment

    Parameters
    ----------
    image : numpy array
        The image
    brightness : float
        brightness adjustment factor

    Returns
    -------
    image : numpy array
        The S channel of the image
    """
    arr = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    arr[:, :, 2] = arr[:, :, 2] * brightness
    return arr[:, :, 1]
def get_hsv_mask(img, debug=False):
    assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
    assert img.ndim == 3, 'skin detection can only work on color images'
    logger.debug('getting hsv mask')

    lower_thresh = numpy.array([0, 50, 0], dtype=numpy.uint8)
    upper_thresh = numpy.array([120, 150, 255], dtype=numpy.uint8)

    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    msk_hsv = cv2.inRange(img_hsv, lower_thresh, upper_thresh)

    msk_hsv[msk_hsv < 128] = 0
    msk_hsv[msk_hsv >= 128] = 1

    if debug:
        scripts.display('input', img)
        scripts.display('mask_hsv', msk_hsv)

    return msk_hsv.astype(float)
def scan(images, args):
    thresholds = args[0]
    try:
        image, x, y = images.get(timeout=0.3)
    except:
        return
    hsv = cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2HSV)
    image_size = hsv[:, :, 0].size
    val = (len(np.where((hsv[:, :, 0] > thresholds[0]) & (hsv[:, :, 1] > thresholds[1]))[0])) / float(image_size)
    if val > TVAL:
        xvals = args[1].get()
        xvals.append(x)
        args[1].put(xvals)
        yvals = args[2].get()
        yvals.append(y)
        args[2].put(yvals)
        args[3].increment()
    images.task_done()
def accumulated_histogram(images, args):
    histogram = args[0]
    try:
        image, x, y = images.get(timeout=0.3)
    except:
        return
    hsv = cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2HSV)
    current_histogram = histogram.get()
    new_histogram = list(map(lambda x: cv2.calcHist([hsv[:, :, x]], [0], None, [256], [0, 256],
                                                    hist=current_histogram[x], accumulate=True),
                             range(3)))
    histogram.put(new_histogram)
    images.task_done()
def check_coords(handler, xy, thresholds, tval, read_size, mask_handler):
    image = handler.read_region((xy[0], xy[1]), 0, read_size)
    hsv = cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2HSV)
    image_size = hsv[:, :, 0].size
    max_hue = max(thresholds[0])
    min_hue = min(thresholds[0])
    val = (len(np.where(((hsv[:, :, 0] > max_hue) | (hsv[:, :, 0] < min_hue)) &
                        (hsv[:, :, 1] > thresholds[1]) &
                        (hsv[:, :, 2] > 50))[0])) / float(image_size)
    if not val > tval:
        return None
    mask = None
    if mask_handler:
        mask = mask_handler.read_region((xy[0], xy[1]), 0, read_size)
    tumor = check_tumor_value(mask)
    return image, xy[0], xy[1], tumor
def change_brightness(img_arr):
    # print('change brightness called')
    adjusted_imgs = np.array([img_arr[0]])
    for img_num in range(0, len(img_arr)):
        img = img_arr[img_num]
        # print('array access')
        # show_image(img)
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        # print('rgb2hsv')
        # show_image(hsv)
        rando = np.random.uniform()
        # print('rando is', rando)
        hsv[:, :, 2] = hsv[:, :, 2] * (.25 + rando)
        new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        # print('hsv2rgb')
        # show_image(new_img)
        # new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
        # show_images(img.reshape((1,) + img.shape), new_img.reshape((1,) + new_img.shape))
        adjusted_imgs = np.append(adjusted_imgs, new_img.reshape((1,) + new_img.shape), axis=0)
    adjusted_imgs = np.delete(adjusted_imgs, 0, 0)
    return adjusted_imgs
def get_inside_boxes(image, v, h):
    height, width, channels = image.shape
    data = []
    hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    for i in range(0, int(height), int(h)):
        row = []
        for j in range(0, int(width), int(v)):
            x = int(i + v / 2)
            y = int(j + h / 2)
            if x < height and y < width:
                row.append(image[x][y])
        if len(row) > 0:
            data.append(row)
    data = np.round(np.array(data), decimals=0)
    labels, numLabels = scipy.ndimage.label(data)
    return data
def _brightness(image, min=0.5, max=2.0):
    '''
    Randomly change the brightness of the input image.

    Protected against overflow.
    '''
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)

    random_br = np.random.uniform(min, max)

    # To protect against overflow: Calculate a mask for all pixels
    # where adjustment of the brightness would exceed the maximum
    # brightness value and set the value to the maximum at those pixels.
    mask = hsv[:, :, 2] * random_br > 255
    v_channel = np.where(mask, 255, hsv[:, :, 2] * random_br)
    hsv[:, :, 2] = v_channel

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def histogram_eq(image):
    '''
    Perform histogram equalization on the input image.

    See https://en.wikipedia.org/wiki/Histogram_equalization.
    '''
    image1 = np.copy(image)
    image1 = cv2.cvtColor(image1, cv2.COLOR_RGB2HSV)
    image1[:, :, 2] = cv2.equalizeHist(image1[:, :, 2])
    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
    return image1
def alter_HSV(img, change_probability=0.6):
    if random.random() < 1 - change_probability:
        return img
    addToHue = random.randint(0, 179)
    addToSaturation = random.gauss(60, 20)
    addToValue = random.randint(-50, 50)
    hsvVersion = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    channels = hsvVersion.transpose(2, 0, 1)
    channels[0] = ((channels[0].astype(int) + addToHue) % 180).astype(np.uint8)
    channels[1] = (np.maximum(0, np.minimum(255, (channels[1].astype(int) + addToSaturation)))).astype(np.uint8)
    channels[2] = (np.maximum(0, np.minimum(255, (channels[2].astype(int) + addToValue)))).astype(np.uint8)
    hsvVersion = channels.transpose(1, 2, 0)
    return cv2.cvtColor(hsvVersion, cv2.COLOR_HSV2RGB)

#%%
def optical_flow(one, two):
    """
    method taken from https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4

    input: image_current, image_next (RGB images)
    calculates optical flow magnitude and angle and places it into an HSV image
    """
    one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
    two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))
    # set saturation
    hsv[:, :, 1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:, :, 1]
    # obtain dense optical flow parameters
    flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None, pyr_scale=0.5,
                                        levels=1, winsize=10, iterations=2,
                                        poly_n=5, poly_sigma=1.1, flags=0)
    # convert from cartesian to polar
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # hue corresponds to direction
    hsv[:, :, 0] = ang * (180 / np.pi / 2)
    # value corresponds to magnitude
    hsv[:, :, 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # convert HSV to float32
    hsv = np.asarray(hsv, dtype=np.float32)
    rgb_flow = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return rgb_flow
def hsv_augment(im, hue, saturation, value):
    """
    Augments an image with additive hue, saturation and value.

    `im` should be 01c RGB in range 0-1.
    `hue`, `saturation` and `value` should be scalars between -1 and 1.

    Return value: a 01c RGB image.
    """
    # Convert to HSV
    im = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)

    # Rescale hue from 0-360 to 0-1.
    im[:, :, 0] /= 360.

    # Mask value == 0
    black_indices = im[:, :, 2] == 0

    # Add random hue, saturation and value
    im[:, :, 0] = (im[:, :, 0] + hue) % 1
    im[:, :, 1] = im[:, :, 1] + saturation
    im[:, :, 2] = im[:, :, 2] + value

    # Pixels that were black stay black
    im[black_indices, 2] = 0

    # Clip pixels from 0 to 1
    im = np.clip(im, 0, 1)

    # Rescale hue from 0-1 to 0-360.
    im[:, :, 0] *= 360.

    # Convert back to RGB in 0-1 range.
    im = cv2.cvtColor(im, cv2.COLOR_HSV2RGB)

    return im
def rgb_to_hsv(colors):
    _colors = np.array([colors], dtype=np.uint8)  # cv2.cvtColor only accepts a 2d array
    hsv_colors = cv2.cvtColor(_colors, cv2.COLOR_RGB2HSV)[0].astype(np.double)
    return hsv_colors
def convertRGBtoHSV(color):
    """ Convert an RGB color to HSV """
    clr = np.uint8([[color]])
    return cv2.cvtColor(clr, cv2.COLOR_RGB2HSV)
def extract(images, args):
    thresholds = args[0]
    target_dir = args[1]
    image, x, y = images.get()
    hsv = cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2HSV)
    image_size = hsv[:, :, 0].size
    val = (len(np.where((hsv[:, :, 0] > thresholds[0]) & (hsv[:, :, 1] > thresholds[1]))[0])) / float(image_size)
    if val > TVAL:
        image.save(os.path.join(target_dir, "%s_%s.jpg" % (x, y)), format="JPEG", quality=50)
    images.task_done()
def check_candidate(candidates, args):
    samples = args[0]
    thresholds = args[1]
    target_dir = args[2]
    name = args[3]
    try:
        image, mask, x, y = candidates.get(timeout=0.3)
    except Queue.Empty:
        # print 'timeout get candidate'
        pass
    else:
        tumor = check_in_mask(mask)
        hsv = cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2HSV)
        image_size = hsv[:, :, 0].size
        val = (len(np.where((hsv[:, :, 0] > thresholds[0]) & (hsv[:, :, 1] > thresholds[1]))[0])) / float(image_size)
        if val > TVAL:
            if random() < VAL_TRAIN_PROP:
                target_dir = os.path.join(target_dir, "validation")
            else:
                target_dir = os.path.join(target_dir, "train")
            target_dir = os.path.join(target_dir, name)
            if (tumor > 0.0) and (tumor < 1.0):
                target_dir = os.path.join(target_dir, "boundaries")
            elif tumor == 0.0:
                target_dir = os.path.join(target_dir, "healthy")
            else:
                target_dir = os.path.join(target_dir, "tumor")
            image.save(os.path.join(target_dir, "%s_%s_%s.jpg" % (x, y, tumor)), format="JPEG", quality=50)
            try:
                samples.put("DONE", timeout=0.3)
            except:
                # print 'samples full'
                pass
        candidates.task_done()
def cvtRGB2HSV(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    return frame
def hsvPassShadowRemoval(src, shadowThreshold):
    blurLevel = 3
    height, width = src.shape[:2]
    imgHSV = cv2.cvtColor(src, cv2.COLOR_RGB2HSV)

    gaussianBlur = cv2.GaussianBlur(imgHSV, (blurLevel, blurLevel), 0)
    hueImg, satImg, valImg = cv2.split(gaussianBlur)

    NSVDI = np.zeros((height, width, 1), np.uint8)
    count = height * width

    with np.errstate(divide='ignore'):
        # for i in range(0, height):
        #     for j in range(0, width):
        #         sat = int(satImg[i, j])
        #         val = int(valImg[i, j])
        #         NSVDI[i, j] = (satImg[i, j] - valImg[i, j]) / ((satImg[i, j] + valImg[i, j]) * 1.0)
        NSVDI = (satImg + valImg) / ((satImg - valImg) * 1)

    thresh = np.sum(NSVDI)
    avg = thresh / (count * 1.0)

    # for i in range(0, height):
    #     for j in range(0, width):
    #         if NSVDI[i, j] >= 0.25:
    #             hueImg[i, j] = 255
    #             satImg[i, j] = 255
    #             valImg[i, j] = 255
    #         else:
    #             hueImg[i, j] = 0
    #             satImg[i, j] = 0
    #             valImg[i, j] = 0

    if shadowThreshold is None:
        avg = avg
    else:
        avg = shadowThreshold

    np.where(NSVDI > avg, 255, 0)

    _, threshold = cv2.threshold(NSVDI, avg, 255, cv2.THRESH_BINARY_INV)
    output = threshold
    return output
def ProcessImage(self, image):
    global autoMode
    # Get the red section of the image
    image = cv2.medianBlur(image, 5)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)  # Swaps the red and blue channels!
    red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))

    # Find the contours
    contours, hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Go through each contour
    foundArea = -1
    foundX = -1
    foundY = -1
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cx = x + (w / 2)
        cy = y + (h / 2)
        area = w * h
        if foundArea < area:
            foundArea = area
            foundX = cx
            foundY = cy
    if foundArea > 0:
        ball = [foundX, foundY, foundArea]
    else:
        ball = None

    # Set drives or report ball status
    if autoMode:
        self.SetSpeedFromBall(ball)
    else:
        if ball:
            print 'Ball at %d,%d (%d)' % (foundX, foundY, foundArea)
        else:
            print 'No ball'

# Set the motor speed from the ball position
def ProcessImage(self, image):
    # Get the red section of the image
    image = cv2.medianBlur(image, 5)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)  # Swaps the red and blue channels!
    red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))

    # Find the contours
    contours, hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Go through each contour
    foundArea = -1
    foundX = -1
    foundY = -1
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cx = x + (w / 2)
        cy = y + (h / 2)
        area = w * h
        if foundArea < area:
            foundArea = area
            foundX = cx
            foundY = cy
    if foundArea > 0:
        ball = [foundX, foundY, foundArea]
    else:
        ball = None

    # Set drives or report ball status
    self.SetSpeedFromBall(ball)

# Set the motor speed from the ball position
def change_one(img):
    print('before')
    show_image(img)
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    print('rgb2hsv')
    show_image(hsv)
    rando = np.random.uniform()
    # print('rando is', rando)
    hsv[:, :, 2] = hsv[:, :, 2] * (.25 + rando)
    new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    print('hsv2rgb')
    show_image(new_img)
def resize_file_images(img_src, dest_file, width, height, start=0, end=0):
    # print('started')
    img_arr = np.load(img_src)
    # print('resized_imgs shape', resized_imgs.shape)
    if end == 0:
        end = img_arr.shape[0]
    for i in range(start, end):
        if i % 500 == 0:
            print('index is', i)
        img = img_arr[i]
        ################################################################################
        # remove the color change when you don't want that
        resized = cv2.resize(img, (width, height))
        # resized = cv2.resize(cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:, :, 1], (width, height))
        # cv2.resize((cv2.cvtColor(img, cv2.COLOR_RGB2HSV))[:,:,1], (32, 16))
        resized = resized.reshape((1,) + resized.shape)
        if i == start:
            resized_imgs = resized
        else:
            resized_imgs = np.append(resized_imgs, resized, axis=0)
    np.save(dest_file, resized_imgs)
    print('final shape', resized_imgs.shape, 'saved to', dest_file)
def maskImg(image, lowArray, highArray):
    # Convert image to HSV color space
    hsvImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    # Convert to binary image using lowest and highest color values
    maskedImage = cv2.inRange(hsvImage, lowArray, highArray)
    kernel = np.ones((4, 4), np.uint8)
    maskedImage = cv2.erode(maskedImage, kernel, iterations=1)
    # maskedImage = cv2.dilate(maskedImage, kernel, iterations=1)
    return maskedImage
def maskImg(image):
    # Convert image from RGB (red green blue) to HSV (hue saturation value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    # Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    # Removes white noise using an open transformation
    kernel = np.ones((4, 4), np.uint8)
    # maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage
def maskImg(image):
    # Convert image from RGB (red green blue) to HSV (hue saturation value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    # Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    # Removes white noise using an open transformation
    kernel = np.ones((4, 4), np.uint8)
    # maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage

# Find and return two matching rectangular contours if they exist, otherwise return none.
def random_brightness(image):
    """
    Randomly adjust brightness of the image.
    """
    # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    ratio = 1.0 + 0.4 * (np.random.rand() - 0.5)
    hsv[:, :, 2] = hsv[:, :, 2] * ratio
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def rgb2hsv(frame):
    return cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
def image_HSV(img):
    # HSV brightness transform
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    brightness = np.random.uniform(0.5, 1.1)
    img[:, :, 2] = img[:, :, 2] * brightness
    return cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
def randomize_brightness(image):
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    random_brightness = .1 + np.random.uniform()
    image[:, :, 2] = image[:, :, 2] * random_brightness
    image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    return image
def augment_brightness_camera_images(image):
    image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    random_bright = .25 + np.random.uniform()
    # print(random_bright)
    image1[:, :, 2] = image1[:, :, 2] * random_bright
    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
    return image1
def getColorThresholdMask(image, hue, hueMargin=20, satLimit=100, valLimit=30, swapBR=False):
    lowerFilter = np.array([hue - hueMargin, satLimit, valLimit], dtype=np.uint8)
    upperFilter = np.array([hue + hueMargin, 255, 255], dtype=np.uint8)
    if swapBR:
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    else:
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lowerFilter, upperFilter)
    return mask

# Finds the largest contour matching the specified color
# Current color options are 'green' and 'red'
def render(self, img_rgb):
    # warming filter: increase red, decrease blue
    c_r, c_g, c_b = cv2.split(img_rgb)
    c_r = cv2.LUT(c_r, self.incr_ch_lut).astype(np.uint8)
    c_b = cv2.LUT(c_b, self.decr_ch_lut).astype(np.uint8)
    img_rgb = cv2.merge((c_r, c_g, c_b))

    # increase color saturation
    c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
    c_s = cv2.LUT(c_s, self.incr_ch_lut).astype(np.uint8)

    return cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)
def render(self, img_rgb):
    # cooling filter: increase blue, decrease red
    c_r, c_g, c_b = cv2.split(img_rgb)
    c_r = cv2.LUT(c_r, self.decr_ch_lut).astype(np.uint8)
    c_b = cv2.LUT(c_b, self.incr_ch_lut).astype(np.uint8)
    img_rgb = cv2.merge((c_r, c_g, c_b))

    # decrease color saturation
    c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV))
    c_s = cv2.LUT(c_s, self.decr_ch_lut).astype(np.uint8)

    return cv2.cvtColor(cv2.merge((c_h, c_s, c_v)), cv2.COLOR_HSV2RGB)
def get_newest_frame(self):
    if self.live == 1:
        if self.grab_status:
            with self.icam.RetrieveResult(200, py.TimeoutHandling_Return) as result:
                image = cv2.cvtColor(result.Array, cv2.COLOR_RGB2HSV)
                self.frame_count = self.frame_count + 1
                self.__calc_frametime(time.time())
                return image
        else:
            raise Exception('Camera not Grabbing')
    else:
        if self.cap.isOpened() or True:
            ret, frame = self.cap.read()
            self.frame_count = self.frame_count + 1
            self.__calc_frametime(time.time())
            return cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        else:
            print('gooaaaaa')
            self.cap.release()
            cv2.destroyAllWindows()
def augment_brightness_camera_images(image):
    image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    random_bright = .25 + np.random.uniform()
    image1[:, :, 2] = image1[:, :, 2] * random_bright
    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
    return image1
def change_brightness(image):
    image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    image1 = np.array(image1, dtype=np.float64)
    random_bright = 0.5 + np.random.uniform()
    image1[:, :, 2] = image1[:, :, 2] * random_bright
    image1[:, :, 2][image1[:, :, 2] > 255] = 255
    image1 = np.array(image1, dtype=np.uint8)
    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
    return image1
def view(self):
    """
    Returns the image which the eye is currently seeing.

    Attribute self.rgb is set to the current image which the eye is seeing.
    """
    # Rotate the sample points
    c = math.cos(self.orientation)
    s = math.sin(self.orientation)
    rot = np.array([[c, -s], [s, c]])   # XY plane counterclockwise
    global_coords = self.eye_coords.reshape(self.eye_coords.shape[0], -1)
    global_coords = np.matmul(rot, global_coords)

    # Scale/zoom the sample points
    global_coords *= self.scale

    # Position the sample points
    global_coords += np.array(self.position).reshape(2, 1)
    global_coords = tuple(global_coords)

    # Extract the view from the larger image
    channels = []
    for c_idx in range(3):
        ch = scipy.ndimage.map_coordinates(self.image[:, :, c_idx], global_coords,
                                           mode='constant',    # No-wrap, fill
                                           cval=255,           # Fill value
                                           order=1)            # Linear interp
        channels.append(ch.reshape(self.eye_dimensions))
    self.rgb = rgb = np.dstack(channels)

    # Convert view to HSV and encode HSV to SDR.
    hsv = np.array(rgb, dtype=np.float32)
    hsv /= 255.
    # Performance Note: OpenCV2's cvtColor() is about 40x faster than
    # matplotlib.colors.rgb_to_hsv().
    hsv = cv2.cvtColor(hsv, cv2.COLOR_RGB2HSV)
    hue_sdr = self.hue_encoder.encode(hsv[..., 0])
    sat_sdr = self.sat_encoder.encode(hsv[..., 1])
    val_sdr = self.val_encoder.encode(hsv[..., 2])
    color_sdr = np.logical_and(np.logical_and(hue_sdr, sat_sdr), val_sdr)

    # Extract edge samples
    angles = scipy.ndimage.map_coordinates(self.edge_angles, global_coords,
                                           mode='constant',    # No-wrap, fill
                                           cval=0,             # Fill value
                                           order=0)            # Take nearest value, no interp.
    mags = scipy.ndimage.map_coordinates(self.edge_magnitues, global_coords,
                                         mode='constant',      # No-wrap, fill
                                         cval=0,               # Fill value
                                         order=1)              # Linear interp
    # Both the eye's orientation and the edge directions are measured
    # counterclockwise so subtracting them makes the resulting edge features
    # invariant with respect to relative angle between the eye and the
    # feature.
    angles -= self.orientation     # Edge encoder does modulus for me.
    angles = angles.reshape(self.eye_dimensions)
    mags = mags.reshape(self.eye_dimensions)
    edge_sdr = self.edge_encoder.encode(angles, mags)

    self.optic_sdr.dense = np.dstack([color_sdr, edge_sdr])
    return self.optic_sdr
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
                     hist_bins=32, orient=9,
                     pix_per_cell=8, cell_per_block=2, hog_channel=0,
                     spatial_feat=True, hist_feat=True, hog_feat=True):
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        file_features = []
        # Read in each one by one
        image = mpimg.imread(file)
        # apply color conversion if other than 'RGB'
        if color_space != 'RGB':
            if color_space == 'HSV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            elif color_space == 'LUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            elif color_space == 'HLS':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            elif color_space == 'YUV':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            elif color_space == 'YCrCb':
                feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
        else:
            feature_image = np.copy(image)

        if spatial_feat == True:
            spatial_features = bin_spatial(feature_image, size=spatial_size)
            file_features.append(spatial_features)
        if hist_feat == True:
            # Apply color_hist()
            hist_features = color_hist(feature_image, nbins=hist_bins)
            file_features.append(hist_features)
        if hog_feat == True:
            # Call get_hog_features() with vis=False, feature_vec=True
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(feature_image[:, :, channel],
                                                         orient, pix_per_cell, cell_per_block,
                                                         vis=False, feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block,
                                                vis=False, feature_vec=True)
            # Append the new feature vector to the features list
            file_features.append(hog_features)
        features.append(np.concatenate(file_features))
    # Return list of feature vectors
    return features

# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, hist_range=(0, 256), orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True):
    img_features = []
    # apply color conversion if other than 'RGB'
    if color_space != 'RGB':
        if color_space == 'HSV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)

    if spatial_feat == True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        img_features.append(spatial_features)
    if hist_feat == True:
        # Apply color_hist()
        hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
        img_features.append(hist_features)
    if hog_feat == True:
        # Call get_hog_features() with vis=False, feature_vec=True
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                            pix_per_cell, cell_per_block,
                                            vis=False, feature_vec=True)
        # Append the new feature vector to the features list
        img_features.append(hog_features)

    # Return the concatenated feature vector
    return np.concatenate(img_features)

# Convert windows to heatmap numpy array.