The following code examples, extracted from open-source Python projects, illustrate how to use cv2.createCLAHE(). Unless noted otherwise, each snippet assumes import cv2 and import numpy as np.
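Before the project examples, a minimal sketch of the typical call pattern may help; it assumes a single-channel 8-bit image at a hypothetical path input_gray.png:

import cv2

# Load the image as grayscale; CLAHE operates on single-channel data.
gray = cv2.imread("input_gray.png", cv2.IMREAD_GRAYSCALE)

# clipLimit caps each tile's histogram to limit noise amplification;
# tileGridSize controls how many tiles the image is divided into.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(gray)

cv2.imwrite("output_clahe.png", equalized)

Most of the examples below follow this pattern with clipLimit near 2.0 and an 8x8 grid; for color images they first convert to a luminance-chrominance space (LAB or YCrCb) and equalize only the luminance channel.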
def enhance(image_path, clip_limit=3):
    image = cv2.imread(image_path)
    # convert image to LAB color model
    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # split the image into L, A, and B channels
    l_channel, a_channel, b_channel = cv2.split(image_lab)
    # apply CLAHE to the lightness channel
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    cl = clahe.apply(l_channel)
    # merge the CLAHE-enhanced L channel with the original A and B channels
    merged_channels = cv2.merge((cl, a_channel, b_channel))
    # convert the image from the LAB color model back to BGR
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
    return cv2_to_pil(final_image)
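The snippet above ends with a call to cv2_to_pil, a project helper that is not shown. A minimal stand-in, assuming Pillow is installed, could be:

from PIL import Image

def cv2_to_pil(bgr_image):
    # OpenCV stores color images as BGR; PIL expects RGB
    return Image.fromarray(cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB))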
def deviation_from_mean(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_output = clahe.apply(image)
    # use a signed dtype so negative deviations are preserved
    result = clahe_output.astype('int')
    # subtract the local mean over non-overlapping 5x5 blocks
    i = 0
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            sub_image = clahe_output[i:i+5, j:j+5]
            mean = np.mean(sub_image)
            result[i:i+5, j:j+5] = sub_image - mean
            j += 5
        i += 5
    return result
def deviation_from_mean(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_output = clahe.apply(image)
    # use a signed dtype so negative deviations are preserved
    result = clahe_output.astype('int')
    # subtract the local mean over non-overlapping 3x3 blocks
    i = 0
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            sub_image = clahe_output[i:i+3, j:j+3]
            mean = np.mean(sub_image)
            result[i:i+3, j:j+3] = sub_image - mean
            j += 3
        i += 3
    return result
def _process_img(self, img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    gray = clahe.apply(gray)
    return gray
def histogram_equalization(images, adaptive=True):
    # scale float images in [0, 1] up to 8-bit
    _images = np.array(images * 255, dtype=np.uint8)
    pool = ThreadPool(4)  # ThreadPool from multiprocessing.pool
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    def process_image(image):
        # channels-first (C, H, W) -> channels-last (H, W, C)
        image = image.transpose(1, 2, 0)
        if adaptive:
            image[:, :, 0] = clahe.apply(image[:, :, 0])
            image[:, :, 1] = clahe.apply(image[:, :, 1])
            image[:, :, 2] = clahe.apply(image[:, :, 2])
        else:
            image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])
            image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])
            image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])
        return image.transpose(2, 0, 1)

    equalized = pool.map(process_image, _images)
    equalized = np.array(equalized, dtype=np.float32) / 255.
    #visualize_data(np.append(images[:8], equalized[:8], axis=0).transpose(0, 2, 3, 1))
    return equalized
def clahe_augment(img):
    clahe_low = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
    clahe_medium = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    clahe_high = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8, 8))
    img_low = clahe_low.apply(img)
    img_medium = clahe_medium.apply(img)
    img_high = clahe_high.apply(img)
    # stack the three enhancement strengths as channels: (3, H, W) -> (H, W, 3)
    augmented_img = np.array([img_low, img_medium, img_high])
    augmented_img = np.swapaxes(augmented_img, 0, 1)
    augmented_img = np.swapaxes(augmented_img, 1, 2)
    return augmented_img
def standard_deviation_image(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_output = clahe.apply(image)
    result = clahe_output.copy()
    # fill each non-overlapping 20x25 block with its local variance
    # (note: np.var gives the variance, not the standard deviation the name suggests)
    i = 0
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            sub_image = clahe_output[i:i+20, j:j+25]
            var = np.var(sub_image)
            result[i:i+20, j:j+25] = var
            j += 25
        i += 20
    return result
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (three closing/opening passes)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)
    # removing very small contours through the area parameter (noise removal)
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    # OpenCV 3.x signature: findContours returns (image, contours, hierarchy)
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    # removing blobs of microaneurysms and unwanted bigger chunks, considering that
    # they are not straight lines like blood vessels, and filtering by an area interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and 100 <= cv2.contourArea(cnt) <= 3000:
            shape = "circle"
        else:
            shape = "veins"
        if shape == "circle":
            cv2.drawContours(xmask, [cnt], -1, 0, -1)
    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
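The variable names (contrast_enhanced_green_fundus) suggest extract_bv expects the green channel of a retinal fundus photograph, where vessel contrast is highest. A usage sketch, assuming a hypothetical input file fundus.jpg:

fundus = cv2.imread("fundus.jpg")
blue, green, red = cv2.split(fundus)  # OpenCV loads images in BGR order
vessel_mask = extract_bv(green)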
def clahe_equalized(img):
    assert (img.shape[0] == 1)  # expects a single-channel image shaped (1, H, W)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # apply CLAHE to the 2-D channel; OpenCV expects a single-channel 8-bit array
    img_equalized = clahe.apply(np.array(img[0], dtype=np.uint8))
    return img_equalized
def adaptive_histogram_equalization(image):
    # COLOR_SPACE / INVERSE_COLOR_SPACE and INTENSITY_COMPONENT are module-level constants
    image = cv2.cvtColor(image, COLOR_SPACE)
    x, y, z = cv2.split(image)
    adaptive_histogram_equalizer = cv2.createCLAHE(clipLimit=0.01, tileGridSize=(4, 4))
    if INTENSITY_COMPONENT == 1:
        x = adaptive_histogram_equalizer.apply(x)
    elif INTENSITY_COMPONENT == 2:
        y = adaptive_histogram_equalizer.apply(y)
    elif INTENSITY_COMPONENT == 3:
        z = adaptive_histogram_equalizer.apply(z)
    return cv2.cvtColor(cv2.merge((x, y, z)), INVERSE_COLOR_SPACE)
def adaptive_histogram_equalization(image):
    adaptive_histogram_equalizer = cv2.createCLAHE(clipLimit=0.01, tileGridSize=(4, 4))
    return adaptive_histogram_equalizer.apply(image)
def imgHistEqualize(self, img, clipLimit=2.0, tileGridSize=(8, 8)):
    clahe = cv2.createCLAHE(clipLimit, tileGridSize)
    img = clahe.apply(img)
    return img
def histogram_equalization(image, tile):
    # clamp the tile parameter to [0, 100], then map it to a power-of-two grid size
    if tile < 0:
        tile = 0
    elif tile > 100:
        tile = 100
    tile = int(tile / 10)
    img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(2 ** tile, 2 ** tile))
    # equalize only the luma (Y) channel so the colors are left untouched
    img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
    img_out = cv2.cvtColor(img_yuv, cv2.COLOR_YCrCb2BGR)
    # exposure here is presumably skimage.exposure; rescale_intensity stretches the result
    img = exposure.rescale_intensity(img_out)
    return img
def convert_range(data):
    # dst = cv2.convertScaleAbs(src=data, alpha=5000, beta=0)
    clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(20, 20))
    dst = clahe.apply(data)
    return dst
def ContrastHist(Img):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    return clahe.apply(Img)
def hisEqulColor(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    # create a CLAHE object with the default clip limit and tile grid
    clahe = cv2.createCLAHE()
    channels[0] = clahe.apply(channels[0])
    cv2.merge(channels, ycrcb)
    # write the result back into img in place (the third argument is the destination)
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
def make_sets():
    print("\n make_sets() - Enter")
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    claheObject = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    for emotion in emotionsList:
        training, prediction = get_files(emotion)
        for item in training:
            image = cv2.imread(item)  # read image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
            clahe_image = claheObject.apply(gray)
            landmarkVectorList = get_landmarks(clahe_image)
            if landmarkVectorList == "No face detected!":
                pass
            else:
                training_data.append(landmarkVectorList)
                training_labels.append(emotionsList.index(emotion))
        for item in prediction:
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            clahe_image = claheObject.apply(gray)
            landmarkVectorList = get_landmarks(clahe_image)
            if landmarkVectorList == "No face detected!":
                pass
            else:
                prediction_data.append(landmarkVectorList)
                prediction_labels.append(emotionsList.index(emotion))
    print("\n make_sets() - Exit")
    return training_data, training_labels, prediction_data, prediction_labels
def equalize_image_channel_adaptive(channel):
    """ Adaptive image channel equalization (CLAHE). """
    # guard against multi-channel input: a 2-D array's elements are scalars,
    # so a (3,)-shaped element means a color pixel was passed in
    if channel[0][0].shape == (3,):
        raise AttributeError("More than one color channel.")
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))  # assumes `import cv2 as cv`
    return clahe.apply(channel)
def clahe_equalized(imgs):
    assert (len(imgs.shape) == 4)  # 4D array
    assert (imgs.shape[1] == 1)    # check that the channel is 1
    # create a CLAHE object (arguments are optional)
    clahe = cv2.createCLAHE(clipLimit=2.3, tileGridSize=(8, 8))
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i, 0] = clahe.apply(np.array(imgs[i, 0], dtype=np.uint8))
    return imgs_equalized
def convert_image(self, image):
    """ Converts the image to grayscale, resizes it, and auto-adjusts contrast. """
    converted_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    resized_image = cv2.resize(converted_image, (256, 256))
    # adaptive histogram equalization
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(resized_image)
    return img
def __call__(self, image):
    im_out = image.copy()
    hist_equaliser = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    if len(image.shape) == 2:
        # single-channel image: equalize directly
        im_out = hist_equaliser.apply(im_out.astype('uint8'))
    elif len(image.shape) > 2:
        # color image: equalize each channel independently
        for channel in range(3):
            im_out[:, :, channel] = hist_equaliser.apply(im_out.astype('uint8')[:, :, channel])
    return im_out
def do_GET(self):
    global count1
    global count2
    global count3
    global x_roi
    global y_roi
    global xres
    global yres
    global camera
    if self.path.endswith('.mjpg'):
        self.send_response(200)
        self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
        self.end_headers()
        while True:
            # read a camera frame
            img = vs.read()
            #img = cv2.flip(img, 1)
            img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # adjust the gray-level distribution (adaptive method)
            clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
            cl1 = clahe.apply(img_grey)
            #cl1 = cv2.equalizeHist(img_grey)
            clamp = np.uint8(np.interp(cl1, [count2, count3], [0, 255]))
            # second adaptive pass using the clamped limits, stored in a new image
            equ = clahe.apply(clamp)
            #equ = cv2.equalizeHist(clamp)
            r, buf = cv2.imencode('.jpg', equ)
            # note: on Python 3, wfile.write expects bytes rather than str
            self.wfile.write("--jpgboundary")
            self.send_header('Content-type', 'image/jpeg')
            self.send_header('Content-length', str(len(buf)))
            self.end_headers()
            self.wfile.write(bytearray(buf))
            #print(count2)
            time.sleep(0.05)
    if self.path.endswith('.html'):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write('<html><head></head><body>')
        self.wfile.write('<img src="http://127.0.0.1:8080/cam.mjpg"/>')
        self.wfile.write('</body></html>')
        return
def main_func():
    img_path = 'snap.jpg'  # the path of the image to be analyzed
    font = cv2.FONT_HERSHEY_DUPLEX
    emotions = ["anger", "happy", "sadness"]  # emotion list
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))  # histogram equalization object
    face_det = dlib.get_frontal_face_detector()
    land_pred = dlib.shape_predictor("data/DlibPredictor/shape_predictor_68_face_landmarks.dat")
    # load the SVM model trained earlier from the path below
    SUPPORT_VECTOR_MACHINE_clf2 = joblib.load('data/Trained_ML_Models/SVM_emo_model_7.pkl')
    pred_data = []
    pred_labels = []
    a = crop_face(img_path)
    img = cv2.imread(a)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    clahe_gray = clahe.apply(gray)
    landmarks_vec = get_landmarks(clahe_gray, face_det, land_pred)
    #print(len(landmarks_vec))
    #print(landmarks_vec)
    if landmarks_vec == "error":
        pass
    else:
        pred_data.append(landmarks_vec)
    np_test_data = np.array(pred_data)
    a = SUPPORT_VECTOR_MACHINE_clf2.predict(pred_data)
    #cv2.putText(img, 'DETECTED FACIAL EXPRESSION : ', (8, 30), font, 0.7, (0, 0, 255), 2, cv2.LINE_AA)
    #l = len('Facial Expression Detected : ')
    #cv2.putText(img, emotions[a[0]].upper(), (150, 60), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
    #cv2.imshow('test_image', img)
    #print(emotions[a[0]])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return emotions[a[0]]