We extracted the following 50 code examples from open source Python projects to illustrate how to use cv2.add().
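Before the examples, note that cv2.add() performs saturated, not modular, arithmetic: on uint8 inputs the result is clamped to [0, 255], whereas plain NumPy addition wraps around. A minimal sketch of the difference:

import cv2
import numpy as np

x = np.uint8([250])
y = np.uint8([10])

print(cv2.add(x, y))  # [[255]]  -> 250 + 10 = 260 is clamped to 255
print(x + y)          # [4]      -> NumPy wraps modulo 256: (250 + 10) % 256 = 4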
def addRectangulars(self, frame_from, corners_arr):
    add_frame = np.zeros(frame_from.shape, np.uint8)
    cv2.polylines(add_frame, [corners_arr], True, (0, 255, 255))
    frame = cv2.add(add_frame, frame_from)
    return frame
def process_video(path_to_video):
    cap = cv2.VideoCapture(path_to_video)  # Load video
    while True:
        ret, frame = cap.read()
        print frame
        if ret is False or (cv2.waitKey(30) & 0xff) == 27:
            break  # Exit if the video ended

        mask = np.zeros_like(frame)  # init mask
        contours = find_contours(frame)
        plates, plates_images, mask = find_plate_numbers(frame, contours, mask)
        print "Plate Numbers: %s" % ", ".join(plates)

        processed_frame = cv2.add(frame, mask)  # Apply the mask to image
        cv2.imshow('frame', processed_frame)
    cv2.destroyAllWindows()
    cap.release()

###########################################
# Run The Program #########################
###########################################
def sumNormalizedFeatures(features, levels=9, startSize=(1983*8, 1088*8)):
    """
    Normalizes the feature maps in argument features and combines them into one.

    Arguments:
        features  : list of feature maps (images)
        levels    : the levels of the Gaussian pyramid used to calculate the feature maps.
        startSize : the base size of the Gaussian pyramid used to calculate the feature maps.

    returns: a combined feature map.
    """
    commonWidth = startSize[0] / 2**(levels/2 - 1)
    commonHeight = startSize[1] / 2**(levels/2 - 1)
    commonSize = commonWidth, commonHeight
    logger.info("Size of conspicuity map: %s", commonSize)
    consp = N(cv2.resize(features[0][1], commonSize))
    for f in features[1:]:
        resized = N(cv2.resize(f[1], commonSize))
        consp = cv2.add(consp, resized)
    return consp
def overlay_img(self):
    """Overlay the transparent, transformed image of the arc onto our CV image"""
    # overlay the arc on the image
    rows, cols, channels = self.transformed.shape
    roi = self.cv_image[0:rows, 0:cols]

    # change arc_image to grayscale
    arc2gray = cv2.cvtColor(self.transformed, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(arc2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # black out area of arc in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img2_fg = cv2.bitwise_and(self.transformed, self.transformed, mask=mask)

    # put arc on ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    self.cv_image[0:rows, 0:cols] = dst
def draw_tracks(self, frame, debug=False):
    """Draw tracks

    Parameters
    ----------
    frame : np.array
        Image frame
    debug : bool
        Debug mode (Default value = False)
    """
    if debug is False:
        return

    # Create a mask image and color for drawing purposes
    mask = np.zeros_like(frame)
    color = [0, 0, 255]

    # Draw tracks
    for i, (new, old) in enumerate(zip(self.kp_cur, self.kp_ref)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (a, b), (c, d), color, 1)
    img = cv2.add(frame, mask)

    cv2.imshow("Feature Tracks", img)
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
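A usage sketch for the augmentation above, on a synthetic BGR image (the input array is illustrative, not from the original project):

import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # synthetic BGR image
aug = randomHueSaturationValue(img, u=1.0)  # u=1.0 guarantees the shift branch runs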
def blob__Detec(image):
    img = copy(image)
    height, width, channels = img.shape
    new_img = np.ones((height, width, channels), np.uint8)
    HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    Yellow = {'min': (20, 100, 100), 'max': (30, 255, 255)}
    Blue = {'min': (50, 100, 100), 'max': (100, 255, 255)}
    Brown = {'min': (0, 100, 0), 'max': (20, 255, 255)}
    mask_b = cv2.inRange(HSV, Blue['min'], Blue['max'])
    mask_br = cv2.inRange(HSV, Brown['min'], Brown['max'])
    mask_y = cv2.inRange(HSV, Yellow['min'], Yellow['max'])
    blue = cv2.bitwise_and(img, img, mask=mask_b)
    yellow = cv2.bitwise_and(img, img, mask=mask_y)
    brown = cv2.bitwise_and(img, img, mask=mask_br)
    new_img = cv2.add(blue, brown)
    new_img = cv2.add(new_img, yellow)
    return new_img
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window, save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8)
    while True:
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window, frame_with_rect)

        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file, template_img)
            break
    cv2.destroyAllWindows()
def addRectangulars(frame_from, corners_arr):
    add_frame = np.zeros(frame_from.shape, np.uint8)
    cv2.polylines(add_frame, [corners_arr], True, (0, 255, 255))
    frame = cv2.add(add_frame, frame_from)
    return frame
def image_postprocessing(img, t_size_y, t_size_x, feedback, t):
    # resize image
    img = cv2.resize(img, (t_size_y, t_size_x))
    # cut image
    img = img[t_size_y/2-1:-1, :]
    # filter image
    ret, img = cv2.threshold(img, 160, 255, cv2.THRESH_BINARY)
    # store if flag is set
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_filter.png', img)
    return img

# add image and depth image
# will store all stages of processing if flag for feedback is set
def detect_shirt(self):
    #self.dst=cv2.inRange(self.norm_rgb, np.array([self.lb,self.lg,self.lr],np.uint8), np.array([self.b,self.g,self.r],np.uint8))
    self.dst = cv2.inRange(self.norm_rgb, np.array([20, 20, 20], np.uint8), np.array([255, 110, 80], np.uint8))
    cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    fg = cv2.erode(self.dst, None, iterations=2)
    #cv2.imshow("fore", fg)
    bg = cv2.dilate(self.dst, None, iterations=3)
    _, bg = cv2.threshold(bg, 1, 128, 1)
    #cv2.imshow("back", bg)

    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)
    cv2.watershed(self.norm_rgb, mark32)

    self.m = cv2.convertScaleAbs(mark32)
    _, self.m = cv2.threshold(self.m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imshow("final_tshirt", self.m)

    cntr, h = cv2.findContours(self.m, cv2.cv.CV_RETR_EXTERNAL, cv2.cv.CV_CHAIN_APPROX_SIMPLE)

    return self.m, cntr
def get_init_process_img(roi_img):
    """
    Pre-process the ROI image: combine Sobel gradients, blur, threshold,
    apply erode/dilate passes, and finish with auto Canny edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img
def showcolormap(self):
    import matplotlib.pyplot as plt
    obj = self.obj
    m = obj.Proxy.dist_on_skel
    #plt.imshow(m, cmap=plt.cm.spectral, interpolation='nearest')
    plt.imshow(m, cmap=plt.cm.PRGn, interpolation='nearest')
    plt.show()

#
# add special widgets
#
def animpingpong(self):
    obj = self.Object
    res = None
    for t in obj.OutList:
        print t.Label
        img = t.ViewObject.Proxy.img.copy()
        if res == None:
            res = img.copy()
        else:
            #rr=cv2.subtract(res,img)
            #rr=cv2.add(res,img)
            aw = 0.0 + float(obj.aWeight)/100
            bw = 0.0 + float(obj.bWeight)/100
            print aw
            print bw
            if obj.aInverse:
                # remap b
                ret, mask = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
                img = cv2.bitwise_not(mask)
            rr = cv2.addWeighted(res, aw, img, bw, 0)
            res = rr

    #b,g,r = cv2.split(res)
    cv2.imshow(obj.Label, res)
    #cv2.imshow(obj.Label + " b", b)
    #cv2.imshow(obj.Label + " g", g)
    #cv2.imshow(obj.Label + " r", r)

    res = img
    if not obj.matplotlib:
        cv2.imshow(obj.Label, img)
    else:
        from matplotlib import pyplot as plt
        # plt.subplot(121),
        plt.imshow(img, cmap='gray')
        plt.title(obj.Label), plt.xticks([]), plt.yticks([])
        plt.show()

    self.img = img
def maskLogoOverImage(self):
    # Load two images
    img1 = cv2.imread('messi5.jpg')
    img2 = cv2.imread('opencv_logo.png')

    # I want to put logo on top-left corner, so I create a ROI
    rows, cols, channels = img2.shape
    roi = img1[0:rows, 0:cols]

    # Now create a mask of logo and create its inverse mask also
    img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # Now black-out the area of logo in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

    # Take only region of logo from logo image.
    img2_fg = cv2.bitwise_and(img2, img2, mask=mask)

    # Put logo in ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    img1[0:rows, 0:cols] = dst

    cv2.imshow('res', img1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

#####################################################################################################################
# Prototypes & Convenient CLI/GUI Dispatcher to rebuild mental picture of where we are/repeat on new platforms.
#####################################################################################################################
def read(self, dst=None):
    w, h = self.frame_size

    if self.bg is None:
        buf = np.zeros((h, w, 3), np.uint8)
    else:
        buf = self.bg.copy()

    self.render(buf)

    if self.noise > 0.0:
        noise = np.zeros((h, w, 3), np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
        buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
    return True, buf
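The example above relies on the dtype argument of cv2.add() to combine a uint8 frame with signed int8 noise and saturate the result back to 8-bit. A standalone sketch of the same pattern (array sizes and noise level are illustrative):

import cv2
import numpy as np

frame = np.full((4, 4, 3), 200, np.uint8)
noise = np.zeros((4, 4, 3), np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3) * 32)    # zero-mean Gaussian noise
noisy = cv2.add(frame, noise, dtype=cv2.CV_8UC3)  # saturated uint8 output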
def process_single_image(images=[], plot_plates=False):
    '''
    :param images: list (full path to images to be processed)
    '''
    if images:
        img_n = 1
        for path_to_image in images:
            t_start = time.time()
            img = cv2.imread(path_to_image)

            # Resizing of the image
            r = 400.0 / img.shape[1]
            dim = (400, int(img.shape[0] * r))
            img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

            mask = np.zeros_like(img)  # init mask
            contours = find_contours(img)
            # cv2.drawContours(img, contours, -1, (0, 255, 255))
            # cv2.waitKey(0)
            plates, plates_images, mask = find_plate_numbers(img, contours, mask)

            print "Time needed to complete: %s" % (time.time() - t_start)
            print "Plate Numbers: %s" % ", ".join(plates)

            # Apply mask to image and plot image
            img = cv2.add(img, mask)
            if plot_plates:
                plot_plate_numbers(plates_images)
            cv2.imshow('Resized Original image_%s + Detected Plate Number' % img_n, img)
            img_n += 1
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        exit('Images are not provided!')
def blur_mask_old(img):
    assert isinstance(img, numpy.ndarray), 'img_col must be a numpy array'
    assert img.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img.ndim)

    blur_mask = numpy.zeros(img.shape[:2], dtype=numpy.uint8)
    for mask, loc in get_masks(img):
        logger.debug('Checking Mask: {0}'.format(numpy.unique(mask)))
        logger.debug('SuperPixel Mask Percentage: {0}%'.format(int((100.0/255.0)*(numpy.sum(mask)/mask.size))))
        img_fft, val, blurry = main.blur_detector(img[loc[0]:loc[2], loc[1]:loc[3]])
        logger.debug('Blurry: {0}'.format(blurry))
        if blurry:
            blur_mask = cv2.add(blur_mask, mask)

    result = numpy.sum(blur_mask)/(255.0*blur_mask.size)
    logger.info('{0}% of input image is blurry'.format(int(100*result)))
    return blur_mask, result
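Since the superpixel masks accumulated here are binary (0 or 255), the saturating cv2.add() behaves exactly like a bitwise OR. A minimal self-check under that assumption:

import cv2
import numpy as np

m1 = np.array([[0, 255], [255, 0]], np.uint8)
m2 = np.array([[255, 255], [0, 0]], np.uint8)
# 255 + 255 saturates to 255, so the two operations agree on binary masks
assert np.array_equal(cv2.add(m1, m2), cv2.bitwise_or(m1, m2))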
def gaborConspicuity(image, steps):
    """ Creates the conspicuity map for the channel `orientations'. """
    gaborConspicuity_ = numpy.zeros((1088, 1983), numpy.uint8)
    for step in range(steps):
        theta = step * (math.pi/steps)
        gaborFilter = makeGaborFilter(dims=(10, 10), lambd=2.5, theta=theta,
                                      psi=math.pi/2, sigma=2.5, gamma=.5)
        gaborFeatures = features(image=intensity(im), channel=gaborFilter)
        summedFeatures = sumNormalizedFeatures(gaborFeatures)
        #gaborConspicuity_ += N(summedFeatures)
        np.add(gaborConspicuity_, N(summedFeatures), out=gaborConspicuity_, casting="unsafe")
    return gaborConspicuity_
def processFrame(self):
    # If we are enhancing the image
    if self.enhance:
        # Frangi vesselness to highlight tubular structures
        gray = cv2.cvtColor(self.sourceFrame, cv2.COLOR_BGR2GRAY)
        tub = tubes(gray, [5, 12])
        tubular = cv2.cvtColor(tub, cv2.COLOR_GRAY2BGR)

        # Merge with original to enhance tubular structures
        high = 0.3
        rest = 1.0 - high
        colorized = cv2.addWeighted(self.sourceFrame, rest, tubular, high, 0.0)
        # colorized = cv2.add(self.sourceFrame, tubular)

        # Tile horizontally
        self.processedFrame = np.concatenate((self.sourceFrame, tubular, colorized), axis=1)
    else:
        self.processedFrame = self.sourceFrame

    self.workingFrame = self.processedFrame.copy()

    # If we are tracking, track and show analysis
    if self.tracking is True:
        self.trackObjects()
        self.showBehavior()
def transform(image):
    '''
    input:
        image: numpy array of shape (channels, height, width), in RGB code
    output:
        transformed: numpy array of shape (channels, height, width), in RGB code
    '''
    transformed = image
    hue_shift_limit = (-50, 50)
    sat_shift_limit = (-5, 5)
    val_shift_limit = (-15, 15)
    if np.random.random() < 0.5:
        transformed = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(transformed)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        transformed = cv2.merge((h, s, v))
        transformed = cv2.cvtColor(transformed, cv2.COLOR_HSV2BGR)
    return transformed
def process_matches(self, matches, f0, f1):
    """Process matches

    Parameters
    ----------
    matches : Tuple of float
        (feature track id, feature index)
    f0 : List of Features
        Reference features
    f1 : List of Features
        Current features
    """
    tracks_updated = {}

    # Update or add feature track
    for i in range(len(matches)):
        f0_idx, f1_idx = matches[i]
        feature0 = f0[f0_idx]
        feature1 = f1[f1_idx]

        if feature0.track_id is not None:
            self.update_track(feature0.track_id, feature1)
            self.debug("Update track [%d]" % feature0.track_id)
        else:
            self.add_track(feature0, feature1)
            self.debug("Add track [%d]" % feature0.track_id)

        tracks_updated[feature0.track_id] = 1

    # Drop dead feature tracks
    tracks_tracking = list(self.tracks_tracking)
    for i in range(len(self.tracks_tracking)):
        track_id = tracks_tracking[i]
        if track_id not in tracks_updated:
            self.remove_track(track_id, True)

    self.debug("Tracking: " + str(self.tracks_tracking))
    self.debug("Lost: " + str(self.tracks_lost))
    self.debug("Buffer: " + str(self.tracks_buffer))
    self.debug("")
def _split_channel_images(self):
    blue, green, red = cv2.split(self._image)

    split_channel_images = [
        red,
        green,
        blue,
        cv2.add(red, green),
        cv2.add(red, blue),
        cv2.add(green, blue)
    ]

    return split_channel_images
def __create_gui(self):
    """Create GUI elements and add them to root widget"""
    self.video_frame = Tk.Frame(root, width=500, height=400)
    self.video_frame.config(background="gray")
    self.video_frame.pack()

    self.lmain = Tk.Label(self.video_frame)
    self.lmain.pack()
def __add_figure_to_frame(self, frame, figure_location):
    """This function is used to add a file from hard disk to the figure

    Algorithm source: http://docs.opencv.org/trunk/d0/d86/tutorial_py_image_arithmetics.html
    """
    # Get size of frame
    height, width, channels = frame.shape

    # Only add icon when the frame is big enough
    if height >= 100 and width >= 100:
        # Load heart icon
        icon_heart = cv2.imread(figure_location)
        # Convert to RGB
        icon_heart = cv2.cvtColor(icon_heart, cv2.COLOR_BGR2RGB)
        # Create ROI
        rows, cols, channels = icon_heart.shape
        roi = frame[:rows, :cols, :]
        # Convert heart to greyscale
        icon_heart_gray = cv2.cvtColor(icon_heart, cv2.COLOR_RGB2GRAY)
        # Create mask and inverse mask with binary threshold
        ret, mask = cv2.threshold(icon_heart_gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        # Background: Original frame with inverse mask
        frame_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        # Foreground: Heart with normal mask
        icon_heart_fg = cv2.bitwise_and(icon_heart, icon_heart, mask=mask)
        # Add heart icon to frame
        icon_heart_final = cv2.add(frame_bg, icon_heart_fg)
        frame[:rows, :cols, :] = icon_heart_final

    return frame

# Setter and getter following
def saveAverageImage(kitti_base, pos_labels, shape, fname, avg_num=None):
    num_images = float(len(pos_labels))
    # Check for None before taking the minimum, so avg_num defaults cleanly
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    # avg_img = np.zeros((shape[0], shape[1], 3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ' + fname, max=len(pos_labels))
    num = 0
    for label in pos_labels:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()

        sample = getCroppedSampleFromLabel(kitti_base, label)
        # sample = np.float32(sample)

        resized = resizeSample(sample, shape, label)
        resized = auto_canny(resized)
        resized = np.float32(resized)
        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()
    cv2.imwrite(fname, avg_img)
def crop_rectangle(img, pixel_rect):
    # Note: Need to add 1 to end coordinates because pixel rectangle corners
    # are inclusive.
    cropped = img[pixel_rect.y1:pixel_rect.y2+1, pixel_rect.x1:pixel_rect.x2+1, :]
    # cropped = img[pixel_rect.y1:pixel_rect.y2, pixel_rect.x1:pixel_rect.x2, :]
    return cropped

# save_opencv_bounding_box_info :: String -> Map String gm.PixelRectangle
def average_image(pos_region_generator, shape, avg_num=None):
    pos_regions = list(pos_region_generator)

    num_images = float(len(pos_regions))
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    window_dims = (shape[1], shape[0])

    # avg_img = np.zeros((shape[0], shape[1], 3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ', max=avg_num)
    num = 0
    for reg in pos_regions:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()

        resized = reg.load_cropped_resized_sample(window_dims)
        resized = auto_canny(resized)
        resized = np.float32(resized)
        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()
    return avg_img
def brightness(img, alpha):
    return cv.add(img, alpha)
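A note of caution on this one-liner: as far as we can tell, a bare Python number passed to cv2.add() is promoted to a cv::Scalar whose remaining components default to zero, so on a multi-channel image only the first channel is shifted. Spelling out a per-channel tuple makes the intent explicit; a minimal sketch, assuming a 3-channel BGR uint8 image:

import cv2 as cv
import numpy as np

img = np.full((2, 2, 3), 250, np.uint8)
out = cv.add(img, (20, 20, 20, 0))  # brightens every channel, saturating at 255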
def read(self, dst=None):
    noise = np.zeros(self.render.sceneBg.shape, np.int8)
    cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

    return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
def setCorners(self):
    name_window = 'Set Corners'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window, saveTabletCorners)
    cap = cv2.VideoCapture(0)
    ret, frame_from = cap.read()

    TTablet.m_CornersX = []
    TTablet.m_CornersY = []
    TTablet.m_AddFrame = np.zeros(frame_from.shape, np.uint8)

    #print ("start setCorners")
    while cap.isOpened():
        ret, frame_from = cap.read()
        frame_from = cv2.flip(frame_from, -1)
        frame = cv2.add(TTablet.m_AddFrame, frame_from)
        if ret == True:
            cv2.imshow(name_window, frame)
            #print ("fasdfasdf")
            if cv2.waitKey(1) & (len(TTablet.m_CornersX) > 3):
                break
        else:
            break

    # Release everything if job is finished
    cap.release()
    #out.release()
    cv2.destroyAllWindows()
def store_img(img, add):
    name = 'image_' + str(add) + '.png'
    cv2.imwrite(name, img)
def store_img(img, add, path):
    name = 'image_' + str(add) + '.png'
    cv2.imwrite(os.path.join(path, name), img)
def detect_shirt2(self):
    self.hsv = cv2.cvtColor(self.norm_rgb, cv.CV_BGR2HSV)
    self.hue, s, _ = cv2.split(self.hsv)

    _, self.dst = cv2.threshold(self.hue, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    self.fg = cv2.erode(self.dst, None, iterations=3)
    self.bg = cv2.dilate(self.dst, None, iterations=1)
    _, self.bg = cv2.threshold(self.bg, 1, 128, 1)

    mark = cv2.add(self.fg, self.bg)
    mark32 = np.int32(mark)
    cv2.watershed(self.norm_rgb, mark32)

    m = cv2.convertScaleAbs(mark32)
    _, m = cv2.threshold(m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    cntr, h = cv2.findContours(m, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    print len(cntr)
    #print cntr[0].shape
    #cntr[1].dtype=np.float32
    #ret=cv2.contourArea(np.array(cntr[1]))
    #print ret
    #cntr[0].dtype=np.uint8
    cv2.drawContours(m, cntr, -1, (255, 255, 255), 3)

    cv2.imshow("mask_fg", self.fg)
    cv2.imshow("mask_bg", self.bg)
    cv2.imshow("mark", m)
def subtract_back(self, frm):
    #dst = self.__back__ - self.__foreground__
    temp = np.zeros((600, 800), np.uint8)

    self.__foreground__ = cv2.blur(self.__foreground__, (3, 3))
    dst = cv2.absdiff(self.__back__, self.__foreground__)

    #dst = cv2.adaptiveThreshold(dst, 255, cv.CV_THRESH_BINARY, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, 5, 10)
    val, dst = cv2.threshold(dst, 0, 255, cv.CV_THRESH_BINARY + cv.CV_THRESH_OTSU)

    fg = cv2.erode(dst, None, iterations=1)
    bg = cv2.dilate(dst, None, iterations=4)

    _, bg = cv2.threshold(bg, 1, 128, 1)

    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)

    #dst.copy(temp)
    #seq = cv.FindContours(cv.fromarray(dst), self.mem, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    #cntr, h = cv2.findContours(dst, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    #print cntr, h
    #cv.DrawContours(cv.fromarray(temp), seq, (255,255,255), (255,255,255), 1, cv.CV_FILLED)

    cv2.watershed(frm, mark32)
    self.final_mask = cv2.convertScaleAbs(mark32)
    #print temp

    #--outputs---
    #cv2.imshow("subtraction", fg)
    #cv2.imshow("thres", dst)
    #cv2.imshow("thres1", bg)
    #cv2.imshow("mark", mark)
    #cv2.imshow("final", self.final_mask)
def overlayimg(back, fore, x, y, w, h):
    # Load two images
    img1 = np.array(back)
    img2 = np.array(fore)

    # create new dimensions
    r = float(h) / img2.shape[1]
    dim = (w, int(img2.shape[1] * r))

    # Now create a mask of box and create its inverse mask also
    img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # resize box and masks
    resized_img2 = cv2.resize(img2, dim, interpolation=cv2.INTER_AREA)
    resized_mask = cv2.resize(mask, dim, interpolation=cv2.INTER_AREA)
    resized_mask_inv = cv2.resize(mask_inv, dim, interpolation=cv2.INTER_AREA)

    # I want to put box in co-ordinates, so I create a ROI
    rows, cols, channels = resized_img2.shape
    roi = img1[y:y+rows, x:x+cols]

    # Now black-out the area of box in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=resized_mask_inv)

    # Take only region of box from box image.
    img2_fg = cv2.bitwise_and(resized_img2, resized_img2, mask=resized_mask)

    # Put box in ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    img1[y:y+rows, x:x+cols] = dst
    return img1
def channel_embedding(origin_image_chan, watermark_img_chan):
    coeffs1_1, coeffs1_2, coeffs1_3, coeffs2_3 = dwt2(origin_image_chan, watermark_img_chan)
    embedding_image = cv2.add(cv2.multiply(ORIGIN_RATE, coeffs1_3[0]),
                              cv2.multiply(WATERMARK_RATE, coeffs2_3[0]))
    embedding_image = idwt2(embedding_image, coeffs1_1[1], coeffs1_2[1], coeffs1_3[1])
    np.clip(embedding_image, 0, 255, out=embedding_image)
    embedding_image = embedding_image.astype('uint8')
    return embedding_image
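For float coefficient arrays like these DWT sub-bands, the multiply-then-add pair can be collapsed into a single cv2.addWeighted() call. A small self-check under illustrative inputs (the arrays and weights below are stand-ins, not values from the original project):

import cv2
import numpy as np

a = np.random.rand(8, 8).astype(np.float32)  # stands in for coeffs1_3[0]
b = np.random.rand(8, 8).astype(np.float32)  # stands in for coeffs2_3[0]
ORIGIN_RATE, WATERMARK_RATE = 0.9, 0.1       # weights are assumptions

blended = cv2.addWeighted(a, ORIGIN_RATE, b, WATERMARK_RATE, 0)
assert np.allclose(blended, a * ORIGIN_RATE + b * WATERMARK_RATE)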
def prep_img_for_blob(img, pixel_means, target_size, max_area, min_size):
    """Mean subtract and scale an image for use in a blob."""
    img = img.astype(np.float32, copy=False)
    img -= pixel_means
    img_shape = img.shape
    img_size_min = np.min(img_shape[0:2])
    img_size_max = np.max(img_shape[0:2])
    img_scale = float(target_size) / float(img_size_max)

    # Prevent the shorter sides from being less than MIN_SIZE
    if np.round(img_scale * img_size_min) < min_size:
        img_scale = np.round(min_size / img_size_min) + 1

    # Prevent the scaled area from being more than MAX_AREA
    if np.round(img_scale * img_size_min * img_scale * img_size_max) > max_area:
        img_scale = math.sqrt(float(max_area) / float(img_size_min * img_size_max))

    # Resize the sample.
    img = cv2.resize(img, None, None, fx=img_scale, fy=img_scale,
                     interpolation=cv2.INTER_LINEAR)

    # Randomly rotate the sample.
    img = cv2.warpAffine(img,
                         cv2.getRotationMatrix2D((img.shape[1] / 2, img.shape[0] / 2),
                                                 np.random.randint(-15, 15), 1),
                         (img.shape[1], img.shape[0]))

    # Perform RGB jittering
    h, w, c = img.shape
    zitter = np.zeros_like(img)
    for i in xrange(c):
        zitter[:, :, i] = np.random.randint(0, cfg.TRAIN.RGB_JIT, (h, w)) - cfg.TRAIN.RGB_JIT / 2
    img = cv2.add(img, zitter)

    return img, img_scale