The following code examples, drawn from open-source Python projects, illustrate how to use cv2.bitwise_or().
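As a primer, here is a minimal, self-contained sketch (the array values are made up for illustration) of what cv2.bitwise_or() computes: a per-element OR, so an output pixel is set wherever it is set in either input.

import cv2
import numpy as np

# Two tiny 8-bit masks; the values are arbitrary illustrations.
a = np.array([[0, 255], [255, 0]], dtype=np.uint8)
b = np.array([[0, 0], [255, 255]], dtype=np.uint8)

# Per-element bitwise OR: set wherever either input is set.
union = cv2.bitwise_or(a, b)
print(union)  # [[  0 255] [255 255]]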
def markVisibleArea(self, originalImg):
    visibleImg = np.zeros(self.img.shape, np.uint8)
    x, y = self.current[0], self.current[1]
    lx, ly = -200, -200  # last coordinates
    points = []
    for i in range(1083):
        # Cast a ray every third of a degree and find where it hits an obstacle.
        nx, ny = self.findNearestObstacle(originalImg, x, y, i / 3)
        #print nx, ny
        nx = int(nx)
        ny = int(ny)
        points.append((ny, nx))
        if i != 0:
            cv2.line(visibleImg, (ny, nx), (ly, lx), 100, 1)
        lx, ly = nx, ny

    # Fill the polygon traced by the ray endpoints, then mark the endpoints.
    h, w = visibleImg.shape
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(visibleImg, mask, (y, x), 100)
    for i in points:
        cv2.circle(visibleImg, i, 3, 255, 6)
    self.img = cv2.bitwise_or(self.img, visibleImg)
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # If the hue range wraps around the end of the scale (e.g. red),
    # OR together the two halves of the range.
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
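A hedged usage sketch for the wrap-around branch: in OpenCV, hue runs 0-179, so a color such as red straddles both ends of the scale. Passing h_th_low > h_th_up (the threshold values and file name below are assumptions, not from the source) exercises the cv2.bitwise_or() path above.

import cv2

# Hypothetical thresholds: hue >= 170 or <= 10 covers red, which wraps
# the 0-179 hue range; the saturation/value cutoffs of 50 are also made up.
src = cv2.imread('image.png')  # placeholder file name
red_mask = extract_color(src, 170, 10, 50, 50)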
def remove_noise_and_smooth(file_name):
    logging.info('Removing noise and smoothening image')
    img = cv2.imread(file_name, 0)
    filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 41, 3)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    # Combine the smoothened original with the adaptively thresholded copy.
    img = image_smoothening(img)
    or_image = cv2.bitwise_or(img, closing)
    return or_image
def calculate_iou(img_mask, gt_mask):
    gt_mask *= 1.0
    # Intersection and union of the two masks, then the ratio of their areas.
    img_and = cv2.bitwise_and(img_mask, gt_mask)
    img_or = cv2.bitwise_or(img_mask, gt_mask)
    j = np.count_nonzero(img_and)
    i = np.count_nonzero(img_or)
    iou = float(j) / float(i)
    return iou
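A quick sanity check of calculate_iou, assuming both masks share a floating-point dtype with 0/1 values (the in-place gt_mask *= 1.0 would fail on an integer array, and the division raises ZeroDivisionError on an empty union); the NumPy expression cross-checks the OpenCV result:

import cv2
import numpy as np

# Made-up 0/1 masks: one overlapping pixel, three pixels in the union.
img_mask = np.array([[1, 1], [0, 0]], dtype=np.float32)
gt_mask = np.array([[1, 0], [1, 0]], dtype=np.float32)
print(calculate_iou(img_mask, gt_mask))          # 1/3
print(np.logical_and(img_mask, gt_mask).sum() /
      np.logical_or(img_mask, gt_mask).sum())    # same result via NumPy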
def extractEdges(hue, intensity):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    edges = cv2.Canny(intensity, 120, 140)
    hue_edges = cv2.Canny(cv2.GaussianBlur(hue, (5, 5), 0), 0, 255)
    # Merge edges found in the hue channel with those from the intensity channel.
    combined_edges = cv2.bitwise_or(hue_edges, edges)
    _, mask = cv2.threshold(combined_edges, 40, 255, cv2.THRESH_BINARY)
    return cv2.erode(cv2.GaussianBlur(mask, (3, 3), 0), kernel, iterations=1)
def meldmask(mask_0, mask_1):
    mask = cv2.bitwise_or(mask_0, mask_1)
    return mask
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)

        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break

    # return the skeletonized image
    return skeleton
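A hedged usage sketch of skeletonize on a synthetic binary image (the image shape and structuring-element size are assumptions): each loop iteration peels one erosion layer off the image and ORs the "ridge" left behind (the image minus its opening) into the accumulating skeleton.

import cv2
import numpy as np

# Synthetic binary blob; in practice this would come from thresholding.
image = np.zeros((50, 50), dtype="uint8")
cv2.rectangle(image, (10, 20), (40, 30), 255, -1)  # filled white bar

skeleton = skeletonize(image, size=(3, 3))
print(cv2.countNonZero(skeleton))  # only a thin line of pixels remains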
def bit_or(self, frame):
    self._ndarray = cv2.bitwise_or(self.ndarray, frame.ndarray)
    self._contours = None
def detection(self, hsv_image):
    """Check for detection in the image"""
    mask = cv2.inRange(hsv_image, self.color_low, self.color_high)
    if self.baseline_cnt > 0:
        # Calibration: OR the inverted masks together to build the baseline.
        nmask = cv2.bitwise_not(mask)
        if self.baseline is None:
            rospy.loginfo("getting baseline for {}".format(self.name))
            self.baseline = nmask
        else:
            self.baseline = cv2.bitwise_or(nmask, self.baseline)
        mask = cv2.bitwise_and(mask, self.baseline)
        count = cv2.countNonZero(mask) + self.baseline_fuzzy
        self.low_count = max(self.low_count, count)
        self.high_count = self.low_count + self.baseline_fuzzy
        self.baseline_cnt -= 1
        return
    elif self.baseline is not None:
        mask = cv2.bitwise_and(mask, self.baseline)

    count = cv2.countNonZero(mask)
    if count > self.low_count and self.active is None:
        self.active = rospy.get_rostime()
        rospy.loginfo("{} ACTIVE ({})".format(self.name, count))
        self.cloud.reset()
        if self.callbacks[0] is not None:
            self.callbacks[0](self.name)
    elif self.active is not None and count < self.high_count:
        rospy.loginfo("{} GONE ({})".format(self.name, count))
        self.cloud.reset()
        self.active = None
        if self.callbacks[2] is not None:
            self.published = False
            self.report_count = 0
        if self.callbacks[1] is not None:
            self.callbacks[1](self.name)

    # DEBUGGING to see what the masked image for the request is
    if self.debug:
        cv2.namedWindow(self.name, cv2.WINDOW_NORMAL)
        if self.baseline is not None:
            cv2.namedWindow(self.name + '_baseline', cv2.WINDOW_NORMAL)
            cv2.imshow(self.name + '_baseline', self.baseline)
        cv2.imshow(self.name, mask)
        cv2.waitKey(1)

    # see if we notify the location callback
    if self.is_active() and self.report_count > self.min_reports:
        now = rospy.get_rostime()
        if (self.active + self.stablize_time) < now:
            self.published = True
            point = PointStamped()
            center = self.cloud.find_center()
            point.header.seq = 1
            point.header.stamp = now
            point.header.frame_id = self.frame_id
            point.point.x = center[0]
            point.point.y = center[1]
            point.point.z = center[2]
            if self.callbacks[2] is not None:
                self.callbacks[2](self.name, point)
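The baseline logic above is worth isolating: during calibration, the detector ORs together the inverses of the color masks, so a baseline pixel stays zero only if it matched the color range in every calibration frame; AND-ing live masks with that baseline then suppresses those persistent background hits. A standalone sketch with made-up masks:

import cv2
import numpy as np

# Two calibration frames; pixel (0, 0) fires spuriously in both.
cal1 = np.array([[255, 0], [0, 0]], dtype=np.uint8)
cal2 = np.array([[255, 0], [0, 255]], dtype=np.uint8)

baseline = cv2.bitwise_not(cal1)
baseline = cv2.bitwise_or(cv2.bitwise_not(cal2), baseline)

# Live detection: the always-on pixel is masked out, the real hit survives.
live = np.array([[255, 255], [0, 0]], dtype=np.uint8)
print(cv2.bitwise_and(live, baseline))  # [[  0 255] [  0   0]]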
def send_hazard_camera(self):
    """ Don't bump into things! """
    self.zarj.pelvis.lean_body_to(0)
    self.zarj.neck.neck_control([0.5, 0.0, 0.0], True)
    rospy.sleep(1.0)
    cloud = self.zarj.eyes.get_stereo_cloud()
    forward, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
    forward = cv2.cvtColor(forward, cv2.COLOR_BGR2GRAY)
    forward = cv2.copyMakeBorder(forward, 0, 0, 560, 630,
                                 cv2.BORDER_CONSTANT, value=(0, 0, 0))

    self.zarj.neck.neck_control([0.5, 1.0, 0.0], True)
    rospy.sleep(1.0)
    cloud = self.zarj.eyes.get_stereo_cloud()
    right, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
    right = cv2.copyMakeBorder(right, 0, 0, 1190, 0,
                               cv2.BORDER_CONSTANT, value=(0, 0, 0))
    right = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)

    self.zarj.neck.neck_control([0.5, -1.0, 0.0], True)
    rospy.sleep(1.0)
    cloud = self.zarj.eyes.get_stereo_cloud()
    left, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
    left = cv2.copyMakeBorder(left, 0, 0, 0, 1190,
                              cv2.BORDER_CONSTANT, value=(0, 0, 0))
    left = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)

    self.zarj.neck.neck_control([0.0, 0.0, 0.0], True)

    # Stitch the three padded views into one panorama with bitwise OR.
    haz_cam = cv2.bitwise_or(forward, left)
    haz_cam = cv2.bitwise_or(haz_cam, right)

    haz_cam = cv2.cvtColor(haz_cam, cv2.COLOR_GRAY2BGR)
    haz_cam = PERSPECTIVE_HEAD_DOWN.build_rangefinding_image(haz_cam)
    pictsize = np.shape(haz_cam)
    resized = cv2.resize(haz_cam, (pictsize[1] / 2, pictsize[0] / 2),
                         interpolation=cv2.INTER_AREA)
    (_, png) = cv2.imencode(".png", resized)
    msg = ZarjPicture("hazard", png)
    msg.time = rospy.get_time()
    self.zarj_comm.push_message(msg)
def __findLine(self):
    self.__grabImage()
    if self.currentImage is None:  # grabbing image failed
        return -2.0

    # Convert to grayscale
    img = cv2.cvtColor(self.currentImage, cv2.COLOR_BGR2GRAY)
    # Blur to reduce noise
    img = cv2.medianBlur(img, 25)
    # Do thresholding
    h, img = cv2.threshold(img, self.thresh, self.maxValue, cv2.THRESH_BINARY_INV)
    img = cv2.blur(img, (2, 2))
    # Make image smaller
    img = cv2.resize(img, (self.horizontalRes, self.verticalRes))
    #org_img = cv2.resize(org_img, (self.horizontalRes, self.verticalRes))

    # Create skeleton
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()
        zeros = size - cv2.countNonZero(img)
        if zeros == size:
            done = True

    # Do line detection
    lines = cv2.HoughLinesP(skel, 1, np.pi / 180, 2,
                            self.hough_minLineLength, self.hough_maxLineGap)

    # Get minimum and maximum x-coordinates from the detected lines
    x_min = self.horizontalRes + 1.0
    x_max = -1.0
    if lines is not None and len(lines[0]) > 0:
        for x1, y1, x2, y2 in lines[0]:
            x_min = min(x_min, x1, x2)
            x_max = max(x_max, x1, x2)
            #cv2.line(org_img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # Write output visualization
    #cv2.imwrite("output-img.png", org_img)

    # Find the middle point x of the line and return it;
    # return -1.0 if no lines were found
    if x_max == -1.0 or x_min == (self.horizontalRes + 1.0):
        return -1.0  # no line found
    else:
        return (x_min + x_max) / 2.0