The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.moments().
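Before the extracted examples, here is a minimal, self-contained sketch of the pattern most of them share: compute the moment dictionary for a binary mask (or a contour), guard against a zero area (m00), and derive the centroid from m10 and m01. The synthetic square image is a hypothetical placeholder input, not taken from any of the projects below.

import cv2
import numpy as np

# synthetic binary image with one filled white square (placeholder input)
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(mask, (60, 80), (140, 160), 255, -1)

M = cv2.moments(mask)             # spatial, central, and normalized central moments
if M['m00'] != 0:                 # m00 is the (intensity-weighted) area; avoid division by zero
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    print('centroid:', (cx, cy))  # expected near (100, 120) for this square
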
def image_callback(self, msg):
    image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([18, 120, 200])
    upper_yellow = numpy.array([28, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)

    h, w, d = image.shape
    search_top = 3 * h // 4
    search_bot = 3 * h // 4 + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(image, (cx, cy), 20, (0, 0, 255), -1)
        # BEGIN CONTROL
        err = cx - w / 2
        self.twist.linear.x = 0.2
        self.twist.angular.z = -float(err) / 100
        self.cmd_vel_pub.publish(self.twist)
        # END CONTROL
    cv2.imshow("window", image)
    cv2.waitKey(3)

def __init__(self, rubiks_parent, index, contour, heirarchy):
    self.rubiks_parent = rubiks_parent
    self.index = index
    self.contour = contour
    self.heirarchy = heirarchy
    peri = cv2.arcLength(contour, True)
    self.approx = cv2.approxPolyDP(contour, 0.1 * peri, True)
    self.area = cv2.contourArea(contour)
    self.corners = len(self.approx)
    self.width = None

    # compute the center of the contour
    M = cv2.moments(contour)

    if M["m00"]:
        self.cX = int(M["m10"] / M["m00"])
        self.cY = int(M["m01"] / M["m00"])
    else:
        self.cX = None
        self.cY = None

def camera_gesture_trigger():
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh1 = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    max_area = 0
    ci = 0  # index of the largest contour (fix: initialize in case no contour qualifies)
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            ci = i

    cnt = contours[ci]
    hull = cv2.convexHull(cnt)
    moments = cv2.moments(cnt)

    cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    hull = cv2.convexHull(cnt, returnPoints=False)

    defects = cv2.convexityDefects(cnt, hull)

    if defects is not None:
        if defects.shape[0] >= 5:
            return 1

    return 0

def _detect_bot(self, hsv_image):
    BOT_MIN = np.array([28, 8, 100], np.uint8)
    BOT_MAX = np.array([32, 255, 255], np.uint8)

    thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
    thresholded_image = cv2.medianBlur(thresholded_image, 15)

    _, contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if not contours:
        (bot_x, bot_y) = (-1000, -1000)
    else:
        bot = contours[0]
        M = cv2.moments(bot)
        if len(bot) > 2:
            bot_x = int(M['m10'] / M['m00'])
            bot_y = int(M['m01'] / M['m00'])
        else:
            (bot_x, bot_y) = (-1000, -1000)

    return thresholded_image, (bot_x, bot_y)

def blob_mean_and_tangent(contour):
    moments = cv2.moments(contour)

    area = moments['m00']

    mean_x = moments['m10'] / area
    mean_y = moments['m01'] / area

    # covariance matrix built from second-order central moments
    moments_matrix = np.array([
        [moments['mu20'], moments['mu11']],
        [moments['mu11'], moments['mu02']]
    ]) / area

    _, svd_u, _ = cv2.SVDecomp(moments_matrix)

    center = np.array([mean_x, mean_y])
    tangent = svd_u[:, 0].flatten().copy()

    return center, tangent

def insert_into_center(resized_digits):
    results = []
    for img in resized_digits:
        i = np.zeros((28, 28))

        # calculate the center of mass of the pixels
        M = cv2.moments(img)
        try:
            xc = M['m10'] / M['m00']
            yc = M['m01'] / M['m00']
        except ZeroDivisionError:
            xc = 10
            yc = 10

        # translate the image so as to position
        # this point at the center of the 28x28 field
        start_a = max(min(4 + (10 - int(yc)), 8), 0)
        start_b = max(min(4 + (10 - int(xc)), 8), 0)
        i[start_a:start_a + 20, start_b:start_b + 20] = img
        results.append(i)
    return results

def find_self():
    low_white = np.array([0, 0, 255])
    upper_white = np.array([1, 255, 255])
    mask, mmx, mmy = get_mini_map_mask(low_white, upper_white)

    _, contours, _ = cv2.findContours(mask.copy(), 1, 2)
    for cnt in contours:
        M = cv2.moments(cnt)
        print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) == 4:
            # centroid from image moments
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            print(cx, cy)

    cv2.imshow('img', mask)
    cv2.waitKey(0)

def moment_score(contour):
    moments = cv2.moments(contour)
    hu = cv2.HuMoments(moments)
    # hu[6] should be close to 0
    return 100 - (hu[6] * 100)

def augment_graph(frame, contour):
    if contour is None:
        return None, None

    moments = cv2.moments(contour)
    # Central mass of first-order moments
    #if moments['m00'] != 0:
    #    cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
    #    cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
    #centerMass = (cx, cy)
    #Draw center mass

    # circle
    (x, y), radius = cv2.minEnclosingCircle(contour)
    center = (int(x), int(y))
    radius = int(radius)
    cv2.circle(frame, center, 7, [100, 0, 255], 2)
    cv2.circle(frame, center, radius, (92, 66, 244), 5)
    return center, radius

def mapit(self, mode):
    # Find the centroid of the bounding box of the image
    # (we know this by construction - testing the functions)
    w, h, c = self.img.shape
    outerEdge = np.array([(0, 0), (0, h), (w, h), (w, 0)], dtype=np.int)
    M = cv2.moments(outerEdge)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.circle(self.img, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(self.img, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    # fix: the first corner's x-coordinate was mistakenly written as cY in the original
    cv2.rectangle(self.img, (cX - 30, cY - 30), (cX + 30, cY + 30), (0, 255, 0), 2)

def find(self, image):
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

    if len(contours) == 0:
        return (False, False)

    largest_contour = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
    M = cv2.moments(largest_contour)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

    return (center, radius)

def __init__(self, img_contour, contour):
    self.img_contour = img_contour
    self.contour = contour
    self.children = []
    self.text = None

    # Centroid computation from the OpenCV docs on contour attributes.
    moments = cv2.moments(contour)
    self.centroid = (int(moments['m10'] / moments['m00']),
                     int(moments['m01'] / moments['m00']))

def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
        cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    #print(cx, cy)
    return (cx, cy)

def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
        cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    return (cx, cy)

def drawCentroid(vid, color_area, mask, showCentroid):
    _, contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    l = len(contour)
    area = np.zeros(l)

    # filtering contours on the basis of the area range specified globally
    for i in range(l):
        if cv2.contourArea(contour[i]) > color_area[0] and cv2.contourArea(contour[i]) < color_area[1]:
            area[i] = cv2.contourArea(contour[i])
        else:
            area[i] = 0

    a = sorted(area, reverse=True)

    # bringing the contour with the largest valid area to the top
    for i in range(l):
        for j in range(1):
            if area[i] == a[j]:
                swap(contour, i, j)

    if l > 0:
        # finding the centroid using the method of 'moments'
        M = cv2.moments(contour[0])
        if M['m00'] != 0:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            center = (cx, cy)
            if showCentroid:
                cv2.circle(vid, center, 5, (0, 0, 255), -1)
            return center
    else:
        # return error handling values
        return (-1, -1)

# This function helps in filtering the required colored objects from the background

def drawCentroid(vid, color_area, mask, showCentroid):
    _, contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    l = len(contour)
    area = np.zeros(l)

    # filtering contours on the basis of the area range specified globally
    for i in range(l):
        if cv2.contourArea(contour[i]) > color_area[0] and cv2.contourArea(contour[i]) < color_area[1]:
            area[i] = cv2.contourArea(contour[i])
        else:
            area[i] = 0

    a = sorted(area, reverse=True)

    # bringing the contour with the largest valid area to the top
    for i in range(l):
        for j in range(1):
            if area[i] == a[j]:
                swap(contour, i, j)

    if l > 0:
        # finding the centroid using the method of 'moments'
        M = cv2.moments(contour[0])
        if M['m00'] != 0:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            center = (cx, cy)
            if showCentroid:
                cv2.circle(vid, center, 5, (0, 0, 255), -1)
            return center
    else:
        # return error handling values
        return (-1, -1)

def get_centroid(contour):
    m = cv2.moments(contour)
    x = int(m["m10"] / m["m00"])
    y = int(m["m01"] / m["m00"])
    return (x, y)

def sample_hard_negatives(img, roi_mask, out_dir, img_id, abn, patch_size=256,
                          neg_cutoff=.35, nb_bkg=100, start_sample_nb=0,
                          bkg_dir='background', verbose=False):
    '''WARNING: the definition of hns may be problematic. Studies have shown
    that the context of an ROI is also useful for classification.
    '''
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    img = add_img_margins(img, patch_size // 2)
    roi_mask = add_img_margins(roi_mask, patch_size // 2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours, _ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        print("ROI centroid=", (cx, cy))
        sys.stdout.flush()

    rng = np.random.RandomState(12345)
    # Sample hard negative samples.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        x1, x2 = (rx - patch_size // 2, rx + rw + patch_size // 2)
        y1, y2 = (ry - patch_size // 2, ry + rh + patch_size // 2)
        x1 = crop_val(x1, patch_size // 2, img.shape[1] - patch_size // 2)
        x2 = crop_val(x2, patch_size // 2, img.shape[1] - patch_size // 2)
        y1 = crop_val(y1, patch_size // 2, img.shape[0] - patch_size // 2)
        y2 = crop_val(y2, patch_size // 2, img.shape[0] - patch_size // 2)
        x = rng.randint(x1, x2)
        y = rng.randint(y1, y2)
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            filename = basename + "_%04d" % (sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            sampled_bkg += 1
            if verbose:
                print("sampled a hns patch at (x,y) center=", (x, y))
                sys.stdout.flush()

def sample_blob_negatives(img, roi_mask, out_dir, img_id, abn, blob_detector,
                          patch_size=256, neg_cutoff=.35, nb_bkg=100,
                          start_sample_nb=0, bkg_dir='background', verbose=False):
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    img = add_img_margins(img, patch_size // 2)
    roi_mask = add_img_margins(roi_mask, patch_size // 2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours, _ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, contours, _ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        print("ROI centroid=", (cx, cy))
        sys.stdout.flush()

    # Sample blob negative samples.
    key_pts = blob_detector.detect((img / img.max() * 255).astype('uint8'))
    rng = np.random.RandomState(12345)
    key_pts = rng.permutation(key_pts)
    sampled_bkg = 0
    for kp in key_pts:
        if sampled_bkg >= nb_bkg:
            break
        x, y = int(kp.pt[0]), int(kp.pt[1])
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size // 2:y + patch_size // 2,
                        x - patch_size // 2:x + patch_size // 2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), mode='I')
            filename = basename + "_%04d" % (start_sample_nb + sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            if verbose:
                print("sampled a blob patch at (x,y) center=", (x, y))
                sys.stdout.flush()
            sampled_bkg += 1
    return sampled_bkg

#### End of function definition ####

def __detect_bot(self, hsv_image):
    # Experimentally determined LED thresholds
    BOT_MIN = np.array([28, 8, 100], np.uint8)
    BOT_MAX = np.array([32, 255, 255], np.uint8)

    thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
    thresholded_image = cv2.medianBlur(thresholded_image, 15)

    # cv2.imshow('Yellow Tresh', thresholded_image)
    # cv2.waitKey(1)

    contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        (bot_x, bot_y) = (-1000, -1000)
    else:
        bot = contours[0]
        M = cv2.moments(bot)
        if len(bot) > 2:
            bot_x = int(M['m10'] / M['m00'])
            bot_y = int(M['m01'] / M['m00'])
        else:
            bot_x = self.current_location[0]
            bot_y = self.current_location[1]

    return thresholded_image, (bot_x, bot_y)

def detect_ball(frame):
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) == 0:
        return

    # find the largest contour in the mask, then use
    # it to compute the minimum enclosing circle and
    # centroid
    c = max(cnts, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

    if radius < 10:
        print('Too small')
        return

    return center, radius

def _get_centroids(self):
    if not self._contours:
        self._get_contours()

    self._centroids = []
    for contour in self._contours:
        moments = cv2.moments(contour)
        if moments['m00'] != 0.0:  # skip zero-area contours
            centroid_x = moments['m10'] / moments['m00']
            centroid_y = moments['m01'] / moments['m00']
            self._centroids.append((centroid_x, centroid_y))

def find_fairy_ring(self):
    run = 1
    while run:
        play_screen = Screenshot.shoot(6, 59, 510, 355, 'hsv')

        # finding white on fairy ring inner circle
        low = np.array([107, 92, 93])
        high = np.array([113, 255, 129])

        mask = cv2.inRange(play_screen, low, high)

        kernel = np.ones((10, 10), np.uint8)
        dilation = cv2.dilate(mask, kernel, iterations=1)
        #closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        #_, contours, _ = cv2.findContours(closing.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        _, contours, _ = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        for con in contours:
            print("Area: {}".format(cv2.contourArea(con)))
            if cv2.contourArea(con) > 1.0:
                (x, y, w, h) = cv2.boundingRect(con)
                x += self.rs_x
                y += self.rs_y
                x1 = x
                y1 = y
                x2 = x + w
                y2 = y + h
                print("x1:{} y1:{} x2:{} y2:{}".format(x1, y1, x2, y2))
                #print(cv2.contourArea(con))
                #M = cv2.moments(con)
                # finds centroid
                #x, y = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                Mouse.randMove(x1, y1, x2, y2, 3)
                time.sleep(5)
                if RS.findOptionClick(x1, y1, 'cis'):
                    run = 0
                time.sleep(2)
                break

        #cv2.imshow('img', mask)
        #cv2.waitKey(000)

def find_ham_guard():
    import random
    try:
        ps, psx, psy = RS.getPlayingScreen()

        lower_pink = np.array([154, 0, 0])
        upper_pink = np.array([160, 255, 255])

        mask = cv2.inRange(ps, lower_pink, upper_pink)

        _, contours, _ = cv2.findContours(mask.copy(), 1, 2)

        for cnt in contours:
            if cv2.contourArea(cnt) <= 1:
                continue
            #print("Area: {}".format(cv2.contourArea(cnt)))
            M = cv2.moments(cnt)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])

            cx += psx
            cy += psy

            cx += random.randint(-20, 20)
            cy += random.randint(-20, 20)

            # Find bounding box coords
            Mouse.moveClick(cx, cy, 3)
            break

        RS.findOptionClick(cx, cy, 'pickpocket')
    except Exception as e:
        print(e)

    #cv2.imshow('img', mask)
    #cv2.waitKey(0)

def find_prayer_pot():
    rs_bag, bagx, bagy = RS.get_bag('bag coords', 'hsv')

    # prayer potion color ranges
    low = np.array([78, 140, 0])
    high = np.array([81, 225, 211])

    mask = cv2.inRange(rs_bag, low, high)

    kernel = np.ones((5, 5), np.uint8)
    dilation = cv2.dilate(mask, kernel, iterations=1)

    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        cv2.rectangle(mask, (x, y), (x + w, y + h), (255, 255, 255), -1)

    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for con in contours[::-1]:
        M = cv2.moments(con)
        mx, my = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])

        mx += RSX + bagx
        my += RSY + bagy

        mx += random.randint(-7, 7)
        my += random.randint(-12, 5)

        Mouse.moveClick(mx, my, 1)
        #Mouse.moveTo(mx, my)
        break

def callback(self, data):
    try:
        imgOriginal = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print("==[CAMERA MANAGER]==", e)

    blurred = cv2.GaussianBlur(imgOriginal, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # lower = np.array([60,90,70])   # hsv range for green
    # upper = np.array([90,175,255])
    lower = np.array([60, 70, 70])   # hsv range for green
    upper = np.array([90, 255, 255])

    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, None, iterations=7)
    mask = cv2.dilate(mask, None, iterations=7)

    output = cv2.bitwise_and(imgOriginal, imgOriginal, mask=mask)
    outputGrayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)

    if major_ver == '3':
        contours = cv2.findContours(outputGrayscale, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
    elif major_ver == '2':
        contours = cv2.findContours(outputGrayscale, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]

    if len(contours) > 0:
        c = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        treasureCenter = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        self.treasurePoint.x = treasureCenter[0]
        self.treasurePoint.y = treasureCenter[1]
        self.treasurePoint.flag = 1
        self.pub.publish(self.treasurePoint)
    else:
        self.treasurePoint.flag = 0
        self.pub.publish(self.treasurePoint)

    cv2.imshow("TreasureFilter", output)
    cv2.waitKey(3)

def findCenter(contour):
    M = cv2.moments(contour)
    x = int(M['m10'] / M['m00'])
    y = int(M['m01'] / M['m00'])
    return (x, y)

# Calculates angle by using the focal length and pixel position

def analysis(self):
    self.Hull = cv2.convexHull(self.Contour)
    M = cv2.moments(self.Hull)
    try:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        self.centorid = (cx, cy)
    except ZeroDivisionError:
        pass
    self.ContArea = cv2.contourArea(self.Hull)

def label_contour(image, c, i, color=(0, 255, 0), thickness=2):
    # compute the center of the contour area and draw a circle
    # representing the center
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])

    # draw the contour and label number on the image
    cv2.drawContours(image, [c], -1, color, thickness)
    cv2.putText(image, "#{}".format(i + 1), (cX - 20, cY),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)

    # return the image with the contour number drawn on it
    return image

def get_objects(self):
    # bail out if the low and high RGB ranges are not set
    if self.__low_rgb is None or self.__high_rgb is None:
        return []

    # get the image, contours, and hierarchy
    image, contours, hierarchy = self.find_contours_in_range(self.__low_rgb, self.__high_rgb)

    # get the moments for each contour
    moments = []
    for contour in contours:
        moments.append(cv2.moments(contour, False))

    # build the camera_object for each contour
    camera_objects = []
    for i, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)

        # get the moments for this contour
        moment = moments[i]

        # get the confidence in the rectangle
        confidence = moment['m00'] / (w * h)

        # get the XY point of the moment
        point = (moment['m10'] / moment['m00'], moment['m01'] / moment['m00'])

        # set up the rectangle
        rectangle = (x, y, w, h)

        # now build the camera_object
        camera_object = (point, rectangle, confidence)

        # save the camera_object
        camera_objects.append(camera_object)

    return camera_objects

def classify_monitor_contour_set(contours):
    '''Not a general-purpose function: given the expectation of a set of
    strongly related contours for one monitor...'''
    # First pass: compute the center of mass of every contour
    classified = {}
    for (i, c) in enumerate(contours):
        classified[i] = {}
        classified[i]['contour'] = c
        M = cv2.moments(c)
        classified[i]['com'] = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        rect = contour_to_monitor_coords(c)
        (maxWidth, maxHeight, dest, Mwarp) = compute_warp(rect)
        classified[i]['rect'] = rect
        classified[i]['maxWidth'] = maxWidth
        classified[i]['maxHeight'] = maxHeight
        classified[i]['dest'] = dest
        classified[i]['Mwarp'] = Mwarp

    # Second pass: establish whether the c-o-m of every contour lies within the first contour
    reference_contour = contours[0]
    for (i, c) in enumerate(contours):
        classified[i]['coherent'] = cv2.pointPolygonTest(reference_contour, classified[i]['com'], False)

    # Final pass: report on the set
    print('$' * 80)
    for (i, c) in enumerate(contours):
        print('%d : c-o-m %s : coherent : %d mw %d mh %d' % (i,
                                                             classified[i]['com'],
                                                             classified[i]['coherent'],
                                                             classified[i]['maxWidth'],
                                                             classified[i]['maxHeight'],
                                                             ))
    print('$' * 80)

    # From the contours coherent with the reference contour, build an average/best estimator
    count = 0
    rect = np.zeros((4, 2), dtype="float32")
    for (i, c) in enumerate(contours):
        if classified[i]['coherent'] == 1:
            count += 1
            for j in range(0, 4):
                rect[j] += classified[i]['rect'][j]
    #pdb.set_trace()
    for j in range(0, 4):
        # BUG to show Alison
        # rect[j] = (rect[j]/1.0*count).astype('uint8')
        rect[j] = (rect[j] / (1.0 * count)).astype('uint32')

    time.sleep(2.5)
    return rect

def callback(self, data):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    height, width, channels = cv_image.shape
    crop_img = cv_image[200:height // 2 + 150][1:width]

    lower = np.array([0, 0, 79], dtype="uint8")
    upper = np.array([40, 40, 191], dtype="uint8")
    mask = cv2.inRange(crop_img, lower, upper)
    extraction = cv2.bitwise_and(crop_img, crop_img, mask=mask)
    m = cv2.moments(mask, False)
    try:
        x, y = m['m10'] / m['m00'], m['m01'] / m['m00']
    except ZeroDivisionError:
        x, y = height / 2, width / 2
    cv2.circle(extraction, (int(x), int(y)), 2, (0, 255, 0), 3)

    cv2.imshow("Image window", np.hstack([crop_img, extraction]))
    cv2.waitKey(1)

    yaw = 1500 + (x - width / 2) * 1.5
    print("center=" + str(width / 2) + " point=" + str(x) + " yaw=" + str(yaw))
    throttle = 1900

    if yaw > 1900:
        yaw = 1900
    elif yaw < 1100:
        yaw = 1100

    msg = OverrideRCIn()
    msg.channels[0] = yaw
    msg.channels[1] = 0
    msg.channels[2] = throttle
    msg.channels[3] = 0
    msg.channels[4] = 0
    msg.channels[5] = 0
    msg.channels[6] = 0
    msg.channels[7] = 0
    self.pub.publish(msg)

def count_fingers(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu's thresholding after Gaussian filtering
    img = cv2.GaussianBlur(img, (5, 5), 0)
    ret, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    cv2.imshow("Threshold", mask)

    (_, cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    list_far = []
    list_end = []
    if cnts:
        areas = [cv2.contourArea(c) for c in cnts]
        max_index = np.argmax(areas)
        cnt = cnts[max_index]

        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        hull1 = cv2.convexHull(cnt)
        hull2 = cv2.convexHull(cnt, returnPoints=False)

        try:
            defects = cv2.convexityDefects(cnt, hull2)
        except Exception as e:
            defects = None
            print(e)

        counter = 0
        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                # start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])

                if d < 20000:
                    continue

                if far[1] >= (cy + 40):
                    continue

                diff1 = abs(end[0] - far[0])
                if diff1 > 100:
                    continue

                cv2.line(img, end, far, (0, 0, 0), 2, 8)
                cv2.imshow("hand", img)
                cv2.waitKey(1)

                list_far.append(far)
                list_end.append(end)
                counter += 1

    return mask, counter, hull1, (cx, cy), list_far, list_end

def count_fingers(hand_frame):
    hand_frame = cv2.cvtColor(hand_frame, cv2.COLOR_BGR2GRAY)

    # Otsu's thresholding after Gaussian filtering
    hand_frame = cv2.GaussianBlur(hand_frame, (5, 5), 0)
    ret, mask = cv2.threshold(hand_frame, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    (cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    list_far = []
    list_end = []
    if cnts:
        areas = [cv2.contourArea(c) for c in cnts]
        max_index = np.argmax(areas)
        cnt = cnts[max_index]

        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        hull1 = cv2.convexHull(cnt)
        hull2 = cv2.convexHull(cnt, returnPoints=False)

        try:
            defects = cv2.convexityDefects(cnt, hull2)
        except Exception as e:
            defects = None
            print(e)

        counter = 0
        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])

                if d < 20000:
                    continue

                if far[1] >= (cy + 40):
                    continue

                list_far.append(far)
                list_end.append(end)
                counter += 1

    return mask, counter, hull1, (cx, cy), list_far, list_end

def run(self):
    bytes = ''
    while not self.thread_cancelled:
        try:
            bytes += self.stream.raw.read(1024)
            a = bytes.find('\xff\xd8')
            b = bytes.find('\xff\xd9')
            if a != -1 and b != -1:
                jpg = bytes[a:b + 2]
                bytes = bytes[b + 2:]
                img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)

                # Convert BGR to HSV
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

                # define range of blue color in HSV
                #lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
                #upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)

                # Threshold the HSV image to get only blue colors
                mask = cv2.inRange(hsv, np.array([53, 187, 37]), np.array([97, 244, 153]))

                # Bitwise-AND mask and original image
                res = cv2.bitwise_and(img, img, mask=mask)

                #### blurred = cv2.GaussianBlur(mask, (5, 5), 0)
                blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT)
                thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                cv2.filterSpeckles(mask, 0, 100, 25)
                ## cv2.filterSpeckles(mask, 0, 50, 25)
                ## cv2.filterSpeckles(mask, 0, 100, 100)

                for c in cnts:
                    M = cv2.moments(c)
                    if int(M["m00"]) != 0:
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])
                    else:
                        (cX, cY) = (0, 0)
                    print(cX, cY)
                    cv2.drawContours(res, [c], -1, (0, 255, 0), 2)
                    cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1)
                    # table.putNumber("center X", cX)

                cv2.imshow('img', img)
                cv2.imshow('mask', mask)
                cv2.imshow('Final', res)
                cv2.imshow('cam', img)

                #sd.putNumber('Center X', cX)  ##send the x value of the center
                #sd.putNumber('Center Y', cY)  ##send the y value of the center
                ## print(sd.getNumber('Center Y'), sd.getNumber('Center X'))

                if cv2.waitKey(1) == 27:
                    exit(0)
        except ThreadError:
            self.thread_cancelled = True

def run(self):
    bytes = ''
    while not self.thread_cancelled:  ####see lines 18, 80, 88 ....
        try:
            bytes += self.stream.raw.read(1024)  ##limit max bytes read in 1 iteration? need to read more on this
            a = bytes.find('\xff\xd8')  ##find start of stream of data
            b = bytes.find('\xff\xd9')  ##find our end of data stream
            if a != -1 and b != -1:  ##so as long as we have a stream of data....do the following
                jpg = bytes[a:b + 2]  ##converts to image or a specific variable...
                bytes = bytes[b + 2:]

                img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)  ##decode the data

                # Convert BGR to HSV
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  ##converting color format for easier processing/math

                # define range of blue color in HSV
                #lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
                #upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)

                # Threshold the HSV image to get only blue colors
                mask = cv2.inRange(hsv, np.array([53, 187, 37]), np.array([97, 244, 153]))  ##get colors in the range of these HSV values

                # Bitwise-AND mask and original image
                res = cv2.bitwise_and(img, img, mask=mask)

                blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT)  ##the next few lines create outlines and
                thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]  ##remove any noise
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  #find contours
                cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                cv2.filterSpeckles(mask, 0, 100, 25)  ##remove speckles aka random dots and white noise

                for c in cnts:
                    M = cv2.moments(c)
                    if int(M["m00"]) != 0:  ##Checks for division by zero
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])
                    else:
                        (cX, cY) = (0, 0)

                    cv2.drawContours(res, [c], -1, (0, 255, 0), 2)  ##draw box/highlighting
                    cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1)  ##draw box/highlighting

                    ##Try-Catch for appending cX to table
                    try:
                        self.table.putNumber('centerX', cX)  ##Adds cX to the networktables
                    except KeyError:
                        print("centerX failed.")

                cv2.imshow('img', img)  ##display original image
                cv2.imshow('mask', mask)  ##display masked image
                cv2.imshow('Final', res)  ##show final image
                cv2.imshow('cam', img)  ##see line 71/comments

                if cv2.waitKey(1) == 27:  ##now we close if esc key is pressed
                    exit(0)
        except ThreadError:
            self.thread_cancelled = True

def find_target(img,
                lower=np.array([110//2, 10*255//100, 15*255//100]),
                upper=np.array([180//2, 100*255//100, 100*255//100]),
                area_threshold=0.025 ** 2):
    """Given an image and thresholds, find the centre of mass of the target.

    All arguments must be np.arrays, except for area_threshold, and lower
    and upper must each be a 3-array.
    """
    # Converting from RGB to HSV.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Making the mask.
    mask = cv2.inRange(hsv, lower, upper)

    # Combining the mask with the frame
    res = cv2.bitwise_and(img, img, mask=mask)

    height, width = mask.shape

    # Get the information for the contours
    _, contours, __ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # sort the contours into a list
    areas = []
    for idx, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area / (height * width) > area_threshold:
            heapq.heappush(areas, (cv2.contourArea(contour), idx))
    areas = heapq.nlargest(2, areas)

    areas_x = []
    x_coord = 0
    for _, idx in areas:
        contour = contours[idx]
        moments = cv2.moments(contour)
        x_coord += moments['m10'] / moments['m00'] / len(areas)
        areas_x.append(moments['m10'] / moments['m00'])
        cv2.drawContours(res, (contour,), -1, (255, 0, 0), 1)

    if len(areas) > 0:
        cv2.line(res, (int(x_coord), 60), (int(x_coord), 180), (255, 255, 0),
                 thickness=2, lineType=8, shift=0)

    target_sep = 0
    if len(areas_x) > 1:
        # target sep is returned as a % of image width, not in vision coordinates
        target_sep = abs(areas_x[0] - areas_x[1]) / width

    pos = 2 * x_coord / width - 1
    return pos, res, len(areas), target_sep

# Allow easy testing of captured sample images

def image_callback(self, msg):
    # convert ROS image to OpenCV image
    try:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)

    # create hsv image of scene
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # find pink objects in the image
    lower_pink = numpy.array([139, 0, 240], numpy.uint8)
    upper_pink = numpy.array([159, 121, 255], numpy.uint8)
    mask = cv2.inRange(hsv, lower_pink, upper_pink)

    # dilate and erode with kernel size 11x11
    cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11, 11)))

    # find all of the contours in the mask image
    contours, heirarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.contourLength = len(contours)

    # Check for at least one target found
    if self.contourLength < 1:
        print("No target found")
    else:
        # target found
        # Loop through all of the contours and get their areas
        area = [0.0] * len(contours)
        for i in range(self.contourLength):
            area[i] = cv2.contourArea(contours[i])

        # Target: the largest "pink" object
        target_image = contours[area.index(max(area))]

        # Using moments, find the center of the object and draw a red outline around the object
        target_m = cv2.moments(target_image)
        self.target_u = int(target_m['m10'] / target_m['m00'])
        self.target_v = int(target_m['m01'] / target_m['m00'])
        points = cv2.minAreaRect(target_image)
        box = cv2.cv.BoxPoints(points)
        box = numpy.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))

        self.target_found = True  # set flag for depth_callback processing

    # show image with target outlined with a red rectangle
    cv2.imshow("Target", image)
    cv2.waitKey(3)

# This callback function handles processing of the Kinect depth image, looking for the depth value
# at the location of the center of the pink target.

def segment(self):
    self.im_gray = cv2.medianBlur(self.im_gray, 5)

    # Apply adaptive threshold with binary_inv
    thresh = cv2.adaptiveThreshold(self.im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 2)

    # apply some dilation and erosion to join the gaps
    thresh = cv2.dilate(thresh, None, iterations=3)
    thresh = cv2.erode(thresh, None, iterations=2)

    # finding contours
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    '''
    cropped is a dictionary with (cx, cy) centroid tuples as keys, and cropped images as values
    centroids is a list of the same centroid tuples, (cx, cy)
    - This was done because it was not possible to sort the dictionary directly
      using tuples as keys with the sort(dict) function.
    - Instead, (cx, cy) was stored in the centroids list, and the list in turn
      was sorted using centroids.sort().
    - The list is then iterated upon to get tuples in order...
    - Each tuple iterated upon acts as a key for the dictionary, fetching the
      cropped images in order
    '''
    cropped = {(0, 0): '0'}
    centroids = [(0, 0)]

    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)

        # finding centroid coordinates, so that they can be the basis of sorting the cropped images
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # storing the centroid tuple and cropped image in the dictionary
        cropped[(cx, cy)] = self.im_gray[y:y + h, x:x + w]

        # inserting centroid tuples into the list
        centroids.append((cx, cy))

    # since (0, 0) was only a placeholder
    del cropped[(0, 0)]
    centroids.remove((0, 0))

    # sorting the centroid list
    centroids.sort()

    segments = []
    for c in centroids:
        segments.append(cropped[c])

    return segments

def open_cw_bank():
    """Finds the most visible square of the chest in the Castle Wars bank;
    works better when viewing from above at the shortest distance."""
    # gets the RS window's position
    rsx, rsy = position()

    # Takes a screenshot, as a hue-saturation-value image
    play_window, psx, psy = getPlayingScreen()
    psx += rsx
    psy += rsy

    lower_gray = np.array([0, 15, 55])
    upper_gray = np.array([10, 25, 125])

    # Makes a black/white mask
    mask = cv2.inRange(play_window, lower_gray, upper_gray)
    # inverts selection
    #res = cv2.bitwise_and(play_window, play_window, mask=mask)

    kernel = np.ones((5, 5), np.uint8)
    dilation = cv2.dilate(mask, kernel, iterations=1)

    #cv2.imshow('img', dilation)
    #cv2.waitKey(0)

    # Finds contours
    _, contours, _ = cv2.findContours(dilation.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    try:
        # looks for the center of the gray color with the biggest area, > 3000
        for con in contours:
            if cv2.contourArea(con) > 3000:
                M = cv2.moments(con)
                # finds centroid
                cx, cy = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                psx += cx
                psy += cy

                # adds randomness to coords
                psx += random.randint(-17, 17)
                psy += random.randint(-17, 17)

                # move-click the chest
                Mouse.moveClick(psx, psy, 1)
                RandTime.randTime(0, 0, 0, 0, 9, 9)
                break
    except Exception as e:
        print("Bank NOT found!\nMove camera around!")