The following code examples, extracted from open-source Python projects, illustrate how to use cv2.CV_LOAD_IMAGE_UNCHANGED.
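A note on the flag itself: cv2.CV_LOAD_IMAGE_UNCHANGED tells cv2.imread and cv2.imdecode to return the image exactly as stored, preserving any alpha channel and the original bit depth instead of forcing an 8-bit, 3-channel BGR result. The constant belongs to the OpenCV 2.x Python bindings and was removed in OpenCV 3, where cv2.IMREAD_UNCHANGED plays the same role (both evaluate to -1). Below is a minimal sketch that works under either version; 'image.png' is a placeholder path.

import cv2

# prefer the OpenCV 2.x name if present, otherwise fall back to the 3.x equivalent
flag = getattr(cv2, 'CV_LOAD_IMAGE_UNCHANGED', cv2.IMREAD_UNCHANGED)

# a PNG with transparency comes back with 4 channels; a 16-bit image keeps uint16
img = cv2.imread('image.png', flag)
print(img.shape, img.dtype)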
def post(self):
    global detector
    imstrjpg = self.get_argument('data', 'empty')
    if imstrjpg == 'empty':
        print 'EMPTY'
        return ""
    # decode the raw JPEG bytes in memory, keeping the image exactly as stored
    imstr = np.fromstring(imstrjpg, dtype=np.uint8)
    im = cv2.imdecode(imstr, cv2.CV_LOAD_IMAGE_UNCHANGED)
    scores, boxes = detector.detect(im)
    CONF_THRESH = 0.15  # defined but not applied below
    NMS_THRESH = 0.08
    results = {}
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        results[cls] = dets
    self.write(cPickle.dumps(results))
    self.finish()
def generate_detecton_label(label_root, stride, save_root):
    if not os.path.exists(save_root):
        os.makedirs(save_root)
    files = os.listdir(label_root)
    for file in files:
        # read the label map as stored (single channel, original bit depth)
        label = cv2.imread(os.path.join(label_root, file), cv2.CV_LOAD_IMAGE_UNCHANGED)
        if label is None:
            print 'please check label root path'
            continue
        target_shape = label.shape[0] // stride, label.shape[1] // stride
        feature_label = np.zeros(target_shape, dtype=np.uint8)
        h, w = label.shape
        # mark every stride-by-stride cell that contains at least one labelled pixel
        for y in range(0, h - stride, stride):
            for x in range(0, w - stride, stride):
                patch = label[y:(y + stride), x:(x + stride)]
                num_pixels = np.sum(patch != 0)
                if num_pixels >= 1:
                    feature_label[y // stride][x // stride] = 1
        cv2.imwrite(os.path.join(save_root, file), feature_label)
def generate(self, img_root, label_root, save_root):
    files = os.listdir(img_root)
    for file in files:
        file_path = os.path.join(img_root, file)
        label_path = os.path.join(label_root, os.path.splitext(file)[0] + '_label' + self.lab_ext)
        img = cv2.imread(file_path)
        if img is None:
            print 'please check img file path'
            exit()
        # load the label as stored so single-channel masks are not expanded to BGR
        label = cv2.imread(label_path, cv2.CV_LOAD_IMAGE_UNCHANGED)
        if label is None:
            print 'please check label file ext'
            exit()
        self._generate_patches(img, label, save_root, file)
def predict_knn(image_file):
    # decode the uploaded bytes in memory, keeping the image exactly as stored
    image = cv2.imdecode(np.fromstring(image_file.read(), np.uint8),
                         cv2.CV_LOAD_IMAGE_UNCHANGED)
    if image is not None:
        features = np.array([extract_color_histogram(image)])
        loaded_model = pickle.load(open(MODEL_PATH + "/knn_model.sav", 'rb'))
        return loaded_model.predict(features)[0]
    else:
        raise Exception("Failed")  # the bytes could not be decoded as an image
def predict_mlp(image_file):
    # decode the uploaded bytes in memory, keeping the image exactly as stored
    image = cv2.imdecode(np.fromstring(image_file.read(), np.uint8),
                         cv2.CV_LOAD_IMAGE_UNCHANGED)
    if image is not None:
        features = np.array([image_to_feature_vector(image)])
        loaded_model = pickle.load(open(MODEL_PATH + "/mlp_model.sav", 'rb'))
        scaler = pickle.load(open(MODEL_PATH + "/scaler_model.sav", "rb"))
        features = scaler.transform(features)
        return loaded_model.predict(features)[0]
    else:
        raise Exception("Failed")  # the bytes could not be decoded as an image
def getGroundTruth(fileNameGT):
    '''
    Returns the ground truth maps for roadArea and the validArea
    :param fileNameGT:
    '''
    # Read GT
    assert os.path.isfile(fileNameGT), 'Cannot find: %s' % fileNameGT
    full_gt = cv2.imread(fileNameGT, cv2.CV_LOAD_IMAGE_UNCHANGED)
    # attention: OpenCV reads in as BGR, so first channel has Blue / road GT
    roadArea = full_gt[:, :, 0] > 0
    validArea = full_gt[:, :, 2] > 0
    return roadArea, validArea
def sift(imageval):
    # wrap the raw bytes in a uint8 array and decode them in memory
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    #surf = cv2.SURF(400)
    sift = cv2.SIFT(40)
    kp, des = sift.detectAndCompute(gray, None)
    #kp, des = surf.detectAndCompute(gray, None)
    #print len(kp)
def surf(imageval):
    # wrap the raw bytes in a uint8 array and decode them in memory
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    surf = cv2.SURF(40)
    #sift = cv2.SIFT(40)
    #kp, des = sift.detectAndCompute(gray, None)
    kp, des = surf.detectAndCompute(gray, None)
    #print len(kp)
def main(train_dir, test_dir, outputDir):
    '''
    main method of computeBaseline
    :param train_dir: directory of training data (has to contain ground truth: gt_image_2), e.g., /home/elvis/kitti_road/training
    :param test_dir: directory with testing data (has to contain images: image_2), e.g., /home/elvis/kitti_road/testing
    :param outputDir: directory where the baseline results will be saved, e.g., /home/elvis/kitti_road/test_baseline_perspective
    '''
    trainData_path_gt = os.path.join(train_dir, dataStructure.trainData_subdir_gt)
    print "Computing category specific location potential as a simple baseline for classifying the data..."
    print "Using ground truth data from: %s" % trainData_path_gt
    print "All categories = %s" % dataStructure.cats

    # Loop over all categories
    for cat in dataStructure.cats:
        cat_tags = cat.split('_')
        print "Computing on dataset: %s for class: %s" % (cat_tags[0], cat_tags[1])
        trainData_fileList_gt = glob(os.path.join(trainData_path_gt, cat + '*' + dataStructure.gt_end))
        trainData_fileList_gt.sort()
        assert len(trainData_fileList_gt) > 0, 'Error: Cannot find ground truth data in %s' % trainData_path_gt

        # Compute location potential
        locationPotential = np.zeros(dataStructure.imageShape_max, 'f4')

        # Loop over all gt-files for particular category
        for trainData_fileName_gt in trainData_fileList_gt:
            full_gt = cv2.imread(trainData_fileName_gt, cv2.CV_LOAD_IMAGE_UNCHANGED)
            # attention: OpenCV reads in as BGR, so first channel has road GT
            trainData_file_gt = full_gt[:, :, 0] > 0
            #validArea = full_gt[:,:,2] > 0
            assert locationPotential.shape[0] >= trainData_file_gt.shape[0], 'Error: Y dimension of locationPotential is too small: %d' % trainData_file_gt.shape[0]
            assert locationPotential.shape[1] >= trainData_file_gt.shape[1], 'Error: X dimension of locationPotential is too small: %d' % trainData_file_gt.shape[1]
            locationPotential[:trainData_file_gt.shape[0], :trainData_file_gt.shape[1]] += trainData_file_gt

        # Compute probability
        locationPotential = locationPotential / len(trainData_fileList_gt)
        locationPotential_uint8 = (locationPotential * 255).astype('u1')
        print "Done: computing location potential for category: %s." % cat

        if not os.path.isdir(outputDir):
            os.makedirs(outputDir)

        testData_fileList_im2 = glob(os.path.join(test_dir, dataStructure.testData_subdir_im2, cat_tags[0] + '_*' + dataStructure.im_end))
        testData_fileList_im2.sort()
        print "Writing location potential as perspective probability map into %s." % outputDir

        for testData_file_im2 in testData_fileList_im2:
            # Write output data (same format as images!)
            fileName_im2 = testData_file_im2.split('/')[-1]
            ts_str = fileName_im2.split(cat_tags[0])[-1]
            fn_out = os.path.join(outputDir, cat + ts_str)
            cv2.imwrite(fn_out, locationPotential_uint8)

    print "Done: Creating perspective baseline."
def detect_barcode(imageval):
    # load the image and convert it to grayscale
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)

    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction
    gradX = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=0, dy=1, ksize=-1)

    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    # find the contours in the thresholded image, then sort the contours
    # by their area, keeping only the largest one
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.cv.BoxPoints(rect))

    # draw a bounding box around the detected barcode and write out the image
    cv2.drawContours(img_data_ndarray, [box], -1, (0, 255, 0), 3)
    #cv2.imshow("Image", image)
    #cv2.imwrite("uploads/output-" + datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ".jpg", image)
    #cv2.waitKey(0)
    #outputfile = "uploads/output-" + time.strftime("%H:%M:%S") + ".jpg"
    outputfile = "uploads/output.jpg"
    cv2.imwrite(outputfile, img_data_ndarray)
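Several of the examples above share one pattern: the image arrives as a raw byte string (an upload or a blob), gets wrapped in a 1-D uint8 array, and is decoded in memory with cv2.imdecode rather than read from disk with cv2.imread. A minimal standalone sketch of that pattern follows; note that np.fromstring, used in the snippets above, is deprecated in current NumPy and np.frombuffer is the drop-in replacement, and 'photo.png' is only a placeholder path.

import cv2
import numpy as np

def decode_unchanged(raw_bytes):
    # hypothetical helper: raw_bytes holds an encoded image (JPEG/PNG bytes)
    buf = np.frombuffer(raw_bytes, dtype=np.uint8)
    flag = getattr(cv2, 'CV_LOAD_IMAGE_UNCHANGED', cv2.IMREAD_UNCHANGED)
    return cv2.imdecode(buf, flag)  # returns None if the bytes are not a valid image

with open('photo.png', 'rb') as f:
    img = decode_unchanged(f.read())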