Python skimage.feature module: hog() example source code

We extracted the following 37 code examples from open-source Python projects to illustrate how to use skimage.feature.hog(). Note that many of these examples were written against older scikit-image releases: current versions spell the keyword visualize (the visualise spelling used in several examples below has been removed) and default block_norm to 'L2-Hys'.
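Before the project excerpts, here is a minimal self-contained sketch of the same call against the current scikit-image API; the test image and parameter values are illustrative choices, not taken from any project below:

import matplotlib.pyplot as plt
from skimage import data, exposure
from skimage.feature import hog

image = data.camera()  # built-in grayscale test image

# visualize=True returns both the feature vector and a rendering of the histograms
fd, hog_image = hog(image, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), block_norm='L2-Hys',
                    visualize=True)

# Rescale the rendering for display, as several examples below also do
hog_image = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
plt.imshow(hog_image, cmap=plt.cm.gray)
plt.show()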

Project: udacity-detecting-vehicles | Author: wonjunee
def get_hog_features(img, orient, pix_per_cell, cell_per_block, 
                        vis=False, feature_vec=True):
    # Call with two outputs if vis==True
    if vis == True:
        features, hog_image = hog(img, orientations=orient, 
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block), 
                                  transform_sqrt=True, 
                                  visualise=vis, feature_vector=feature_vec)
        return features, hog_image
    # Otherwise call with one output
    else:      
        features = hog(img, orientations=orient, 
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block), 
                       transform_sqrt=True, 
                       visualise=vis, feature_vector=feature_vec)
        return features

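A hypothetical invocation of get_hog_features (the random 64x64 patch and parameter values are assumptions for illustration, and the call assumes a scikit-image version that still accepts the visualise keyword):

import numpy as np

patch = np.random.rand(64, 64)  # stand-in for a grayscale training patch
feats = get_hog_features(patch, orient=9, pix_per_cell=8,
                         cell_per_block=2, vis=False, feature_vec=True)
print(feats.shape)  # flattened HOG feature vector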
Project: harpreif | Author: harpribot
def __update_state(self):
        """
        Updates the state space (self.gamestate) after the suggested action is taken
        :return: None
        """
        jigsaw_id, place_id = self.decode_action()
        self.__update_placed_pieces(jigsaw_id, place_id)
        if self.state_type == 'hog':
            self.__render_gamestate()
        elif self.state_type == 'image':
            resized_discrete_im = np.digitize(
                            imresize(self.jigsaw_image, (self.state_height, self.state_width)),
                            self.bins)
            self.gamestate = np.array([resized_discrete_im]).transpose().swapaxes(0, 1)

        else:
            raise ValueError('The state type is not valid, enter "hog" or "image"')
Project: SBB4-damage-tracker | Author: whorn
def trainClassifier(foldername,classifierName):
    model = cv2.ml.KNearest_create()
    features = []
    labels = []
    os.chdir(foldername)
    for filename in glob.iglob('*.png'):
        features.append(cv2.imread(filename, -1))
        labels.append(filename[0])
    list_hog_fd = []
    for feature in features:
        fd = hog(feature.reshape((27, 35)), orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)
        list_hog_fd.append(fd)
    hog_features = np.array(list_hog_fd, 'float64')
    os.chdir("..")
    clf = LinearSVC()
    clf.fit(hog_features, labels)
    joblib.dump(clf,classifierName, compress=3)
    os.chdir("..")
Project: SVM-classification-localization | Author: HandsomeHans
def getFeat(Data, mode):  # compute and save feature values
    num = 0
    for data in Data:
        image = np.reshape(data[0], (200, 200, 3))
        gray = rgb2gray(image)/255.0  # convert the image to grayscale
        fd = hog(gray, orientations, pixels_per_cell, cells_per_block, block_norm, visualize, normalize)
        fd = np.concatenate((fd, data[1]))  # append the label at the end of the array
        filename = list(data[2])
        fd_name = filename[0].split('.')[0]+'.feat'  # set the output file name
        if mode == 'train':
            fd_path = os.path.join('./features/train/', fd_name)
        else:
            fd_path = os.path.join('./features/test/', fd_name)
        joblib.dump(fd, fd_path, compress=3)  # save the features to disk
        num += 1
        print("%d saving: %s." % (num, fd_name))
Project: CS231A_Project | Author: afazel
def extract_pos_hog_features(path, num_samples):

    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:
            print(path + my_file)
            if cnt < num_samples:
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                print(im.shape)
                image = color.rgb2gray(im)
                image = image[17:145, 16:80]

                my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                features.append(my_feature)
    return features
Project: CS231A_Project | Author: afazel
def extract_neg_hog_features(path, num_samples):

    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:
            if cnt < num_samples:
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                image = color.rgb2gray(im)
                image = image[17:145, 16:80]
                #cv2.imshow('test',image)
                #cv2.waitKey(0)
                my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                features.append(my_feature)
    return features
Project: - | Author: YoPatapon
def getFeat(TrainData, TestData):
    for data in TestData:
        image = np.reshape(data[0].T, (32, 32, 3))
        gray = rgb2gray(image)/255.0
        fd = hog(gray, 9, [8, 8], [2, 2], 'L2-Hys', False, True)
        fd = np.concatenate((fd, data[1]))
        filename = list(data[2])
        fd_name = filename[0].split('.')[0]+'.feat'
        fd_path = os.path.join('./data/features/test/', fd_name)
        joblib.dump(fd, fd_path)
    print "Test features are extracted and saved."
    for data in TrainData:
        image = np.reshape(data[0].T, (32, 32, 3))
        gray = rgb2gray(image)/255.0
        fd = hog(gray, 9, [8, 8], [2, 2], 'L2-Hys', False, True)
        fd = np.concatenate((fd, data[1]))
        filename = list(data[2])
        fd_name = filename[0].split('.')[0]+'.feat'
        fd_path = os.path.join('./data/features/train/', fd_name)
        joblib.dump(fd, fd_path)
    print "Train features are extracted and saved."
Project: traffic-light-detection | Author: ranveeraggarwal
def extract_features():
    pos_img_path = positive_images_path
    neg_img_path = negative_images_path

    pos_feat_path = positive_features_path
    neg_feat_path = negative_features_path

    if not os.path.isdir(pos_feat_path):
        os.makedirs(pos_feat_path)

    if not os.path.isdir(neg_feat_path):
        os.makedirs(neg_feat_path)

    print "Extracting positive features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(pos_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(pos_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(pos_img_path, "*")))))

    print "Extracting negative features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(neg_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(neg_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(neg_img_path, "*")))))
Project: traffic-light-detection | Author: ranveeraggarwal
def test_classifier(img_path, roi_path):
    model_path = classifier_model_path
    # Load the classifier
    clf = joblib.load(model_path)

    max_win_y = 171
    max_win_x = 70

    detections = []

    regions = get_regions(roi_path)

    im = imread(img_path)
    im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
    im = cv2.split(im_ycbcr)[0]

    for region in regions:
        x = int(float(region[0])*1000)
        y = int(float(region[1])*1000)

        im_window = im[y: y + max_win_y, x: x + max_win_x]

        fd = hog(image=im_window, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)

        if len(fd) == 9234:
            prediction = clf.predict(fd.reshape(1, -1))

            if prediction == 1:
                print("Detection:: Location -> ({}, {})".format(x, y))
                print("Confidence Score {} \n".format(clf.decision_function(fd.reshape(1, -1))))
                detections.append((x, y, clf.decision_function(fd.reshape(1, -1))))

    im = imread(img_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

    for (x_tl, y_tl, _) in detections:
        cv2.rectangle(im, (x_tl, y_tl), (x_tl+max_win_x, y_tl+max_win_y), (0, 255, 0), thickness=1)
    cv2.imwrite("result.png", im)
Project: object-detector | Author: penny4860
def describe(self, images):
        features = []
        for image in images:
            feature_vector = feature.hog(image, 
                                   orientations=self._orientations, 
                                   pixels_per_cell=self._pixels_per_cell,
                                   cells_per_block=self._cells_per_block, 
                                   transform_sqrt=True)
            features.append(feature_vector)
        features = np.array(features)
        return features
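For a single image, the loop above reduces to one direct call. A minimal sketch with illustrative parameter values (the project's actual _orientations, _pixels_per_cell, and _cells_per_block settings are not shown here):

import numpy as np
from skimage import feature

image = np.random.rand(64, 64)  # any 2-D grayscale array
feature_vector = feature.hog(image, orientations=9, pixels_per_cell=(8, 8),
                             cells_per_block=(2, 2), transform_sqrt=True)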
Project: harpreif | Author: harpribot
def __init__(self, original_image, initial_gamestate, grid_dim,
                 puzzle_pieces, image_dim, window, stride, num_channels, state_type):
        """

        :param original_image: The true output expected. It is used to give reward
        :param initial_gamestate: The start state for each episode. It is all zeros.
        :param grid_dim: The number of horizontal and vertical splits each, required to form the puzzle pieces
        :param puzzle_pieces: The dictionary of puzzle piece image as value for the puzzle_piece id as key
        :param image_dim: The dimension (row/col) of the original image. The image must be a square image.
        :param window: The window dimension for HOG based state space construction
        :param stride: The stride of the sliding window for HOG
        :param num_channels: The number of channels of the state space (= number of gradients given by HOG)
        :param state_type: 'hog' -> state is windowed HOG filter ,
                           'image' -> state is just the partially solved jigsaw image
        """
        self.state_type = state_type
        self.bins = np.array([x/float(NUM_BINS) for x in range(0, NUM_BINS, 1)])
        self.original_image = original_image
        self.jigsaw_image = np.zeros([image_dim, image_dim])
        self.initial_gamestate = initial_gamestate
        self.gamestate = initial_gamestate
        self.grid_dim = grid_dim
        self.puzzle_pieces = puzzle_pieces
        self.image_dim = image_dim
        self.state_height = self.gamestate.shape[0]
        self.state_width = self.state_height
        self.window = tuple(window)
        self.num_gradients = num_channels
        self.stride = stride
        self.action = None
        self.jigsaw_id_to_placed_location = dict()
        self.placed_location_to_jigsaw_id = dict()
        self.jigsaw_split = np.split(np.array(range(self.image_dim)), self.grid_dim)
        self.steps = 0
        self.terminal = False
        self.reward = 0.
Project: harpreif | Author: harpribot
def __render_gamestate(self):
        """
        Renders the new gamestate based on the changed board condition using HOG gradients over sliding window
        :return: None
        """
        slides = sliding_window(self.jigsaw_image, self.stride, self.window)

        hog_gradients = []
        for slide in slides:
            window_image = slide[2]

            gradient = np.array(hog(image=window_image,
                                    orientations=self.num_gradients,
                                    pixels_per_cell=self.window,
                                    cells_per_block=(1, 1), visualise=False))
            assert 0 <= np.max(gradient) <= 1, "Gradients are not normalized"
            assert gradient.size == self.num_gradients, "Gradient size not equal to desired size"
            gradient = gradient_discretizer(gradient, self.bins)
            hog_gradients.extend(gradient)

        hog_gradients = np.array(hog_gradients)

        hog_gradients = hog_gradients.reshape((self.state_height, self.state_width, self.num_gradients))

        assert self.gamestate.shape == hog_gradients.shape, "The state dimension is trying to be altered"
        self.gamestate = hog_gradients
Project: harpreif | Author: harpribot
def __init__(self, image_dir, checkpoint_dir, checkpoint_iter, num_actions, num_gradients, state_type):
        """

        :param image_dir: The test directory for images
        :param checkpoint_dir: The checkpoint containing the best learnt model weights and biases
        :param num_actions: Number of actions that the agent can take
        :param num_gradients: Number of gradients to be used for each window
        :param state_type: 'hog' for using windowed HOG gradient as state, 'image' for using raw images itself
        """
        self.state_type = state_type
        self.image_dir = image_dir
        self.bins = np.array([x / float(NUM_BINS) for x in range(0, NUM_BINS, 1)])
        self.sess = None
        self.checkpoint_dir = checkpoint_dir
        self.checkpoint_iter = checkpoint_iter
        self.num_actions = num_actions
        self.num_gradients = num_gradients
        if self.state_type == 'hog':
            self.input_channels = self.num_gradients
        elif self.state_type == 'image':
            self.input_channels = 1
        else:
            raise ValueError('State type not recognized, enter hog or image')

        self.input_height = len(range(0, IMAGE_HEIGHT - SLIDING_STRIDE, SLIDING_STRIDE))
        self.input_width = self.input_height
        self.imagenet = None
        # self.feature_dict = dict()
        self.state_height = self.input_height
        self.state_width = self.state_height
        self.save_transform = True
        self.im2f_loc = None
        self.feature_size = None
        Creator.__init__(self, self.input_channels, self.num_actions, self.input_height, self.input_width)
Project: harpreif | Author: harpribot
def __get_input_for_model(self, image):
        """
        Renders the new gamestate based on the changed board condition using HOG gradients over sliding window
        :return: None
        """
        state = None
        if self.state_type == 'hog':
            slides = sliding_window(image, SLIDING_STRIDE, WINDOW_SIZE)

            hog_gradients = []
            for slide in slides:
                window_image = slide[2]

                gradient = np.array(hog(window_image,
                                        orientations=self.num_gradients,
                                        pixels_per_cell=WINDOW_SIZE,
                                        cells_per_block=(1, 1), visualise=False))

                assert gradient.size == self.num_gradients, "Gradient size not equal to desired size"
                gradient = gradient_discretizer(gradient, self.bins)
                hog_gradients.extend(gradient)

            hog_gradients = np.array(hog_gradients)

            hog_gradients = hog_gradients.reshape((self.state_height, self.state_width, self.num_gradients))

            assert hog_gradients.shape == (self.input_height, self.input_width, self.input_channels), \
                "The state dimension is trying to be altered"
            state = hog_gradients

        elif self.state_type == 'image':
            resized_discrete_im = np.digitize(
                imresize(image, (self.state_height, self.state_width)),
                self.bins)
            state = np.array([resized_discrete_im]).transpose().swapaxes(0, 1)

        else:
            raise ValueError('The state type is not valid, enter "hog" or "image"')

        return state
Project: Sign-Language-Recognition | Author: achyudhk
def generate_hog_features(image_arr):
    fd = hog(image_arr, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(2, 2), visualise=False)
    return fd
Project: Sign-Language-Recognition | Author: achyudhk
def generate_hog_features(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
Project: Sign-Language-Recognition | Author: achyudhk
def save_hog_image_comparison(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    out_filename = "hog/" + filename

    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    # io.imsave("hog/" + filename, hog_image)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(gray_image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax2.set_adjustable('box-forced')
    plt.savefig(out_filename)
    plt.close()

    return hog_image
Project: Sign-Language-Recognition | Author: achyudhk
def generate_hog_features(image_arr):
    fd = hog(image_arr, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(2, 2), visualise=False)
    # hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return fd
Project: Sign-Language-Recognition | Author: achyudhk
def generate_hog_features(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
Project: Sign-Language-Recognition | Author: achyudhk
def save_hog_image_comparison(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    out_filename = "hog/" + filename

    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    # io.imsave("hog/" + filename, hog_image)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(gray_image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax2.set_adjustable('box-forced')
    plt.savefig(out_filename)
    plt.close()

    return hog_image
Project: ml-traffic | Author: Zepheus
def __init__(self, orientations=5, pixels_per_cell=(8, 8), cells_per_block=(3, 3), resize=96):
        self.transform = PrepCombiner([BWTransform(), ResizeTransform(resize)])
        self.orientations = orientations
        self.pixels_per_cell = pixels_per_cell
        self.cells_per_block = cells_per_block

    # Process the hog feature
Project: ml-traffic | Author: Zepheus
def process(self, im):
        greyscaled = im.prep(self.transform)
        fd = hog(greyscaled, orientations=self.orientations, pixels_per_cell=self.pixels_per_cell,
                 cells_per_block=self.cells_per_block, visualise=False)
        return fd
Project: lantern-detection | Author: gongxijun
def get_similarIamgeRect(*args):
    """
    Score one sliding-window crop with the trained HOG classifier
    and record it as a detection if its probability is high enough.
    :param img: image at the current pyramid scale
    :param lefttop_x: left coordinate of the window
    :param lefttop_y: top coordinate of the window
    :return: None
    """
    kargs = args[0][0]
    img = kargs[0]
    lefttop_x = kargs[1]
    lefttop_y = kargs[2]
    min_wdw_sz = kargs[3]
    downscale = kargs[4]
    scale = kargs[5]

    # Calculate the HOG features
    right_y = lefttop_y + min_wdw_sz[1]
    right_x = lefttop_x + min_wdw_sz[0]
    fd = hog(img[lefttop_y:right_y, lefttop_x:right_x],
             orientations,
             pixels_per_cell,
             cells_per_block,
             visualize,
             normalize)
    prod = clf.predict_proba([fd])[0][1]
    prod = round(prod, 3)
    if prod >= 0.90:  # keep only high-confidence detections
        #mutex.acquire()
        detections.append((int(lefttop_x * (downscale ** scale)), int(lefttop_y * (downscale ** scale)), prod,
                           int(min_wdw_sz[0] * (downscale ** scale)),
                           int(min_wdw_sz[1] * (downscale ** scale))))
        print(_thread.current_thread(), "Detection:: Location -> ({}, {})".format(lefttop_x, lefttop_y))
        print(_thread.current_thread(), "Scale ->  {} | Confidence Score {} \n".format(scale, prod))
        #mutex.release()
Project: lantern-detection | Author: gongxijun
def get_similarIamgeRect(*args):
    """
    Score one sliding-window crop with the trained HOG classifier
    and record it as a detection if its probability is high enough.
    :param img: image at the current pyramid scale
    :param lefttop_x: left coordinate of the window
    :param lefttop_y: top coordinate of the window
    :return: None
    """
    kargs = args[0]
    img = kargs[0]
    lefttop_x = kargs[1]
    lefttop_y = kargs[2]
    min_wdw_sz = kargs[3]
    downscale = kargs[4]
    scale = kargs[5]

    # Calculate the HOG features
    right_y = lefttop_y + min_wdw_sz[1]
    right_x = lefttop_x + min_wdw_sz[0]
    fd = hog(img[lefttop_y:right_y, lefttop_x:right_x],
             orientations,
             pixels_per_cell,
             cells_per_block,
             visualize,
             normalize)
    prod = clf.predict_proba([fd])[0][1]
    prod = round(prod, 3)
    if prod >= 0.85:  # keep only high-confidence detections
        # mutex.acquire()
        detections.append((int(lefttop_x * (downscale ** scale)), int(lefttop_y * (downscale ** scale)), prod,
                           int(min_wdw_sz[0] * (downscale ** scale)),
                           int(min_wdw_sz[1] * (downscale ** scale))))
        # print(_thread.current_thread(), "Detection:: Location -> ({}, {})".format(lefttop_x, lefttop_y))
        # print(_thread.current_thread(), "Scale ->  {} | Confidence Score {} \n".format(scale, prod))
        # mutex.release()
Project: lantern-detection | Author: gongxijun
def get_similarIamgeRect(*args):
    """
    Score one sliding-window crop with the trained HOG classifier
    and record it as a detection if its probability is high enough.
    :param img: image at the current pyramid scale
    :param lefttop_x: left coordinate of the window
    :param lefttop_y: top coordinate of the window
    :return: None
    """
    kargs = args[0]
    img = kargs[0]
    lefttop_x = kargs[1]
    lefttop_y = kargs[2]
    min_wdw_sz = kargs[3]
    downscale = kargs[4]
    scale = kargs[5]

    # Calculate the HOG features
    right_y = lefttop_y + min_wdw_sz[1]
    right_x = lefttop_x + min_wdw_sz[0]
    fd = hog(img[lefttop_y:right_y, lefttop_x:right_x],
             orientations,
             pixels_per_cell,
             cells_per_block,
             visualize,
             normalize)
    prod = clf.predict_proba([fd])[0][1]
    prod = round(prod, 3)
    if prod >= 0.85:  # keep only high-confidence detections
        mutex.acquire()
        detections.append((int(lefttop_x * (downscale ** scale)), int(lefttop_y * (downscale ** scale)), prod,
                           int(min_wdw_sz[0] * (downscale ** scale)),
                           int(min_wdw_sz[1] * (downscale ** scale))))
        print(_thread.current_thread(), "Detection:: Location -> ({}, {})".format(lefttop_x, lefttop_y))
        print(_thread.current_thread(), "Scale ->  {} | Confidence Score {} \n".format(scale, prod))
        mutex.release()
Project: BanglaDigit | Author: sezan92
def ReadImages(ListName,FolderName,Label):
    global NumberList
    global responseData
    global trainData
    global hog
    global cv2
    global imutils
    global winSize
    global testData
    for image in ListName:
        img = cv2.imread(join(FolderName,image))
        img = cv2.resize(img,(28,28))   
        feature = HOG(cv2.cvtColor(img,cv2.COLOR_RGB2GRAY))
        trainData.append(feature.T)
        responseData.append(Label)
Project: activitynet-essentials | Author: alex-paterson
def get_hog(image):
        image = color.rgb2gray(image)
        imgplot = plt.imshow(image, cmap=plt.cm.gray)
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
            cells_per_block=(1, 1), visualise=True)
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
            in_range=(0, 0.02))
        return hog_image_rescaled
Project: SBB4-damage-tracker | Author: whorn
def testClassifier(foldername,classifier):
    clf = joblib.load(classifier)
    os.chdir(foldername)
    correct = 0
    total = 0
    for filename in glob.iglob('*.png'):
        img = cv2.imread(filename,-1)
        roi = hog(img.reshape((27, 35)), orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)
        prediction = clf.predict([roi])[0]
        if prediction == filename[0]:
            correct += 1
        total += 1
    print(total)
    print(correct)
Project: SBB4-damage-tracker | Author: whorn
def imageToDamageArray(image_array):
    dmg = [[],[]]
    lastdamage = [0,0]
    for player in range(2):
        for time in range(len(image_array[1])):
            three_cell1 = CLF.predict([hog(image_array[player][time][4:39,2:29].reshape((27, 35)),orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)])[0]
            three_cell2 = CLF.predict([hog(image_array[player][time][4:39,23:50].reshape((27, 35)),orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)])[0]
            three_cell3 = CLF.predict([hog(image_array[player][time][4:39,48:75].reshape((27, 35)),orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)])[0]
            two_cell1 = CLF.predict([hog(image_array[player][time][4:39,13:40].reshape((27, 35)),orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)])[0]
            two_cell2 =  CLF.predict([hog(image_array[player][time][4:39,37:64].reshape((27, 35)),orientations=9, pixels_per_cell=(9, 7), cells_per_block=(1, 1), visualise=False)])[0]
            if three_cell1 != "-" and three_cell2 != "-" and three_cell3 != "-":
                if int(three_cell1)*100+int(three_cell2)*10-lastdamage[player]>50:
                    #print(three_cell1,three_cell2,three_cell3)
                    #print(two_cell1,two_cell2)
                    if two_cell1 != "-" and two_cell2 != "-" and two_cell1 != 0:
                        dmg[player].append(10*int(two_cell1)+int(two_cell2))
                        lastdamage[player]=dmg[player][-1]
                    else:
                        dmg[player].append("-")
                else:
                    dmg[player].append(100*int(three_cell1)+10*int(three_cell2)+int(three_cell3))
                    lastdamage[player]=dmg[player][-1]
            elif two_cell1 != "-" and two_cell2 != "-" and two_cell1!= 0:
                dmg[player].append(10*int(two_cell1)+int(two_cell2))
                lastdamage[player]=dmg[player][-1]
            elif three_cell1 == "-" and three_cell2 != "-" and three_cell3 == "-":
                dmg[player].append(int(three_cell2))
            else:
                dmg[player].append("-")
    return dmg
Project: SVM-classification-localization | Author: HandsomeHans
def getFeat(data):
    normalize = True
    visualize = False
    block_norm = 'L2-Hys'
    cells_per_block = [2,2]
    pixels_per_cell = [20,20]
    orientations = 9
    gray = rgb2gray(data)/255.0
    fd = hog(gray, orientations, pixels_per_cell, cells_per_block, block_norm, visualize, normalize)
    return fd
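Assuming these projects target the scikit-image 0.13-era signature (an assumption, not confirmed from the repositories), the positional call above maps to the keyword form below; note the trailing normalize flag lands on transform_sqrt rather than on a normalization switch:

# Assumed keyword equivalent of the positional call above, under
# hog(image, orientations, pixels_per_cell, cells_per_block,
#     block_norm, visualise, transform_sqrt, ...):
fd = hog(gray, orientations=orientations, pixels_per_cell=pixels_per_cell,
         cells_per_block=cells_per_block, block_norm=block_norm,
         visualise=visualize, transform_sqrt=normalize)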
Project: SVM-classification-localization | Author: HandsomeHans
def getFeat(data):
    normalize = True
    visualize = False
    block_norm = 'L2-Hys'
    cells_per_block = [2,2]
    pixels_per_cell = [20,20]
    orientations = 9
    gray = rgb2gray(data)/255.0
    fd = hog(gray, orientations, pixels_per_cell, cells_per_block, block_norm, visualize, normalize)
    return fd
Project: SVM-classification-localization | Author: HandsomeHans
def getFeat(data):
    gray = rgb2gray(data)/255.0
    fd = hog(gray, orientations, pixels_per_cell, cells_per_block, block_norm, visualize, normalize)
    return fd
Project: csdm | Author: moliusimon
def _extract(self, images, coords, mapping, args):
        assert images.shape[1] == images.shape[2]
        n_inst = coords.shape[0]

        nb = args.get('num_bins', 8)
        win_sizes = args.get('window_sizes', 32)
        win_sizes = win_sizes if isinstance(win_sizes, np.ndarray) else np.ones((n_inst,), dtype=np.int32) * win_sizes

        # Prepare descriptors
        descriptors = np.zeros(tuple(coords.shape[:2])+(nb*4*4,), dtype=np.float32)

        # Fill descriptors
        coords, vis = np.copy(coords), np.zeros(coords.shape[:2], dtype=np.bool)
        for i, (c, mp, ws) in enumerate(zip(coords, mapping, win_sizes)):
            hsize, qsize = ws // 2, ws // 4  # integer sizes for slicing

            # Pad image, set landmarks visibility
            im, c = np.pad(images[mp, ...], ((hsize, hsize), (hsize, hsize)), 'constant', constant_values=0), c+hsize
            ims = im.shape[0] - hsize
            vis[i, :] = (c[:, 0] >= hsize) & (c[:, 1] >= hsize) & (c[:, 0] < ims) & (c[:, 1] < ims)

            # Extract descriptors from each interest window
            for j, (jc, jv) in enumerate(zip(c, vis[i, :])):
                descriptors[i, j, :] = hog(
                    im[jc[0]-hsize:jc[0]+hsize, jc[1]-hsize:jc[1]+hsize],
                    orientations=nb,
                    pixels_per_cell=(qsize, qsize),
                    cells_per_block=(1, 1)
                ) if jv else 0

        # Normalize descriptors, return extracted information
        return descriptors.reshape((len(mapping), -1)), vis
Project: csdm | Author: moliusimon
def _extract(self, images, coords, mapping, args):
        assert images.shape[1] == images.shape[2]
        n_inst = coords.shape[0]

        nb = args.get('num_bins', 8)
        rotations = args.get('rotations', np.zeros((n_inst,), dtype=np.float32))
        win_sizes = args.get('window_sizes', 32)
        win_sizes = win_sizes if isinstance(win_sizes, np.ndarray) else np.ones((n_inst,), dtype=np.int32) * win_sizes

        # Prepare descriptors
        descriptors = np.zeros(tuple(coords.shape[:2])+(nb*4*4,), dtype=np.float32)

        # Fill descriptors
        coords, vis = np.copy(coords) - images.shape[1] / 2.0, np.empty(coords.shape[:2], dtype=np.bool)
        for i, (c, r, mp, ws) in enumerate(zip(coords, rotations, mapping, win_sizes)):
            hsize, qsize = ws // 2, ws // 4  # integer sizes for slicing

            # Get maximum window half-size, rotate and pad image
            im = np.pad(
                rotate(images[mp, ...], 57.2957*r),
                ((hsize, hsize), (hsize, hsize)),
                'constant', constant_values=0
            )

            # Rotate geometry, set landmarks visibility
            ims = im.shape[0] - hsize
            c = np.dot(c, np.array([[np.cos(r), np.sin(r)], [-np.sin(r),  np.cos(r)]])) + im.shape[0] / 2.0
            vis[i, :] = (c[:, 0] >= hsize) & (c[:, 1] >= hsize) & (c[:, 0] < ims) & (c[:, 1] < ims)

            # Extract descriptors from each interest window
            for j, (jc, jv) in enumerate(zip(c, vis[i, :])):
                descriptors[i, j, :] = hog(
                    im[jc[0]-hsize:jc[0]+hsize, jc[1]-hsize:jc[1]+hsize],
                    orientations=nb,
                    pixels_per_cell=(qsize, qsize),
                    cells_per_block=(1, 1)
                ) if jv else 0

        # Normalize descriptors, return extracted information
        return descriptors.reshape((len(mapping), -1)), vis
Project: CS231A_Project | Author: afazel
def extract_pos_hog_features2(path, num_samples):
    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:
            if cnt < num_samples:
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                image = color.rgb2gray(im)
                my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                features.append(my_feature)
    return features
Project: CS231A_Project | Author: afazel
def neg_hog_rand(path, num_samples, window_size, num_window_per_image):
    rows = window_size[0]
    cols = window_size[1]
    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:

            if cnt < num_samples:
                print(cnt, my_file)
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                image = color.rgb2gray(im)
                image_rows = image.shape[0]
                image_cols = image.shape[1]

                for i in range(0,num_window_per_image):
                    x_min = random.randrange(0,image_rows - rows)
                    y_min = random.randrange(0,image_cols - cols)

                    x_max = x_min + rows
                    y_max = y_min + cols

                    image_hog = image[x_min:x_max , y_min:y_max]

                    my_feature, _ = hog(image_hog, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                    features.append(my_feature)
    return features
Project: CS231A_Project | Author: afazel
def detector(my_im, weight,bias, scale):
    window_size = [128, 64]
    block_size = 4
    cell_size = 8
    min_height = 128
    min_width = 64
    orient = 9
    thresh = 0

    total_block_size = block_size * cell_size

    curr_depth = 0

    for im in ip.createImagePyramid(my_im, scale, min_height, min_width):
        curr_depth +=1
        H = im.shape[0]
        W = im.shape[1]
        dim_size_feat = weight.shape[1]
        for h in range(0, H, total_block_size // 2):
            for w in range(0, W, total_block_size // 2):
                if (window_size[1] + w <= W) and (window_size[0] + h <= H):
                    fd, _ = hog(im[h:(window_size[0]+h), w:(window_size[1]+w)],
                                orientations=orient, pixels_per_cell=(cell_size, cell_size),
                                cells_per_block=(block_size, block_size), visualise=True)

                    score_calc = np.dot(np.reshape(fd, (1, dim_size_feat)), np.transpose(weight)) + bias
                    if score_calc[0][0] >= thresh:
                        print(score_calc[0][0])
                        cv2.imshow("Detected Pedestrian", my_im)
                        cv2.waitKey(25)
                        return score_calc[0][0]

    return False