Python scipy.io 模块,loadmat() 实例源码

我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用scipy.io.loadmat()

项目:pybot    作者:spillai    | 项目源码 | 文件源码
def _process_label(self, fn):
        """Load a segmentation-label .mat file and remap its label ids.

        TODO: Fix one-indexing to zero-index;
        retained one-index due to uint8 constraint

        Args:
            fn: path to a .mat file containing 'seglabel' (uint8 label map,
                1-indexed) and 'names' (class name per label index).

        Returns:
            The remapped label image, padded via ``self._pad_image``.
        """
        mat = loadmat(fn, squeeze_me=True)
        _labels = mat['seglabel'].astype(np.uint8)
        # _labels -= 1 # (move to zero-index)

        labels = np.zeros_like(_labels)
        for (idx, name) in enumerate(mat['names']):
            # Map the file's class name to the target id; unknown names fall
            # back to 0 (background). FIX: catch only KeyError instead of a
            # bare except, so unrelated bugs are not silently swallowed.
            try:
                value = SUNRGBDDataset.target_hash[name]
            except KeyError:
                value = 0
            mask = _labels == idx + 1  # file labels are 1-indexed
            labels[mask] = value
        return self._pad_image(labels)
项目:structured-output-ae    作者:sbelharbi    | 项目源码 | 文件源码
def load_shapes_examples(self, path_train, path_test):
        '''Load the IOA shape data from two .mat files.

        Each file holds grey images of a shape (a disc with a hole in the
        middle) over a background (x) and the matching segmentation
        (y: 1 for the disc, 0 for the background).

        Returns a list: [(train_x, train_y), (test_x, test_y)].
        '''
        def _split(path, x_key, y_key):
            # One .mat file -> (images, segmentations) pair.
            mat = sio.loadmat(path)
            return mat[x_key], mat[y_key]

        return [_split(path_train, 'x_train', 'y_train'),
                _split(path_test, 'x_test', 'y_test')]
项目:faster-rcnn-resnet    作者:Eniac-Xie    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:Learning-to-navigate-without-a-map    作者:ToniRV    | 项目源码 | 文件源码
def load_mat_data(file_path):
    """Load Matlab data and return all data objects.

    Parameters
    ----------
    file_path : str
        the file path of the dataset

    Returns
    -------
    mat_data : dict
        The dictionary with all the data.

    Raises
    ------
    ValueError
        If ``file_path`` does not point to an existing file.
    """
    if not os.path.isfile(file_path):
        # Fail early with a clear message (FIX: previous message "The file
        # is not existed %s" was ungrammatical).
        raise ValueError("The file does not exist: %s" % (file_path,))

    return sio.loadmat(file_path)
项目:py-faster-rcnn-tk1    作者:joeking11829    | 项目源码 | 文件源码
def from_mats(imdb_name, output_dir):
    """Print per-class AP and AP-AUC from previously saved *_pr.mat files.

    Loads ``<cls>_pr.mat`` from *output_dir* for every non-background class
    of *imdb_name*, prints each class's AP / AP-AUC scaled to percent, then
    prints all APs and their mean.

    NOTE(review): Python 2 print-statement syntax; 'ap' and 'ap_auc' are
    read as 1x1 matrices -- presumably written by the matching evaluation
    code; verify against the writer.
    """
    import scipy.io as sio

    imdb = get_imdb(imdb_name)

    aps = []
    # classes[0] is skipped -- presumably the background class; TODO confirm.
    for i, cls in enumerate(imdb.classes[1:]):
        mat = sio.loadmat(os.path.join(output_dir, cls + '_pr.mat'))
        ap = mat['ap'][0, 0] * 100
        apAuC = mat['ap_auc'][0, 0] * 100
        print '!!! {} : {:.1f} {:.1f}'.format(cls, ap, apAuC)
        aps.append(ap)

    print '~~~~~~~~~~~~~~~~~~~'
    print 'Results (from mat files):'
    for ap in aps:
        print '{:.1f}'.format(ap)
    print '{:.1f}'.format(np.array(aps).mean())
    print '~~~~~~~~~~~~~~~~~~~'
项目:cortex    作者:rdevon    | 项目源码 | 文件源码
def get_data(self, source):
        '''Fetch the data from source.

        Genetic data is in the matrix format with size Subjec*SNP
        SNP can be either preprocessed or notprocessed
        Labels is a vector with diagnosis info
        Patients are coded with 1 and health control coded with 2

        Args:
            source (dict): file names of genetic data and labels
                {'snp' key for genetic data
                'labels' key for diagnosis }

        '''
        from utils.tools import get_paths
        # Resolve the configured data directory for the SNP files.
        data_path = get_paths()['$snp_data']
        print('Loading genetic data from %s' % data_path)
        # NOTE(review): the label file is read from source['label'] although
        # the docstring says 'labels' -- confirm which key callers supply.
        X = loadmat(data_path + '/' + source['snp'])
        Y = loadmat(data_path + '/' + source['label'])
        # NOTE(review): Python 2 only (indexing dict.keys()) and fragile --
        # relies on dict ordering to pick the data variable out of the
        # loadmat dict (which also holds '__header__' etc.); prefer looking
        # up the intended variable names explicitly.
        X = np.float32(X[X.keys()[2]])
        Y = np.float32(Y[Y.keys()[0]])
        # Flatten the label array to a 1-D vector in place.
        Y.resize(max(Y.shape,))
        return X, Y
项目:cortex    作者:rdevon    | 项目源码 | 文件源码
def get_data(self, source, mode):
        """Load a .mat dataset and return the split selected by *mode*.

        *mode* must be 'train', 'valid' or 'test'; anything else raises
        ValueError. Features are cast to floatX; labels are returned as-is.
        """
        data_dict = io.loadmat(source)

        split_keys = {
            'train': ('train_data', 'train_labels'),
            'valid': ('val_data', 'val_labels'),
            'test': ('test_data', 'test_labels'),
        }
        if mode not in split_keys:
            raise ValueError()
        x_key, y_key = split_keys[mode]
        X = data_dict[x_key]
        Y = data_dict[y_key]

        return X.astype(floatX), Y
项目:srep    作者:Answeror    | 项目源码 | 文件源码
def _get_data(path, preprocess):
    """Load the 'gestures' cell array from a .mat file and clean each segment.

    Each segment is cast to float32, rows 7, 15, ... 191 are removed
    (np.s_[7:192:8] -- presumably reference/unused channels; TODO confirm),
    and the result is transposed to time-major order.

    If *preprocess* is given, it is applied to every segment in parallel
    (with PREPROCESS_KARGS) through the project's joblib Context.
    """
    data = sio.loadmat(path)['gestures']
    data = [np.transpose(np.delete(segment.astype(np.float32), np.s_[7:192:8], 0))
            for segment in data.flat]
    if preprocess:
        data = list(Context.parallel(jb.delayed(preprocess)(segment, **PREPROCESS_KARGS)
                                     for segment in data))
    return data


#  @cached
#  def _get_data(path, bandstop, cut, downsample):
    #  data = sio.loadmat(path)['gestures']
    #  data = [np.transpose(np.delete(segment.astype(np.float32), np.s_[7:192:8], 0))
            #  for segment in data.flat]
    #  if bandstop:
        #  data = list(Context.parallel(jb.delayed(get_bandstop)(segment) for segment in data))
    #  if cut is not None:
        #  data = list(Context.parallel(jb.delayed(cut)(segment, framerate=FRAMERATE) for segment in data))
    #  if downsample > 1:
        #  data = [segment[::downsample].copy() for segment in data]
    #  return data
项目:py-faster-rcnn-resnet-imagenet    作者:tianzhi0549    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:py-faster-rcnn-resnet-imagenet    作者:tianzhi0549    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:brainpipe    作者:EtienneCmb    | 项目源码 | 文件源码
def loadfile(name):
    """Load a file, dispatching on its extension.

    Supported extensions: '.pickle' (pickle), '.mat' (Matlab via loadmat)
    and '.npy' (single numpy array). Returns the loaded object: a dict for
    .pickle/.mat contents, an ndarray for .npy.

    Raises:
        ValueError: for any other extension. (FIX: previously an unknown
            extension fell through and raised a confusing NameError on
            the undefined ``data`` variable.)
    """
    fileName, fileExt = splitext(name)
    # Pickle :
    if fileExt == '.pickle':
        with open(name, "rb") as f:
            data = pickle.load(f)
    # Matlab :
    elif fileExt == '.mat':
        data = loadmat(name)
    # Numpy (single array)
    elif fileExt == '.npy':
        data = np.load(name)
    else:
        raise ValueError('Unsupported file extension: {}'.format(fileExt))
    return data
项目:faster_rcnn_pytorch    作者:longcw    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        (under this dataset's data path) and build a roidb merged with
        *gt_roidb*. Boxes are deduplicated and size-filtered.
        """
        filename = os.path.abspath(os.path.join(self._data_path,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:faster_rcnn_pytorch    作者:longcw    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:faster_rcnn_pytorch    作者:longcw    | 项目源码 | 文件源码
def _load_selective_search_IJCV_roidb(self, gt_roidb):
        """Load per-image selective-search (IJCV version) proposal boxes and
        build a roidb merged with *gt_roidb*.

        Reads one .mat file per image from the IJCV data directory, keeps at
        most config['top_k'] boxes per image, and shifts coordinates from
        MATLAB's 1-based to 0-based indexing.
        """
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 'voc_' + self._year))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in xrange(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            # Keep the first top_k boxes (presumably rank-ordered; TODO
            # confirm) and convert to 0-based uint16 coordinates.
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    # evaluate detection results
项目:SSPP-DAN    作者:csehong    | 项目源码 | 文件源码
def __init__(self, matpath):
        """Load a MatConvNet-style network .mat file and build the layers.

        NOTE(review): loadmat failures are printed but not re-raised, so a
        failed load surfaces later as an AttributeError on self.matnet --
        consider re-raising instead of swallowing the error.
        """
        try:
            self.matnet = sio.loadmat(matpath)
        except IOError as e:
            print('IO Error: {0}'.format(e))
        except Exception as e:
            print("Unexpected error: {0}".format(e))

        try:
            # 'layers' is expected to be a 1xN MATLAB cell array; take row 0.
            self.matlayers = self.matnet['layers'][0]
        except KeyError as e:
            print('Key Error in mat file: {0}'.format(e))
        except Exception as e:
            print("Unexpected error: {0}".format(e))

        # Optional 'meta' entry (normalization/class info -- TODO confirm).
        if 'meta' in self.matnet.keys():
            self.matmeta = self.matnet['meta']
        self.layers = []
        self.meta = {}
        self.loadnet()
项目:Sensor-Specific-Hyperspectral-Image-Feature-Learning    作者:MeiShaohui    | 项目源码 | 文件源码
def get_indian_pines_features_from_indian_pines_model():
    """For each of the 10 trained Indian Pines models, run the Indian Pines
    5x5 mean/std data through the network and save the ip1-layer features
    (with labels) to one .mat file per model.

    NOTE(review): the same .mat file is loaded twice (for 'data' and
    'labels'); a single loadmat call would halve the I/O.
    """
    for i in range(10):
        # Plain attribute namespace, re-created per model index.
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/indian_pines/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/ip_feature_ip_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'

        # Run the Caffe model and extract the ip1 (feature) layer output.
        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
项目:Sensor-Specific-Hyperspectral-Image-Feature-Learning    作者:MeiShaohui    | 项目源码 | 文件源码
def get_salina_features_from_salina_model():
    """For each of the 10 trained Salinas models, run the Salinas 5x5
    mean/std data through the network and save the ip1-layer features
    (with labels) to one .mat file per model.

    NOTE(review): the same .mat file is loaded twice (for 'data' and
    'labels'); a single loadmat call would halve the I/O.
    """
    for i in range(10):
        # Plain attribute namespace, re-created per model index.
        class data: pass

        data.data_dir = os.path.expanduser('~/hyperspectral_datas/salina/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/salina/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/salina_feature_salina_5x5_mean_std_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'

        # Run the Caffe model and extract the ip1 (feature) layer output.
        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
项目:Sensor-Specific-Hyperspectral-Image-Feature-Learning    作者:MeiShaohui    | 项目源码 | 文件源码
def get_indian_pines_features_from_salina_model():
    """Cross-dataset transfer: run the Indian Pines 5x5 mean/std data
    through each of the 10 Salinas-trained models and save the ip1-layer
    features (with labels) to one .mat file per model.

    NOTE(review): the same .mat file is loaded twice (for 'data' and
    'labels'); a single loadmat call would halve the I/O.
    """
    for i in range(10):
        # Plain attribute namespace, re-created per model index.
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/salina/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/ip_feature_salina_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'

        # Run the Caffe model and extract the ip1 (feature) layer output.
        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
项目:Sensor-Specific-Hyperspectral-Image-Feature-Learning    作者:MeiShaohui    | 项目源码 | 文件源码
def get_salina_features_from_indian_pines_model():
    """Cross-dataset transfer: run the Salinas 5x5 mean/std data through
    each of the 10 Indian-Pines-trained models and save the ip1-layer
    features (with labels) to one .mat file per model.

    NOTE(review): the same .mat file is loaded twice (for 'data' and
    'labels'); a single loadmat call would halve the I/O.
    """
    for i in range(10):
        # Plain attribute namespace, re-created per model index.
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/salina/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/indian_pines/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/salina_feature_ip_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'

        # Run the Caffe model and extract the ip1 (feature) layer output.
        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
项目:Sensor-Specific-Hyperspectral-Image-Feature-Learning    作者:MeiShaohui    | 项目源码 | 文件源码
def get_train_test_data(label_unique, dataset_name = 'indian_pines', spatial_info='5x5_mean_std', train_nums=200, data_set_dir=''):
    """Split a hyperspectral dataset into train/test sets and write both to
    HDF5 files next to the source .mat file.

    Args:
        label_unique: iterable of class labels to include in the split.
        dataset_name: 'indian_pines' or 'salina'.
        spatial_info: which spatial feature variant to load.
        train_nums: training samples per class handed to train_test_split.
        data_set_dir: directory holding the .mat / .h5 files.

    Returns:
        (number of test samples, max(label_unique) + 1)
    """
    assert dataset_name in ['indian_pines', 'salina']
    assert spatial_info in ['1x1_mean', '3x3_mean', '3x3_mean_std', '5x5_mean', '5x5_mean_std']

    class data_set_info:pass
    # FIX: load the .mat file once instead of twice -- it holds both arrays.
    mat_path = data_set_dir + '/' + dataset_name + '_' + spatial_info + '.mat'
    mat = sio.loadmat(mat_path)
    data_set_info.data = mat['data']
    data_set_info.labels = mat['labels']
    data_set_info.h5train = data_set_dir + '/' + dataset_name + '_train_' + spatial_info + '.h5'
    data_set_info.h5test = data_set_dir + '/' + dataset_name + '_test_' + spatial_info + '.h5'
    (train_label, train_index, train_data), (test_label, test_index, test_data) = train_test_split.train_test_split(
        data_set_info.data, data_set_info.labels,
        label_unique=label_unique,
        train=train_nums)

    put_data_to_h5file({'data': train_data, 'labels': train_label, 'index': train_index}, data_set_info.h5train)
    put_data_to_h5file({'data': test_data, 'labels': test_label, 'index': test_index}, data_set_info.h5test)
    return len(test_label), max(label_unique)+1
项目:chainer-faster-rcnn    作者:mitmul    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:face-py-faster-rcnn    作者:playerkk    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:SimGAN_pytorch    作者:AlexHex7    | 项目源码 | 文件源码
def butchered_mp_normalized_matlab_helper(mat_file_path):
    """
    Normalized data is provided in matlab files in MPIIGaze Dataset and these are tricky to load with Python.
    This function was made with guessing and checking. Very frustrating.

    :param mat_file_path: Full path to MPIIGaze Dataset matlab file.
    :return: np array of images.
    """
    # NOTE(review): contrary to the docstring, nothing is returned; the
    # images are resized to 55x35 and written as PNG files (random uuid
    # names) into the module-level save_dir.
    x = sio.loadmat(mat_file_path)
    y = x.get('data')
    z = y[0, 0]

    # 'data' is a 1x1 struct with 'left'/'right' fields, each holding an
    # 'image' cell -- indexing found empirically (see docstring).
    left_imgs = z['left']['image'][0, 0]
    right_imgs = z['right']['image'][0, 0]

    for img in np.concatenate((left_imgs, right_imgs)):
        Image.fromarray(img).resize((55, 35), resample=Image.ANTIALIAS).save(
            os.path.join(save_dir, '{}.png'.format(uuid.uuid4())))

    return
项目:Automatic_Group_Photography_Enhancement    作者:Yuliang-Zou    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:RFHO    作者:lucfra    | 项目源码 | 文件源码
def load_iros15(folder=IROS15_BASE_FOLDER, resolution=15, legs='all', part_proportions=(.7, .2), one_hot=True,
                shuffle=True):
    """Load the IROS15 leg-sensor training sets, one Datasets object per leg.

    Args:
        folder: base data folder; the resolution is appended to its name.
        resolution: sensor grid resolution, one of 5, 11 or 15.
        legs: 'all' or an iterable of leg names ('LF', 'LH', 'RF', 'RH').
        part_proportions: partition proportions handed to redivide_data.
        one_hot: one-hot encode targets via to_one_hot_enc.
        shuffle: shuffle while re-partitioning.

    Returns:
        dict mapping leg name -> Datasets object.
    """
    resolutions = (5, 11, 15)
    legs_names = ('LF', 'LH', 'RF', 'RH')
    assert resolution in resolutions
    folder += str(resolution)
    if legs == 'all': legs = legs_names
    base_name_by_leg = lambda leg: os.path.join(folder, 'trainingSet%sx%sFromSensor%s.mat'
                                                % (resolution, resolution, leg))

    datasets = {}
    for _leg in legs:
        # Each .mat holds features 'X' and targets 'Y' for one leg sensor.
        dat = scio.loadmat(base_name_by_leg(_leg))
        data, target = dat['X'], to_one_hot_enc(dat['Y']) if one_hot else dat['Y']
        # maybe pre-processing??? or it is already done? ask...
        datasets[_leg] = Datasets.from_list(
            redivide_data([Dataset(data, target, info={'leg': _leg})],
                          partition_proportions=part_proportions, shuffle=shuffle))
    return datasets
项目:deep-fashion    作者:zuowang    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:RPN    作者:hfut721    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:Faster-RCNN_TF    作者:smallcorgi    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:MEM_DGM    作者:thu-ml    | 项目源码 | 文件源码
def svhn(datasets_dir=_get_datafolder_path()+"/svhn/", normalized=True, centered=False):
    """Load the SVHN train/test .mat files as flat float32 arrays.

    Images come back as (N, 3072) rows; labels exactly as stored. With
    ``normalized`` the pixels are scaled by 1/256; with ``centered`` the
    train-set column sums are subtracted from both splits.
    """
    def _split(fname):
        # (32, 32, 3, N) on disk -> (N, 3072) rows.
        mat = loadmat(datasets_dir + fname)
        flat = mat['X'].astype(np.float32).transpose((2, 0, 1, 3)).reshape((3072, -1)).T
        return flat, mat['y']

    x_train, y_train = _split("train_32x32.mat")
    x_test, y_test = _split("test_32x32.mat")
    if normalized:
        x_train = x_train / 256.0
        x_test = x_test / 256.0
    if centered:
        # NOTE: subtracts the column *sum* of the (possibly normalized)
        # training set, matching the original implementation.
        ave = x_train.sum(axis=0, keepdims=True)
        x_train = x_train - ave
        x_test = x_test - ave
    return x_train, y_train, x_test, y_test
项目:MEM_DGM    作者:thu-ml    | 项目源码 | 文件源码
def oivetti(datasets_dir=_get_datafolder_path()+"/oivetti/", normalize=True):
    '''
    url: http://www.cs.nyu.edu/~roweis/data.html
    Olivetti Faces [data/olivettifaces.mat] [picture]
    Grayscale faces 8 bit [0-255], a few images of several different people.
    400 total images, 64x64 size.
    From the Oivetti database at ATT.
    '''
    faces = loadmat(datasets_dir + "olivettifaces.mat")['faces'].astype(np.float32)
    # Columns are images: go to (image, pixels), unfold to 64x64, swap the
    # row/column axes, then flatten back to (400, 4096).
    imgs = faces.T.reshape((400, 64, 64))
    x_train = np.transpose(imgs, (0, 2, 1)).reshape((400, -1))
    return x_train / 256.0 if normalize else x_train
项目:MEM_DGM    作者:thu-ml    | 项目源码 | 文件源码
def omniglot_original(datasets_dir=_get_datafolder_path()+"/omniglot_original/"):
    """Load the original Omniglot .mat files and flatten their nested cell
    arrays into (x_train, x_test) image arrays.

    FIX: use ``range`` instead of the Python-2-only ``xrange`` (``range``
    is valid on Python 2 as well, so behavior is unchanged there).
    """
    # get omniglot dataset
    def combine(images):
        # 'images' is a nested MATLAB cell array (alphabet -> character ->
        # drawing); collect every drawing into one flat array.
        i_re = []
        for i in range(images.shape[0]):
            j_m = images[i][0].shape[0]
            for j in range(j_m):
                k_m = images[i][0][j][0].shape[0]
                for k in range(k_m):
                    i_re.append(images[i][0][j][0][k][0])
        return np.asarray(i_re)
    fname_train="data_background.mat"
    x_train = loadmat(datasets_dir+fname_train)['images']
    x_train = combine(x_train)
    fname_test="data_evaluation.mat"
    x_test = loadmat(datasets_dir+fname_test)['images']
    x_test = combine(x_test)
    return x_train, x_test
项目:FPN    作者:xmyqsh    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:TFFRCNN    作者:InterVideo    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        (under this dataset's data path) and build a roidb merged with
        *gt_roidb*. Boxes are deduplicated and size-filtered.
        """
        filename = os.path.abspath(os.path.join(self._data_path,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:TFFRCNN    作者:InterVideo    | 项目源码 | 文件源码
def _load_selective_search_roidb(self, gt_roidb):
        """Load precomputed selective-search proposal boxes from a .mat file
        and build a roidb merged with *gt_roidb*.

        The .mat file stores one cell of proposal boxes per image. Boxes are
        deduplicated and filtered by the configured minimum size.
        """
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        # 'boxes' is a MATLAB cell array: one entry of proposals per image.
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder columns (1, 0, 3, 2) and shift from MATLAB 1-based to
            # 0-based -- presumably (y1, x1, y2, x2) -> (x1, y1, x2, y2);
            # TODO confirm the stored column convention.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            # Drop duplicates, then boxes below the configured minimum size.
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)
项目:TFFRCNN    作者:InterVideo    | 项目源码 | 文件源码
def _load_selective_search_IJCV_roidb(self, gt_roidb):
        """Load per-image selective-search (IJCV version) proposal boxes and
        build a roidb merged with *gt_roidb*.

        Reads one .mat file per image from the IJCV data directory, keeps at
        most config['top_k'] boxes per image, and shifts coordinates from
        MATLAB's 1-based to 0-based indexing.
        """
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 'voc_' + self._year))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in xrange(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            # Keep the first top_k boxes (presumably rank-ordered; TODO
            # confirm) and convert to 0-based uint16 coordinates.
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    # evaluate detection results
项目:vae_renyi_divergence    作者:YingzhenLi    | 项目源码 | 文件源码
def load_data_silhouettes(path, ratio = 0.9, seed = 0):
    """Load the Caltech-101 Silhouettes (28x28, split 1) train/test images.

    Args:
        path: dataset root; the .mat file is read from '<path>silhouettes/'.
        ratio, seed: unused; kept for interface compatibility.

    Returns:
        (images_train, images_test): float32 arrays with pixel polarity
        flipped (1.0 - x).
    """
    import scipy.io
    imgs_filename = path + 'silhouettes/' \
        + 'caltech101_silhouettes_28_split1.mat'
    # FIX: the file was previously opened with open() and never used;
    # loadmat takes the filename directly, so the dangling handle is gone.
    images = scipy.io.loadmat(imgs_filename)

    images_train = images['train_data'].astype('float32')
    images_test = images['test_data'].astype('float32')
    images_val = images['val_data'].astype('float32')
    #n_validation = images_val.shape[0]
    #images_train = np.vstack((images_train, images_val))

    # flip digits?
    images_train = 1.0 - images_train
    images_test = 1.0 - images_test

    return images_train, images_test#, n_validation
项目:tsnet    作者:coxlab    | 项目源码 | 文件源码
def save(self, fn):
    """Append the weight matrices of all CONV layers to a .mat file.

    If *fn* already exists, its stored 'Ws' cell array is loaded and the
    current CONV weights are appended to it; otherwise a fresh array is
    started. No-op when *fn* is falsy.
    """
    if not fn: return

    # BUG FIX: `np.object` was removed in NumPy 1.24; the builtin `object`
    # dtype is the portable spelling and yields the same cell-array layout.
    if os.path.isfile(fn): Ws = loadmat(fn)['Ws']
    else                 : Ws = np.zeros(0, dtype=object)

    for L in self.layers:

        # Only convolutional layers carry weights worth checkpointing.
        if L.__class__.__name__ != 'CONV': continue

        Ws     = np.append(Ws, np.zeros(1, dtype=object))
        Ws[-1] = L.W

    savemat(fn, {'Ws':Ws}, appendmat=False)
项目:3D-IWGAN    作者:EdwardSmith1884    | 项目源码 | 文件源码
def read_tensor(filename, varname='voxels'):
    """Return a 4D array with dimensions (point, x, y, z) from a .mat file.

    A stored 5D array of shape (n, 1, x, y, z) has its singleton axis
    dropped; a 3D array (x, y, z) gains a leading length-1 point axis.
    Raises AssertionError if *varname* is absent or the rank is unsupported.
    """
    assert filename[-4:] == '.mat'
    mats = loadmat(filename)
    if varname not in mats:
        # Report what the file actually contains before failing.
        raise AssertionError(
            ".mat file only has these matrices: " + " ".join(mats))

    voxels = mats[varname]
    dims = voxels.shape
    if len(dims) == 5:
        assert dims[1] == 1
        dims = (dims[0],) + tuple(dims[2:])
    elif len(dims) == 3:
        # BUG FIX: `(1) + dims` added an int to a tuple (TypeError);
        # a one-element tuple `(1,)` is needed to prepend the axis.
        dims = (1,) + dims
    else:
        assert len(dims) == 4
    result = np.reshape(voxels, dims)
    return result
项目:3D-IWGAN    作者:EdwardSmith1884    | 项目源码 | 文件源码
def read_tensor(filename, varname='voxels'):
    """Return a 4D array with dimensions (point, x, y, z) from a .mat file.

    A stored 5D array of shape (n, 1, x, y, z) has its singleton axis
    dropped; a 3D array (x, y, z) gains a leading length-1 point axis.
    Raises AssertionError if *varname* is absent or the rank is unsupported.
    """
    assert filename[-4:] == '.mat'
    mats = loadmat(filename)
    if varname not in mats:
        # Report what the file actually contains before failing.
        raise AssertionError(
            ".mat file only has these matrices: " + " ".join(mats))

    voxels = mats[varname]
    dims = voxels.shape
    if len(dims) == 5:
        assert dims[1] == 1
        dims = (dims[0],) + tuple(dims[2:])
    elif len(dims) == 3:
        # BUG FIX: `(1) + dims` added an int to a tuple (TypeError);
        # a one-element tuple `(1,)` is needed to prepend the axis.
        dims = (1,) + dims
    else:
        assert len(dims) == 4
    result = np.reshape(voxels, dims)
    return result
项目:3D-IWGAN    作者:EdwardSmith1884    | 项目源码 | 文件源码
def read_tensor(filename, varname='voxels'):
    """Return a 4D array with dimensions (point, x, y, z) from a .mat file.

    A stored 5D array of shape (n, 1, x, y, z) has its singleton axis
    dropped; a 3D array (x, y, z) gains a leading length-1 point axis.
    Raises AssertionError if *varname* is absent or the rank is unsupported.
    """
    assert filename[-4:] == '.mat'
    mats = loadmat(filename)
    if varname not in mats:
        # Report what the file actually contains before failing.
        raise AssertionError(
            ".mat file only has these matrices: " + " ".join(mats))

    voxels = mats[varname]
    dims = voxels.shape
    if len(dims) == 5:
        assert dims[1] == 1
        dims = (dims[0],) + tuple(dims[2:])
    elif len(dims) == 3:
        # BUG FIX: `(1) + dims` added an int to a tuple (TypeError);
        # a one-element tuple `(1,)` is needed to prepend the axis.
        dims = (1,) + dims
    else:
        assert len(dims) == 4
    result = np.reshape(voxels, dims)
    return result
项目:main    作者:rmkemker    | 项目源码 | 文件源码
def read(fName):
    """File reader dispatching on the filename extension.

    Parameters
    ----------
    fName : string, file path to read in (supports .npy, .sav, .pkl, .mat
        and .tif (GEOTIFF)) including file extension

    Returns
    -------
    output : loaded data (format depends on the extension), or -1 when
        the extension is not recognized
    """
    ext = fName[-3:]

    if ext == 'npy':
        return np.load(fName)
    elif ext == 'sav' or ext == 'pkl':
        # BUG FIX: the original leaked the file handle; the context
        # manager closes it deterministically.
        with open(fName, 'rb') as f:
            return pickle.load(f)
    elif ext == 'mat':
        return loadmat(fName)
    elif ext == 'tif':
        return Open(fName).ReadAsArray()
    else:
        print('Unknown filename extension')
        return -1
项目:TattDL    作者:z-harry-sun    | 项目源码 | 文件源码
def from_mats(imdb_name, output_dir):
    """Print per-class AP and AP-AUC parsed from '<cls>_pr.mat' files.

    Reads one MATLAB precision/recall file per foreground class from
    *output_dir* and prints a per-class line plus a summary (including
    the mean AP).

    NOTE(review): Python 2 print-statement syntax throughout; this
    function only runs under Python 2.
    """
    import scipy.io as sio

    imdb = get_imdb(imdb_name)

    aps = []
    # classes[1:] skips index 0, which by detection convention is the
    # background class -- presumably no '_pr.mat' exists for it.
    for i, cls in enumerate(imdb.classes[1:]):
        mat = sio.loadmat(os.path.join(output_dir, cls + '_pr.mat'))
        ap = mat['ap'][0, 0] * 100        # scalar stored as a 1x1 matrix
        apAuC = mat['ap_auc'][0, 0] * 100
        print '!!! {} : {:.1f} {:.1f}'.format(cls, ap, apAuC)
        aps.append(ap)

    print '~~~~~~~~~~~~~~~~~~~'
    print 'Results (from mat files):'
    for ap in aps:
        print '{:.1f}'.format(ap)
    print '{:.1f}'.format(np.array(aps).mean())
    print '~~~~~~~~~~~~~~~~~~~'
项目:confusion    作者:abhimanyudubey    | 项目源码 | 文件源码
def test_data():
  """Write 'cars_test.txt' mapping each test image path to its label.

  Reads the Stanford Cars test annotations .mat; for each record, field
  index 5 appears to hold the image filename and field index 4 the class
  id -- TODO confirm against the cars devkit annotation schema.
  NOTE(review): Python 2 print-statement syntax; py2 only.
  """
  a = io.loadmat('cars_test_annos_withlabels.mat')
  b = a['annotations'][0]
  with open('cars_test.txt', 'w') as fout:
    for t in b:
      outstr = './data/cars_test/%s %d' % (t[5][0],t[4][0][0])
      fout.write(outstr+'\n')
      print outstr
项目:confusion    作者:abhimanyudubey    | 项目源码 | 文件源码
def train_data():
  """Write 'cars_train.txt' mapping each train image path to its label.

  Reads the Stanford Cars train annotations .mat from the devkit; for
  each record, field index 5 appears to hold the image filename and
  field index 4 the class id -- TODO confirm against the devkit schema.
  NOTE(review): Python 2 print-statement syntax; py2 only.
  """
  a = io.loadmat('devkit/cars_train_annos.mat')
  b = a['annotations'][0]
  with open('cars_train.txt', 'w') as fout:
    for t in b:
      outstr = './data/cars_train/%s %d' % (t[5][0],t[4][0][0])
      fout.write(outstr+'\n')
      print outstr
项目:pytorch-semseg    作者:meetshah1995    | 项目源码 | 文件源码
def setup(self, pre_encode=False):
        """Build the augmented train split and optionally pre-encode masks.

        Merges the SBD train ids into ``self.files['train_aug']``. When
        *pre_encode* is true, converts every SBD .mat ground-truth mask and
        every VOC trainval PNG mask into PNGs under
        ``<root>/SegmentationClass/pre_encoded/``.
        """
        sbd_path = get_data_path('sbd')
        voc_path = get_data_path('pascal')

        target_path = self.root + '/SegmentationClass/pre_encoded/'
        if not os.path.exists(target_path):
            os.makedirs(target_path)

        # BUG FIX: the file handle was opened without ever being closed;
        # the with-block releases it deterministically.
        with open(sbd_path + 'dataset/train.txt', 'r') as f:
            sbd_train_list = [id_.rstrip() for id_ in f]

        self.files['train_aug'] = self.files['train'] + sbd_train_list

        if pre_encode:
            print("Pre-encoding segmentation masks...")
            for i in tqdm(sbd_train_list):
                # SBD stores the class mask inside a MATLAB struct 'GTcls'.
                lbl_path = sbd_path + 'dataset/cls/' + i + '.mat'
                lbl = io.loadmat(lbl_path)['GTcls'][0]['Segmentation'][0].astype(np.int32)
                lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
                m.imsave(target_path + i + '.png', lbl)

            for i in tqdm(self.files['trainval']):
                lbl_path = self.root + '/SegmentationClass/' + i + '.png'
                lbl = self.encode_segmap(m.imread(lbl_path))
                lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())
                m.imsave(target_path + i + '.png', lbl)
项目:pycpd    作者:siavashk    | 项目源码 | 文件源码
def main():
    """Demo: deformable CPD registration of the 2D fish point sets,
    lifted to 3D by appending a zero z-coordinate to every point.
    """
    data = loadmat('./data/fish.mat')

    # Pad each 2D point set with a zero third column.
    X = np.hstack((data['X'], np.zeros((data['X'].shape[0], 1))))
    Y = np.hstack((data['Y'], np.zeros((data['Y'].shape[0], 1))))

    fig = plt.figure()
    axis3d = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=axis3d)

    reg = deformable_registration(X, Y)
    reg.register(callback)
    plt.show()
项目:pycpd    作者:siavashk    | 项目源码 | 文件源码
def main():
    """Demo: rigid CPD registration of the bunny point cloud against a
    copy of itself shifted by +1 along every axis.
    """
    data = loadmat('./data/bunny.mat')
    X = data['X']
    Y = X + 1

    fig = plt.figure()
    axis3d = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=axis3d)

    reg = rigid_registration(X, Y)
    reg.register(callback)
    plt.show()
项目:pycpd    作者:siavashk    | 项目源码 | 文件源码
def main():
    """Demo: rigid CPD registration of the 2D fish point sets, lifted
    to 3D by appending a zero z-coordinate to every point.
    """
    data = loadmat('./data/fish.mat')

    # Pad each 2D point set with a zero third column.
    X = np.hstack((data['X'], np.zeros((data['X'].shape[0], 1))))
    Y = np.hstack((data['Y'], np.zeros((data['Y'].shape[0], 1))))

    fig = plt.figure()
    axis3d = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=axis3d)

    reg = rigid_registration(X, Y)
    reg.register(callback)
    plt.show()
项目:pycpd    作者:siavashk    | 项目源码 | 文件源码
def main():
    """Demo: joint rigid multi-set registration (JRMPC) of the two 2D
    fish point sets; prints the registration object's state.
    """
    data = loadmat('./data/fish.mat')
    X = data['X']
    Y = data['Y']

    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])

    reg = jrmpc_rigid([X, Y])
    reg.print_self()
项目:pycpd    作者:siavashk    | 项目源码 | 文件源码
def main():
    """Demo: affine CPD registration of the two 2D fish point sets,
    visualized on a full-figure 2D axis.
    """
    data = loadmat('./data/fish.mat')
    X = data['X']
    Y = data['Y']

    fig = plt.figure()
    fig.add_axes([0, 0, 1, 1])
    callback = partial(visualize, ax=fig.axes[0])

    reg = affine_registration(X, Y)
    reg.register(callback)
    plt.show()