Python scipy.io module: savemat() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use scipy.io.savemat().
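
Before the project examples, here is a minimal self-contained round-trip sketch (not taken from any project below): savemat() writes a dict of NumPy arrays to a .mat file, and loadmat() reads it back.

import numpy as np
import scipy.io as sio

# save two arrays under the MATLAB variable names 'a' and 'label'
a = np.arange(6).reshape(2, 3)
sio.savemat('example.mat', {'a': a, 'label': np.array([1, 2, 3])})

# loadmat returns a dict keyed by variable name
back = sio.loadmat('example.mat')
assert back['a'].shape == (2, 3)
assert back['label'].shape == (1, 3)  # 1-D arrays come back 2-D: MATLAB has no 1-D arrays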

Project: structured-output-ae    Author: sbelharbi    | project source | file source
def test():

    path_text_for = 'D171.png'
    path_text_back = 'D771.png'
    # foreground/background texture images
    im_for = misc.imread(path_text_for)
    im_back = misc.imread(path_text_back)
    size = im_for.shape
    s = size[0]    # size of the image (squared matrix)
    # number of images
    nbr_ims = 10
    train = True
    # generating the images
    data, data_labels = generate_brodatz_texture(nbr_ims, s, im_back, im_for)
    if train: # train
        sio.savemat('../data/train.mat', dict([('x_train', data), ('y_train', data_labels)]))
    else:     # test
        sio.savemat('../data/test.mat', dict([('x_test', data), ('y_test', data_labels)]))
Project: dwt    Author: min2209    | project source | file source
def modelSaver(sess, modelSavePath, savePrefix, iteration, maxToKeep=5):
    allWeights = {}

    for name in [n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]:
        param = sess.run(name)
        nameParts = re.split('[:/]', name)
        saveName = nameParts[-4]+'/'+nameParts[-3]+'/'+nameParts[-2]
        allWeights[saveName] = param

        # print "Name: %s Mean: %.3f Max: %.3f Min: %.3f std: %.3f" % (name,
        #                                                              param.mean(),
        #                                                              param.max(),
        #                                                              param.min(),
        #                                                              param.std())
        # if name == "depth/fcn2/weights:0":
        #     for j in range(outputChannels):
        #         print "ch: %d, max %e, min %e, std %e" % (
        #             j, param[:, :, :, j].max(), param[:, :, :, j].min(), param[:, :, :, j].std())

    # raw_input("done")

    sio.savemat(os.path.join(modelSavePath, savePrefix+'_%03d'%iteration), allWeights)
Project: brainpipe    Author: EtienneCmb    | project source | file source
def savefile(name, *arg, **kwargs):
    """Save a file without carrying of extension.

    arg: for .npy extension
    kwargs: for .pickle or .mat extensions
    """
    name = _safetySave(name)
    fileName, fileExt = splitext(name)
    # Pickle :
    if fileExt == '.pickle':
        with open(name, 'wb') as f:
            pickle.dump(kwargs, f)
    # Matlab :
    elif fileExt == '.mat':
        savemat(name, kwargs)
    # Numpy (single array) :
    elif fileExt == '.npy':
        np.save(name, arg)
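
A hypothetical call pattern for savefile() (file names invented for illustration, assuming numpy is imported as np and the module's _safetySave helper is available): the extension alone decides which branch runs.

x = np.random.rand(4, 4)
savefile('scores.mat', x=x, meta=np.array([1, 2]))  # keyword args -> savemat dict
savefile('scores.pickle', x=x)                      # keyword args -> pickled dict
savefile('scores.npy', x)                           # positional args -> np.save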
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def get_indian_pines_features_from_indian_pines_model():
    for i in range(10):
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/indian_pines/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/ip_feature_ip_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'

        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def get_salina_features_from_salina_model():
    for i in range(10):
        class data: pass

        data.data_dir = os.path.expanduser('~/hyperspectral_datas/salina/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/salina/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/salina_feature_salina_5x5_mean_std_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'

        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def get_indian_pines_features_from_salina_model():
    for i in range(10):
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/salina/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/ip_feature_salina_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'

        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def get_salina_features_from_indian_pines_model():
    for i in range(10):
        class data: pass

        data.data_dir = os.path.expanduser('../hyperspectral_datas/salina/data/')
        data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
        data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
        data.result_dir = '../result/indian_pines/bn_net_200/feature'
        mkdir_if_not_exist(data.result_dir)
        data.result_file = data.result_dir + '/salina_feature_ip_model_{}.mat'.format(i)
        data.iters = 2000000

        pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
                                                                                                                  data.iters)
        deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'

        getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
        getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
        getFeature.get_ip1()

        data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
        sio.savemat(data.result_file, data.result_dict)
Project: Math412S2017    Author: ctralie    | project source | file source
def plotLinePatches(P, name):
    plotPatches(P)
    plt.savefig("%sPatches.svg"%name, bbox_inches='tight')
    plt.clf()
    sio.savemat("P%s.mat"%name, {"P":P})

    plt.subplot(121)
    PDs = doRipsFiltration(P, 2, coeff = 2)
    print PDs[2]
    H1 = plotDGM(PDs[1], color = np.array([1.0, 0.0, 0.2]), label = 'H1', sz = 50, axcolor = np.array([0.8]*3))
    plt.hold(True)
    H2 = plotDGM(PDs[2], color = np.array([0.43, 0.67, 0.27]), marker = 'x', sz = 50, label = 'H2', axcolor = np.array([0.8]*3))
    plt.title("$\mathbb{Z}2$ Coefficients")

    plt.subplot(122)
    PDs = doRipsFiltration(P, 2, coeff = 3)
    print PDs[2]
    H1 = plotDGM(PDs[1], color = np.array([1.0, 0.0, 0.2]), label = 'H1', sz = 50, axcolor = np.array([0.8]*3))
    plt.hold(True)
    H2 = plotDGM(PDs[2], color = np.array([0.43, 0.67, 0.27]), marker = 'x', sz = 50, label = 'H2', axcolor = np.array([0.8]*3))
    plt.title("$\mathbb{Z}3$ Coefficients")
    plt.show()
Project: Particle-Picking-Cryo-EM    Author: hqythu    | project source | file source
def main():
    MAT_PATH = './mat/train'
    LABEL_PATH = './label/train'

    train_x = np.zeros((0, 64, 64)).astype('uint8')
    train_y = np.zeros((0)).astype('uint8')
    for dirpath, dirnames, filenames in os.walk(MAT_PATH):
        print(dirpath)
        for filename in filenames:
            if filename == 'full.mat':
                img = sio.loadmat(os.path.join(dirpath, filename))['data']
                img_id = dirpath.split('/')[-1]
                label_file = os.path.join(LABEL_PATH, img_id + '.mat')
                labels = sio.loadmat(label_file)['label']
                X, y = sliding(img, labels)
                train_x = np.concatenate([train_x, X], axis=0)
                train_y = np.concatenate([train_y, y], axis=0)
    sio.savemat('data_new.mat', {
        'train_x': train_x,
        'train_y': train_y
    })
Project: Particle-Picking-Cryo-EM    Author: hqythu    | project source | file source
def main():
    MAT_DIR = './mat_new/train'
    train_x = np.zeros((0, 64, 64)).astype('uint8')
    train_y = np.zeros((0)).astype('uint8')
    for dirpath, dirnames, filenames in os.walk(MAT_DIR):
        print(dirpath)
        for filename in filenames:
            if filename == 'train.mat':
                data = sio.loadmat(os.path.join(dirpath, filename))
                train_x = np.append(train_x, data['train_x'], axis=0)
                train_y = np.append(train_y, data['train_y'].reshape(-1),
                                    axis=0)
    sio.savemat('data_new.mat', {
        'train_x': train_x,
        'train_y': train_y
    })
Project: SPAWC2017    Author: Haoran-S    | project source | file source
def test(X, model_location, save_name, n_input, n_output, n_hidden_1 = 200, n_hidden_2 = 80, n_hidden_3 = 80, binary=0):
    tf.reset_default_graph()
    x = tf.placeholder("float", [None, n_input])
    is_train = tf.placeholder("bool")
    input_keep_prob = tf.placeholder(tf.float32)
    hidden_keep_prob = tf.placeholder(tf.float32)
    weights, biases = ini_weights(n_input, n_hidden_1, n_hidden_2, n_hidden_3, n_output)
    pred = multilayer_perceptron(x, weights, biases, input_keep_prob, hidden_keep_prob)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_location)
        start_time = time.time()
        y_pred = sess.run(pred, feed_dict={x: np.transpose(X), input_keep_prob: 1, hidden_keep_prob: 1, is_train: False})
        testtime = time.time() - start_time
        # print("testing time: %0.2f s" % testtime)
        if binary==1:
            y_pred[y_pred >= 0.5] = 1
            y_pred[y_pred < 0.5] = 0
        sio.savemat(save_name, {'pred': y_pred})
    return testtime
Project: laplacian-meshes    Author: bmershon    | project source | file source
def doLaplacianSolveWithConstraints(self, evt):
        anchorWeights = 1e8
        anchors = np.zeros((len(self.laplacianConstraints), 3))
        i = 0
        anchorsIdx = []
        for anchor in self.laplacianConstraints:
            anchorsIdx.append(anchor)
            anchors[i, :] = self.laplacianConstraints[anchor]
            i += 1

        #IGL Cotangent weights
        (L, M_inv, solver, deltaCoords) = makeLaplacianMatrixSolverIGLSoft(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
        self.mesh.VPos = solveLaplacianMatrixIGLSoft(solver, L, M_inv, deltaCoords, anchorsIdx, anchors, anchorWeights)

#        #My umbrella weights
#        L = makeLaplacianMatrixUmbrellaWeights(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
#        deltaCoords = L.dot(self.mesh.VPos)[0:self.mesh.VPos.shape[0], :]
#        self.mesh.VPos = np.array(solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights), dtype=np.float32)

        sio.savemat("anchors.mat", {'deltaCoords':deltaCoords, 'anchors':anchors, 'anchorsIdx':np.array(anchorsIdx)})
        self.mesh.needsDisplayUpdate = True
        self.mesh.updateIndexDisplayList()
        self.Refresh()
Project: pulse2percept    Author: uwescience    | project source | file source
def savemoviefiles(filestr, data, path='savedImages/'):
    """Saves a brightness movie to .npy, .mat, and .avi format

    Parameters
    ----------
    filestr : str
        Name of the resulting files without file type (suffix .npy, .mat
        or .avi will be appended)
    data : array
        A 3-D NumPy array containing the data, such as the `data` field of
        a utils.TimeSeries object
    path : str, optional
        Path to directory where files should be saved.
        Default: savedImages/
    """
    np.save(path + filestr, data)  # save as npy
    sio.savemat(path + filestr + '.mat', dict(mov=data))  # save as matfile
    npy2movie(path + filestr + '.avi', data)  # save as avi
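A hedged usage sketch for savemoviefiles() with a small random volume. Note that the function concatenates path + filestr directly, so path must end with a separator and the directory must already exist; npy2movie is internal to pulse2percept.

import numpy as np

movie = np.random.rand(24, 32, 10)            # e.g. rows x cols x frames
savemoviefiles('demo', movie, path='savedImages/')
# -> savedImages/demo.npy, savedImages/demo.mat (variable 'mov'), savedImages/demo.avi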
Project: TerpreT    Author: 51alg    | project source | file source
def save_to_mat(self, args):
        if OBJECTIVE_AS_CONSTRAINT:
            self.convert_objective_to_constraint()
        #self.penalize_new_states()
        #self.link_to_next_block()

        if len(args) > 1:
            out_filename = args[1]
        else:
            out_filename = 'A_b_obj.mat'

        pickled_var_dict = pkl.dumps(self.var_dict)

        sio.savemat(out_filename, {
            'A' : {'values' : np.array(self.A), 'dims' : [self.eq_count, self.total_var_length] },
            'b' : {'values' : np.array(self.b), 'dims' : [self.eq_count] },
            'objective' : {'values' : np.array(self.objective), 'dims' : [self.total_var_length] },
            'lb' : {'values' : [], 'dims' : [self.total_var_length]},
            'ub_minus_1' : {'values' : [], 'dims' : [self.total_var_length]},
            'int_flag' : {'values' : np.array(self.int_flag), 'dims' : [self.total_var_length] },
            'constraintLabels' : self.constr_labels,
            'var_dict_pkl' : pickled_var_dict
        })
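A side note on the nested dicts above: savemat() stores a dict value as a MATLAB struct. A minimal sketch (not from TerpreT) of how the fields come back:

import numpy as np
import scipy.io as sio

sio.savemat('demo_struct.mat', {'A': {'values': np.zeros((2, 3)), 'dims': [2, 3]}})
back = sio.loadmat('demo_struct.mat')
# 'A' loads as a MATLAB-style struct array; fields are indexed with [0, 0]
values = back['A']['values'][0, 0]   # shape (2, 3)
dims = back['A']['dims'][0, 0]       # [[2, 3]]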
Project: gps_superball_public    Author: young-geng    | project source | file source
def _save_superball_controllers(self):
        import cPickle as pickle
        import numpy as np
        from scipy import io
        try:
            controllers = io.loadmat('init_controllers.mat')
        except IOError:
            controllers = {}
        for m in range(self._conditions):
            t = self.algorithm.cur[m].traj_distr
            if 'save' in self._hyperparams['algorithm']['init_traj_distr']:
                s = self._hyperparams['algorithm']['init_traj_distr']['save'][m]
            else:
                s = str(m)
            controllers.update({('K' + s): t.K, ('k' + s): t.k,
                                ('PS' + s): t.pol_covar,
                                ('cPS' + s): t.chol_pol_covar,
                                ('iPS' + s): t.inv_pol_covar})
        io.savemat(self._hyperparams['common']['experiment_dir'] + 'init_controllers.mat', controllers)
        io.savemat('init_controllers.mat', controllers)
Project: SFA    Author: Bollegala    | project source | file source
def learnProjection(sourceDomain, targetDomain):
    """
    Learn the projection matrix and store it to a file. 
    """
    h = 50 # no. of latent dimensions.
    print "Loading the bipartite matrix...",
    coocData = sio.loadmat("../work/%s-%s/DSxDI.mat" % (sourceDomain, targetDomain))
    M = sp.lil_matrix(coocData['DSxDI'])
    (nDS, nDI) = M.shape
    print "Done."
    print "Computing the Laplacian...",
    D1 = sp.lil_matrix((nDS, nDS), dtype=np.float64)
    D2 = sp.lil_matrix((nDI, nDI), dtype=np.float64)
    for i in range(0, nDS):
        D1[i,i] = 1.0 / np.sqrt(np.sum(M[i,:].data[0]))
    for i in range(0, nDI):
        D2[i,i] = 1.0 / np.sqrt(np.sum(M[:,i].T.data[0]))
    B = (D1.tocsr().dot(M.tocsr())).dot(D2.tocsr())
    print "Done."
    print "Computing SVD...",
    ut, s, vt = sparsesvd(B.tocsc(), h)
    sio.savemat("../work/%s-%s/proj.mat" % (sourceDomain, targetDomain), {'proj':ut.T})
    print "Done."    
    pass
Project: DeepLearningOnGraph    Author: Conchylicultor    | project source | file source
def main():

    # Global check
    assert(os.path.exists(dirDataIn))
    assert(os.path.exists(dirDataOut))

    # For each file
    filesList = os.listdir(dirDataIn)
    for filename in filesList:
        print('Try converting ', filename)
        name = filename.split('.')[0] # Little hack to get the filename
        if filename.endswith('.npy'): # Candidate
            matrix = np.load(dirDataIn + filename, fix_imports = True)
        elif filename.endswith('.txt'): # Candidate
            matrix = utils.loadLabelList(dirDataIn + filename)
        else:
            print('Wrong format, skipped')
            continue
        sio.savemat(dirDataOut + name + '.mat', {savedVariableName:matrix})
Project: procrustes    Author: bmershon    | project source | file source
def doLaplacianSolveWithConstraints(self, evt):
        anchorWeights = 1e8
        anchors = np.zeros((len(self.laplacianConstraints), 3))
        i = 0
        anchorsIdx = []
        for anchor in self.laplacianConstraints:
            anchorsIdx.append(anchor)
            anchors[i, :] = self.laplacianConstraints[anchor]
            i += 1

        #IGL Cotangent weights
        (L, M_inv, solver, deltaCoords) = makeLaplacianMatrixSolverIGLSoft(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
        self.mesh.VPos = solveLaplacianMatrixIGLSoft(solver, L, M_inv, deltaCoords, anchorsIdx, anchors, anchorWeights)

#        #My umbrella weights
#        L = makeLaplacianMatrixUmbrellaWeights(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
#        deltaCoords = L.dot(self.mesh.VPos)[0:self.mesh.VPos.shape[0], :]
#        self.mesh.VPos = np.array(solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights), dtype=np.float32)

        sio.savemat("anchors.mat", {'deltaCoords':deltaCoords, 'anchors':anchors, 'anchorsIdx':np.array(anchorsIdx)})
        self.mesh.needsDisplayUpdate = True
        self.mesh.updateIndexDisplayList()
        self.Refresh()
Project: craftGBD    Author: craftGBD    | project source | file source
def imdb_proposals_to_mat(net, imdb, output_dir):
    """Generate RPN proposals on all images in an imdb."""

    _t = Timer()
    imdb_boxes = [[] for _ in xrange(imdb.num_images)]
    for i in xrange(imdb.num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t.tic()
        imdb_boxes[i], scores = im_proposals(net, im)
        _t.toc()

        dets = np.hstack((imdb_boxes[i], scores))
        file_name = os.path.splitext(os.path.basename(imdb.image_path_at(i)))[0]
        boxes = np.array(_conv2list(dets))
        sio.savemat(output_dir + '/' + file_name, {'boxes': boxes})

        print 'im_proposals: {:d}/{:d} {:d} {:.3f}s' \
              .format(i + 1, imdb.num_images, boxes.shape[0], _t.average_time)

    return imdb_boxes
Project: gcn_metric_learning    Author: sk1712    | project source | file source
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
    """
        timeseries   : timeseries table for subject (timepoints x regions)
        subject      : the subject short ID
        atlas_name   : name of the atlas used
        kind         : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        save         : save the connectivity matrix to a file
        save_path    : specify path to save the matrix if different from subject folder

    returns:
        connectivity : connectivity matrix (regions x regions)
    """

    print("Estimating %s matrix for subject %s" % (kind, subject))

    if kind == 'lasso':
        # Graph Lasso estimator
        covariance_estimator = GraphLassoCV(verbose=1)
        covariance_estimator.fit(timeseries)
        connectivity = covariance_estimator.covariance_
        print('Covariance matrix has shape {0}.'.format(connectivity.shape))

    elif kind in ['tangent', 'partial correlation', 'correlation']:
        conn_measure = connectome.ConnectivityMeasure(kind=kind)
        connectivity = conn_measure.fit_transform([timeseries])[0]

    if save:
        subject_file = os.path.join(save_path, subject,
                                    subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
        sio.savemat(subject_file, {'connectivity': connectivity})

    return connectivity
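A hypothetical call with synthetic data (subject ID and atlas name invented for illustration): the 'correlation' branch needs only nilearn's ConnectivityMeasure.

import numpy as np

ts = np.random.randn(120, 90)                 # 120 timepoints x 90 regions
conn = subject_connectivity(ts, subject='50003', atlas_name='ho',
                            kind='correlation', save=False)
print(conn.shape)                             # (90, 90)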
Project: pybot    Author: spillai    | project source | file source
def save_mat(fn, d): 
    import scipy.io as io
    io.savemat(os.path.expanduser(fn), d)
Project: pixelinkds    Author: hgrecco    | project source | file source
def save_mat(reader, output_filename):
    try:
        from scipy import io
    except ImportError:
        print('Please install scipy to use this format.')
        raise

    ts, data = reader.read_stack()
    io.savemat(output_filename, dict(timestamps=ts, data=data))
Project: Keras_FB    Author: InvidHead    | project source | file source
def on_epoch_end(self, epoch, logs=None):
        for k in self.params['metrics']:
            if k in logs:
                self.mesg+=(k+': '+str(logs[k])[:5]+' ')
                self.logs_epochs.setdefault(k, []).append(logs[k])



        if epoch+1>=self.stopped_epoch:
            self.model.stop_training = True
        logs = logs or {}
        self.epoch.append(epoch)
        self.t_epochs.append(time.time()-self.t_s)
        if self.savelog:
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_batches'+'.mat',{'log':np.array(self.logs_batches)})
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_epochs'+'.mat',{'log':np.array(self.logs_epochs)})
        th.start_new_thread(self.get_fig,())



        self.t_send(self.mesg)
        return
Project: Keras_FB    Author: InvidHead    | project source | file source
def on_epoch_end(self, epoch, logs=None):
        for k in self.params['metrics']:
            if k in logs:
                self.mesg+=(k+': '+str(logs[k])[:5]+' ')
                self.logs_epochs.setdefault(k, []).append(logs[k])
#==============================================================================

#==============================================================================
        if epoch+1>=self.stopped_epoch:
            self.model.stop_training = True
        logs = logs or {}
        self.epoch.append(epoch)
        self.t_epochs.append(time.time()-self.t_s)
        if self.savelog:
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_batches'+'.mat',{'log':np.array(self.logs_batches)})
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_epochs'+'.mat',{'log':np.array(self.logs_epochs)})
        th.start_new_thread(self.get_fig,())
#==============================================================================

#==============================================================================
        self.t_send(self.mesg)
        return
#==============================================================================
#         
#==============================================================================
Project: esys-pbi    Author: fsxfreak    | project source | file source
def signalInterrupt(self, selfnum, frame):
        self.dataThread.stop()
        io.savemat('p300Data_jct_2_17_10.mat', {'data' : self.dataThread.data, 'timestamps' : self.timestamps, 'markers' : self.markers})

        gtk.main_quit()
Project: kaggle-planet    Author: ZijunDeng    | project source | file source
def main():
    training_batch_size = 352
    validation_batch_size = 352

    net = get_res152(num_classes=num_classes, snapshot_path=os.path.join(
        ckpt_path, 'epoch_15_validation_loss_0.0772_iter_1000.pth')).cuda()
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.311, 0.340, 0.299], [0.167, 0.144, 0.138])
    ])
    criterion = nn.MultiLabelSoftMarginLoss().cuda()

    train_set = MultipleClassImageFolder(split_train_dir, transform)
    train_loader = DataLoader(train_set, batch_size=training_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, train_loader)
    loss = criterion(batch_outputs, batch_labels)
    print 'training loss %.4f' % loss.cpu().data.numpy()[0]
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    thretholds = find_best_threthold(batch_outputs, batch_labels)

    val_set = MultipleClassImageFolder(split_val_dir, transform)
    val_loader = DataLoader(val_set, batch_size=validation_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, val_loader)
    loss = criterion(batch_outputs, batch_labels)
    print 'validation loss %.4f' % loss.cpu().data.numpy()[0]
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    sio.savemat('./val_output.mat', {'outputs': batch_outputs, 'labels': batch_labels})
    prediction = get_one_hot_prediction(batch_outputs, thretholds)
    evaluation = evaluate(prediction, batch_labels)
    print 'validation evaluation: accuracy %.4f, precision %.4f, recall %.4f, f2 %.4f' % (
        evaluation[0], evaluation[1], evaluation[2], evaluation[3])
Project: SlidingWindowVideoTDA    Author: ctralie    | project source | file source
def saveRankings(idx, filename):
    R = []
    for i in range(len(idx)):
        for j in range(i+1, len(idx)):
            rel = 1
            if idx[i] < idx[j]:
                R.append([idx[i], idx[j], 1])
            else:
                R.append([idx[j], idx[i], -1])
    sio.savemat(filename, {"R":np.array(R)})
Project: SlidingWindowVideoTDA    Author: ctralie    | project source | file source
def makePlot(X, I1Z2, I1Z3, I2):
    if X.size == 0:
        return
    #Self-similarity matrix
    XSqr = np.sum(X**2, 1).flatten()
    D = XSqr[:, None] + XSqr[None, :] - 2*X.dot(X.T)
    D[D < 0] = 0
    D = np.sqrt(D)
    (PScore, PScoreMod, HSubscore, QPScore) = getPeriodicityScores(I1Z2, I1Z3, I2)

    #PCA
    pca = PCA(n_components = 20)
    Y = pca.fit_transform(X)
    sio.savemat("PCA.mat", {"Y":Y})
    eigs = pca.explained_variance_

    plt.clf()
    plt.subplot(221)
    plt.imshow(D, cmap = 'afmhot', interpolation = 'nearest')
    plt.title('SSM')

    plt.subplot(222)
    H1 = plotDGM(I1Z3, color = np.array([1.0, 0.0, 0.2]), label = 'H1', sz = 50, axcolor = np.array([0.8]*3))
    plt.hold(True)
    H2 = plotDGM(I2, color = np.array([0.43, 0.67, 0.27]), marker = 'x', sz = 50, label = 'H2', axcolor = np.array([0.8]*3))
    plt.title("PScore = %g, QPScore = %g"%(PScore, QPScore))

    ax = plt.subplot(223)
    ax.set_title("PCA of Sliding Window Embedding")
    c = plt.get_cmap('Spectral')
    C = c(np.array(np.round(np.linspace(0, 255, Y.shape[0])), dtype=np.int32))
    C = C[:, 0:3]
    ax.scatter(Y[:, 0], Y[:, 1], c = C, edgecolor='none')
    ax.set_axis_bgcolor((0.15, 0.15, 0.15))
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_aspect('equal', 'datalim')

    plt.subplot(224)
    plt.bar(np.arange(len(eigs)), eigs)
    plt.title("Eigenvalues")
Project: AutoPortraitMatting    Author: PetroWu    | project source | file source
def gen_data(name):
    reftracker = scio.loadmat('data/images_tracker.00047.mat')['tracker']
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')['tracker']
    refpos = np.floor(np.mean(reftracker, 0))
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=np.int), np.arange(1, 2001, dtype=np.int))
    #normalize x and y channels
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanmask.png')
    maskc = np.array(maskimg, dtype=np.float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    # warp is an inverse transform, and so src and dst must be reversed here
    tform = transform.estimate_transform('affine', desttracker + 600, reftracker + 600)

    img_data = skio.imread('data/images_data/'+name+'.jpg')
    # save org mat
    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    # the warped coordinate/mask channels are 2-D, so crop with 2-D slices
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]
    img_h, img_w, _ = img_data.shape
    mat = np.zeros((img_h, img_w, 3), dtype=np.float)
    # img_data is RGB; build a BGR image with the per-channel means subtracted
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img':mat})
    mat_plus = np.zeros((img_h, img_w, 6), dtype=np.float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask
Project: dwt    Author: min2209    | project source | file source
def forward_model(model, feeder, outputSavePath):
    with tf.Session() as sess:
        tfBatchDirs = tf.placeholder("float")
        tfBatchSS = tf.placeholder("float")
        keepProb = tf.placeholder("float")

        with tf.name_scope("model_builder"):
            print "attempting to build model"
            model.build(tfBatchDirs, tfBatchSS, keepProb=keepProb)
            print "built the model"

        init = tf.initialize_all_variables()

        sess.run(init)

        if not os.path.exists(outputSavePath):
            os.makedirs(outputSavePath)

        for i in range(int(math.floor(feeder.total_samples() / batchSize))):
            dirBatch, ssBatch, idBatch = feeder.next_batch()

            outputBatch = sess.run(model.outputDataArgMax, feed_dict={tfBatchDirs: dirBatch,
                                                                      tfBatchSS: ssBatch,
                                                                      keepProb: 1.0})
            outputBatch = outputBatch.astype(np.uint8)

            for j in range(len(idBatch)):
                outputFilePath = os.path.join(outputSavePath, idBatch[j]+'.mat')
                outputFileDir = os.path.dirname(outputFilePath)
                # print outputFileDir
                # print outputFilePath
                # raw_input("pause")

                if not os.path.exists(outputFileDir):
                    os.makedirs(outputFileDir)

                sio.savemat(outputFilePath, {"depth_map": outputBatch[j]})

                print "processed image %d out of %d"%(j+batchSize*i, feeder.total_samples())
Project: dwt    Author: min2209    | project source | file source
def forward_model(model, feeder, outputSavePath):
    with tf.Session() as sess:
        tfBatchImages = tf.placeholder("float", shape=[None, 512, 1024, 3])
        tfBatchSS = tf.placeholder("float", shape=[None, 512, 1024])
        tfBatchSSMask = tf.placeholder("float", shape=[None, 512, 1024])

        with tf.name_scope("model_builder"):
            print "attempting to build model"
            model.build(tfBatchImages, tfBatchSS, tfBatchSSMask)
            print "built the model"
        sys.stdout.flush()

        init = tf.initialize_all_variables()
        sess.run(init)

        for i in range(int(math.floor(feeder.total_samples() / batchSize))):
            imageBatch, ssBatch, ssMaskBatch, idBatch = feeder.next_batch()

            outputBatch = sess.run(model.output, feed_dict={tfBatchImages: imageBatch, tfBatchSS: ssBatch, tfBatchSSMask: ssMaskBatch})

            for j in range(len(idBatch)):
                outputFilePath = os.path.join(outputSavePath, idBatch[j]+'.mat')
                outputFileDir = os.path.dirname(outputFilePath)

                if not os.path.exists(outputFileDir):
                    os.makedirs(outputFileDir)

                sio.savemat(outputFilePath, {"dir_map": outputBatch[j]}, do_compression=True)

                print "processed image %d out of %d"%(j+batchSize*i, feeder.total_samples())
Project: dwt    Author: min2209    | project source | file source
def modelSaver(sess, modelSavePath, savePrefix, iteration, maxToKeep=5):
    allWeights = {}
    for name in [n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]:
        param = sess.run(name)
        nameParts = re.split('[:/]', name)
        saveName = nameParts[-4]+'/'+nameParts[-3]+'/'+nameParts[-2]
        allWeights[saveName] = param

    weightsFileName = os.path.join(modelSavePath, savePrefix+'_%03d'%iteration)

    sio.savemat(weightsFileName, allWeights)
Project: dwt    Author: min2209    | project source | file source
def modelSaver(sess, modelSavePath, savePrefix, iteration, maxToKeep=5):
    allWeights = {}

    for name in [n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]:
        param = sess.run(name)
        nameParts = re.split('[:/]', name)
        saveName = nameParts[-4]+'/'+nameParts[-3]+'/'+nameParts[-2]
        allWeights[saveName] = param

    savePath = os.path.join(modelSavePath, savePrefix+'_%03d'%iteration)
    sio.savemat(savePath, allWeights)
    print "saving model to %s" % savePath
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def set_and_save_indian_pines_proceed_data():
    dataset = HSI.HSIDataSet('indian_pines')
    dataset.get_data()
    dataset.get_labels()
    print ('data shape is: ', dataset.data.shape)  # 145,145,200
    print ('label shape is: ', dataset.labels.shape)  # 145, 145

    data, labels = np.array(dataset.data), np.array(dataset.labels)
    process = HSI_preprocess(name = 'indian_pines', dst_shape = (145,145,224))
    data_add_channel = process.add_channel(data)

    data_3x3_mean = process.get_mean_data(data = data_add_channel, patch_size = 3, var = False)
    data_3x3_mean = process.scale_to1(data_3x3_mean)
    sio.savemat(dataset.dir + '/indian_pines_3x3_mean.mat', {'data' : data_3x3_mean, 'labels' : labels})

    data_5x5_mean = process.get_mean_data(data = data_add_channel, patch_size = 5, var = False)
    data_5x5_mean = process.scale_to1(data_5x5_mean)
    sio.savemat(dataset.dir + '/indian_pines_5x5_mean.mat', {'data' : data_5x5_mean, 'labels' : labels})

    data_3x3_mean_std = process.get_mean_data(data = data_add_channel, patch_size = 3, var = True)
    data_3x3_mean_std = process.scale_to1(data_3x3_mean_std)
    sio.savemat(dataset.dir + '/indian_pines_3x3_mean_std.mat', {'data' : data_3x3_mean_std, 'labels' : labels})

    data_5x5_mean_std = process.get_mean_data(data = data_add_channel, patch_size = 5, var = True)
    data_5x5_mean_std = process.scale_to1(data_5x5_mean_std)
    sio.savemat(dataset.dir + '/indian_pines_5x5_mean_std.mat', {'data' : data_5x5_mean_std, 'labels' : labels})
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def set_and_save_indian_pines_proceed_1x1_data():
    dataset = HSI.HSIDataSet('indian_pines')
    dataset.get_data()
    dataset.get_labels()
    print ('data shape is: ', dataset.data.shape)  # 145,145,200
    print ('label shape is: ', dataset.labels.shape)  # 145, 145

    data, labels = np.array(dataset.data), np.array(dataset.labels)
    process = HSI_preprocess(name = 'indian_pines', dst_shape = (145,145,224))
    data_add_channel = process.add_channel(data)
    data_1x1 = process.scale_to1(data_add_channel)
    sio.savemat(dataset.dir + '/indian_pines_1x1_mean.mat', {'data' : data_1x1, 'labels' : labels})
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def set_and_save_salina_proceed_1x1_data():
    dataset = HSI.HSIDataSet('salina')
    dataset.get_data()
    dataset.get_labels()
    print ('data shape is: ', dataset.data.shape)  # 512, 217, 204
    print ('label shape is: ', dataset.labels.shape)  # 512, 217

    data, labels = np.array(dataset.data), np.array(dataset.labels)
    process = HSI_preprocess(name = 'salina', dst_shape=(512, 217, 224))
    data_add_channel = process.add_channel(data)
    data_1x1 = process.scale_to1(data_add_channel)
    sio.savemat(dataset.dir + '/salina_1x1_mean.mat', {'data' : data_1x1, 'labels' : labels})
Project: Sensor-Specific-Hyperspectral-Image-Feature-Learning    Author: MeiShaohui    | project source | file source
def train_aviris_10_times(label_unique, args):
    for i in range(5):
        exp_info = Config.ExpConfigInfo(name=args.data_set, label_unique=label_unique,
                                        new_dir_name=args.dst_dir,
                                        gpus=args.gpu, net_name='bn_net', exp_index=i,
                                        spatial_info=args.spatial_info, train_nums=args.train_nums)
        # set hyperparameters
        exp_info.set_data()
        exp_info.max_iter = args.max_iter
        exp_info.set_final_model()
        # train
        proto_file.set_prototxt(exp_info, exp_info.test_nums, exp_info.max_class)
        job_file = 'job_file_gpu_{}.sh'.format(exp_info.gpus)

        with open(job_file, 'w') as f:
            # f.write('cd {}\n'.format(caffe_root))
            f.write(caffe_root + '/build/tools/caffe train \\\n')
            f.write('--solver="{}" \\\n'.format(exp_info.solver_file))
            f.write('--gpu {} 2>&1 | tee {}\n'.format(exp_info.gpus, exp_info.log_file))

        subprocess.check_call('bash {}'.format(job_file), shell=True)

        test_dict = Config.get_y_pred_from_model(model=exp_info, mode='test', score_layer_name='ip2')
        train_dict = Config.get_y_pred_from_model(model=exp_info, mode='train', score_layer_name='ip2')
        test_feature = Config.get_feature_from_model(model=exp_info, mode='test', score_layer_name='ip1')
        train_feature = Config.get_feature_from_model(model=exp_info, mode='train', score_layer_name='ip1')
        sio.savemat(exp_info.result_mat_file, {'train': train_dict, 'test': test_dict, 'train_feature': train_feature,
                                               'test_feature': test_feature})
Project: Deep-subspace-clustering-networks    Author: panji1990    | project source | file source
def ae_feature_clustering(CAE, X):
    CAE.restore()

    #eng = matlab.engine.start_matlab()
    #eng.addpath(r'/home/pan/workspace-eclipse/deep-subspace-clustering/SSC_ADMM_v1.1',nargout=0)
    #eng.addpath(r'/home/pan/workspace-eclipse/deep-subspace-clustering/EDSC_release',nargout=0)

    Z = CAE.transform(X)

    sio.savemat('AE_YaleB.mat', dict(Z = Z) )

    return
Project: Fluid2d    Author: pvthinker    | project source | file source
def diags2mat(tempdiag,data):
    filemat = '%s.mat' % tempdiag
    print('save data into %s' % filemat)
    io.savemat(filemat, data)
Project: Math412S2017    Author: ctralie    | project source | file source
def plotCirclePatches():
    (Im, P) = getCirclePatches(40, 5)
    plt.clf()
    sio.savemat("PCircle.mat", {"P":P})
    PDs = doRipsFiltration(P, 2)
    print PDs[2]
    H1 = plotDGM(PDs[1], color = np.array([1.0, 0.0, 0.2]), label = 'H1', sz = 50, axcolor = np.array([0.8]*3))
    plt.hold(True)
    H2 = plotDGM(PDs[2], color = np.array([0.43, 0.67, 0.27]), marker = 'x', sz = 50, label = 'H2', axcolor = np.array([0.8]*3))
    plt.show()
Project: Networks    Author: dencesun    | project source | file source
def paper_figure2():
    """

    The graph data come from Jure Leskovec (2016), "Higher-order organization of complex networks", Fig. 2.
    :return: None

    """
    # path for mac
    # data = '/Users/dencesun/Desktop/Networks/data/C-elegans-frontal.txt'
    # path for linux
    # data = '/home/sun/Desktop/Networks-master/data/C-elegans-frontal.txt'
    data = '../data/C-elegans-frontal.txt'
    DG = HigherOrderNetwork.create_network(data)
    A = HigherOrderNetwork.create_adjacency(DG)
    W = HigherOrderNetwork.motif_bifan(DG)
    W = HigherOrderNetwork.largest_connect_component(W)
    cluster, condv, condc, order = HigherOrderNetwork.spectral_partitioning(W)

    print("\n\npaper_figure2's result")
    print("largest_connect_component's shape: ", W.shape)
    print("C-elegans's result")
    print('condc: ', condc)
    print('cluster\n', cluster)
    # save as matlab file format
    # savemat('/Users/dencesun/Desktop/Networks/data/c-elegans.mat', {'W': W})
    # save path for linux
    savemat('/home/sun/Desktop/Networks-master/data/c-elegans.mat', {'W': W})
    print('complete save motif adjacency matrix in data')
Project: HSICNN    Author: jamesbing    | project source | file source
def saveParams(fileName, params):
    data = {}
    for i in xrange(len(params)):
        if i % 2 == 0:
            data['W%d' %(i/2)] = params[i].get_value()
        else:
            data['b%d' %(i/2)] = params[i].get_value()
    sio.savemat(fileName, data)
Project: HSICNN    Author: jamesbing    | project source | file source
def cal_kappa(result_mat):
    kappa = 0.0
    pe = 0.0
    pa = 0.0
    data = sio.loadmat(result_mat)
    actual = data['actual'][0]
    predict = data['predict'][0]
    classes = int(max(actual))
    result_matrix = np.zeros((classes, classes))

    for i in range(len(actual)):
        hengzuobiao = int(actual[i] - 1)   # row index: actual class
        zongzuobiao = int(predict[i] - 1)  # column index: predicted class
        result_matrix[hengzuobiao][zongzuobiao] = result_matrix[hengzuobiao][zongzuobiao] + 1

    sum = 0.0
    for j in range(classes):
        pe_zong = result_matrix[0:classes,j].sum()
        pe_heng = result_matrix[j,0:classes].sum()
        pe = pe + pe_zong * pe_heng
    sum = result_matrix.sum()

    for k in range(classes):
        pa = pa + result_matrix[k][k]

    pe = pe / (sum * sum)
    pa = pa / sum

    kappa = (pa - pe)/(1-pe)

    save_file = result_mat + ".mat"
    sio.savemat(save_file,{'sum_result':result_matrix})

    return kappa
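A standalone sanity check of the kappa arithmetic above on a tiny hand-built confusion matrix (independent of the .mat file handling):

import numpy as np

m = np.array([[40., 10.],
              [ 5., 45.]])   # rows: actual class, columns: predicted class
total = m.sum()
pa = np.trace(m) / total                                  # observed agreement: 0.85
pe = (m.sum(axis=0) * m.sum(axis=1)).sum() / total ** 2   # chance agreement: 0.5
print((pa - pe) / (1 - pe))                               # 0.7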
Project: HSICNN    Author: jamesbing    | project source | file source
def writeToMAT(trainList, testList,trainPositions, testPositions, datasetName, train_ratio, neighbors):
    DataTr, CIdTr, PositionsTr = prepareMatList(trainList, trainPositions)
    DataTe, CIdTe, PositionsTe = prepareMatList(testList, testPositions)

    ltime = time.localtime()
    time_stamp = str(ltime[0]) + "_" + str(ltime[1]) + "_" + str(ltime[2]) + "_" + str(ltime[3]) + "_" + str(ltime[4])

    folderPath = "../experiments/" + datasetName + '_' + str(neighbors) + '_' + str(train_ratio) + "_" + time_stamp + "/"
    if not os.path.exists(folderPath):
        os.makedirs(folderPath)

    realPath = folderPath + datasetName + "_" + str(neighbors) + "_" + str(train_ratio)

    sio.savemat(realPath + '.mat',{'DataTr':DataTr, 'CIdTr':CIdTr, 'PositionsTr':PositionsTr,  'DataTe':DataTe, 'CIdTe':CIdTe, 'PositionsTe':PositionsTe})
    return realPath, neighbors
Project: object-detector    Author: penny4860    | project source | file source
def write(self, data, filename, write_mode="w"):
        self._check_directory(filename)        
        io.savemat(filename, data)


# Todo : staticmethod??
Project: Neural-Headline-Generator-CN    Author: QuantumLiu    | project source | file source
def on_epoch_end(self, epoch, logs=None):
        for k in self.params['metrics']:
            if k in logs:
                self.mesg+=(k+': '+str(logs[k])[:5]+' ')
                self.logs_epochs.setdefault(k, []).append(logs[k])
#==============================================================================
#         except:
#             itchat.auto_login(hotReload=True,enableCmdQR=True)
#             itchat.dump_login_status()
#             self.t_send(self.mesg, toUserName='filehelper')
#==============================================================================
        if epoch+1>=self.stopped_epoch:
            self.model.stop_training = True
        logs = logs or {}
        self.epoch.append(epoch)
        self.t_epochs.append(time.time()-self.t_s)
        if self.savelog:
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_batches'+'.mat',{'log':np.array(self.logs_batches)})
            sio.savemat((self.fexten if self.fexten else self.validateTitle(self.localtime))+'_logs_epochs'+'.mat',{'log':np.array(self.logs_epochs)})
        th.start_new_thread(self.get_fig,())
#==============================================================================
#         try:
#             itchat.send(self.mesg, toUserName='filehelper')
#         except:
#             traceback.print_exc()
#             return
#==============================================================================
        self.t_send(self.mesg, toUserName='filehelper')
        return
#==============================================================================
#         
#==============================================================================
Project: object-classification    Author: HenrYxZ    | project source | file source
def save(filename, arr):
    """
    Stores a numpy array in a file.

    Args:
        filename (string): The name for the file.
        arr (numpy array):

    Returns:
        void
    """
    data = {"stored": arr}
    sio.savemat(filename, data)
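A matching loader (hypothetical, not part of the project) that undoes save() by pulling the 'stored' key back out of loadmat's dict:

import scipy.io as sio

def load(filename):
    data = sio.loadmat(filename)
    return data["stored"]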
Project: location_tracking_ml    Author: cybercom-finland    | project source | file source
def save(file_name, variable_name, value):
    sio.savemat(file_name, {variable_name:value})
Project: location_tracking_ml    Author: cybercom-finland    | project source | file source
def export_to_octave(positionTracks):
    print('Creating Octave file.')
    pos = np.asarray(map(lambda l: list(l.itervalues()), positionTracks))
    sio.savemat('tracks.mat', {'pos': pos})