The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.examples.tutorials.mnist.input_data.read_data_sets().
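For orientation before the project examples, here is a minimal sketch of the call itself. It is not taken from any of the projects below, and it assumes TensorFlow 1.x, where the tensorflow.examples.tutorials.mnist module is still bundled:

# Minimal sketch (assumes TensorFlow 1.x with the bundled tutorial module).
from tensorflow.examples.tutorials.mnist import input_data

# On first use this downloads the four MNIST archives into "MNIST_data/" and
# returns a Datasets namedtuple with .train, .validation and .test splits.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

print(mnist.train.images.shape)       # (55000, 784), float32 scaled to [0, 1]
print(mnist.train.labels.shape)       # (55000, 10) with one_hot=True
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000

# Each split supports minibatch iteration, which most examples below rely on:
batch_images, batch_labels = mnist.train.next_batch(100)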
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
def get_input(self):
    # Input data.
    # Load the training, validation and test data into constants that are
    # attached to the graph.
    self.mnist = input_data.read_data_sets('data', one_hot=True, fake_data=False)

    # Input placeholders
    with tf.name_scope('input'):
        self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        self.y_true = tf.placeholder(tf.float32, [None, 10], name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='drop_out')

    # Below is just for the sake of visualization.
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(self.x, [-1, 28, 28, 1])
        tf.image_summary('input', image_shaped_input, 10)
    return
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):
    """Runs one evaluation against the full epoch of data.

    Args:
        sess: The session in which the model has been trained.
        eval_correct: The Tensor that returns the number of correct predictions.
        images_placeholder: The images placeholder.
        labels_placeholder: The labels placeholder.
        data_set: The set of images and labels to evaluate, from
            input_data.read_data_sets().
    """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = true_count / num_examples
    print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))
def __init__(self, args):
    """Initialize the DataLoader.

    :param args: all kinds of arguments
    """
    self.data_dir = MNIST_PATH
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(self.data_dir, one_hot=True)
    self.X_train = mnist.train.images
    self.y_train = mnist.train.labels
    self.X_val = mnist.test.images
    self.y_val = mnist.test.labels
    print(self.X_train[0].shape)
    print(self.y_train[0].shape)
    print(self.X_val[0].shape)
    print(self.y_val[0].shape)
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
def setUp(self):
    # Set up model
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.float32, shape=[None, 10])
    W_fc1 = weight_variable([784, 1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(X, W_fc1) + b_fc1)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    losses = -tf.reduce_sum(y * tf.log(h_fc2), reduction_indices=[1])
    self.loss = tf.reduce_mean(losses)
    self.batch_size = tf.cast(tf.gather(tf.shape(losses), 0), tf.float32)
    self.var_list = [W_fc1, b_fc1, W_fc2, b_fc2]
    self.X = X
    self.y = y
    self.sess = tf.Session()
    self.sess.run(tf.initialize_all_variables())
    self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def __init__(self):
    # MNIST dataset
    self.mnist = input_data.read_data_sets('mnist/', one_hot=True)
    # image size
    self.img_size = self.mnist.train.images[0].shape[0]
    # batch size
    self.batch_size = 64
    # number of batches per epoch
    self.chunk_size = self.mnist.train.num_examples // self.batch_size
    # number of training epochs
    self.epoch_size = 300
    # number of samples to generate
    self.sample_size = 25
    # number of hidden units
    self.units_size = 128
    # learning rate
    self.learning_rate = 0.001
    # label-smoothing factor
    self.smooth = 0.1
def mlp_mnist():
    """Test MLP with MNIST data and Sequential."""
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.flatten() for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.flatten() for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    input_dim = training_data.shape[1]
    label_size = training_label.shape[1]

    model = Sequential()
    model.add(Input(input_shape=(input_dim, )))
    model.add(Dense(300, activator='selu'))
    model.add(Dropout(0.2))
    model.add(Softmax(label_size))
    model.compile('CCE', optimizer=SGD())
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label))
def cnn_mnist():
    """Test CNN with MNIST data and Sequential."""
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.reshape(28, 28, 1) for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.reshape(28, 28, 1) for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    label_size = training_label.shape[1]

    model = Sequential()
    model.add(Input(batch_input_shape=(None, 28, 28, 1)))
    model.add(Conv2d((3, 3), 1, activator='selu'))
    model.add(AvgPooling((2, 2), stride=2))
    model.add(Conv2d((4, 4), 2, activator='selu'))
    model.add(AvgPooling((2, 2), stride=2))
    model.add(Flatten())
    model.add(Softmax(label_size))
    model.compile('CCE', optimizer=SGD(lr=1e-2))
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label), verbose=2)
def model_mlp_mnist():
    """Test MLP with MNIST data and Model."""
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.flatten() for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.flatten() for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    input_dim = training_data.shape[1]
    label_size = training_label.shape[1]

    dense_1 = Dense(300, input_dim=input_dim, activator=None)
    dense_2 = Activation('selu')(dense_1)
    dropout_1 = Dropout(0.2)(dense_2)
    softmax_1 = Softmax(label_size)(dropout_1)
    model = Model(dense_1, softmax_1)
    model.compile('CCE', optimizer=Adadelta())
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label))
def create_dictionary_dl(lmbd, K=100, N=10000, dir_mnist='save_exp/mnist'):
    import os.path as osp
    fname = osp.join(dir_mnist, "D_mnist_K{}_lmbd{}.npy".format(K, lmbd))
    if osp.exists(fname):
        D = np.load(fname)
    else:
        from sklearn.decomposition import DictionaryLearning
        mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
        im = mnist.train.next_batch(N)[0]
        im = im.reshape(N, 28, 28)
        im = [imresize(a, (17, 17), interp='bilinear', mode='L') - .5 for a in im]
        X = np.array(im).reshape(N, -1)
        print(X.shape)

        dl = DictionaryLearning(K, alpha=lmbd * N, fit_algorithm='cd',
                                n_jobs=-1, verbose=1)
        dl.fit(X)
        D = dl.components_.reshape(K, -1)
        np.save(fname, D)
    return D
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Fills the feed_dict for training the given step.

    A feed_dict takes the form of:
    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }

    Args:
        data_set: The set of images and labels, from input_data.read_data_sets()
        images_pl: The images placeholder, from placeholder_inputs().
        labels_pl: The labels placeholder, from placeholder_inputs().

    Returns:
        feed_dict: The feed dictionary mapping from placeholders to values.
    """
    # Create the feed_dict for the placeholders filled with the next
    # `batch_size` examples.
    images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    sess = tf.Session()

    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()
    if os.path.exists(parameter_path):
        # restore() requires the session as its first argument
        saver.restore(sess, parameter_path)
    else:
        sess.run(tf.initialize_all_variables())

    for i in range(max_iter):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = sess.run(lenet.train_accuracy, feed_dict={
                lenet.raw_input_image: batch[0],
                lenet.raw_input_label: batch[1]})
            print("step %d, training accuracy %g" % (i, train_accuracy))
        sess.run(lenet.train_op, feed_dict={lenet.raw_input_image: batch[0],
                                            lenet.raw_input_label: batch[1]})
    save_path = saver.save(sess, parameter_path)
def mnist_batches(batch_size, size, num_preprocess_threads=1, is_training=True, data_count=55000):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    mnist_data_dir = os.path.join(current_dir, '../../MNIST-data')

    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(mnist_data_dir, one_hot=True)
    data, label = mnist.train.next_batch(data_count)
    data = data.reshape(data_count, 28, 28, 1)

    data_queue = tf.train.input_producer(data, shuffle=False,
                                         element_shape=(28, 28, 1),
                                         capacity=batch_size * 3)
    dequeued_image = data_queue.dequeue()
    dequeued_image = resize_image(dequeued_image, None, is_training, size, channels=1)

    label_queue = tf.train.input_producer(label, shuffle=False,
                                          element_shape=(10, ),
                                          capacity=batch_size * 3)

    return tf.train.batch([dequeued_image, label_queue.dequeue()],
                          batch_size=batch_size,
                          capacity=batch_size * 3)
def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channel = 1

    # (XX_train, YY_train), (X_test, Y_test) = mnist.load_data()
    print("111111")
    trainData, trainLabels = loadData('./mnisttrain', 1000)
    self.x_train = trainData
    # self.x_train = XX_train
    # self.x_train = input_data.read_data_sets("mnist",
    #                                          one_hot=True).train.images
    print("222222")
    self.x_train = self.x_train.reshape(-1, self.img_rows,
                                        self.img_cols, 1).astype(np.float32)
    print("333333")
    self.DCGAN = DCGAN()
    self.discriminator = self.DCGAN.discriminator_model()
    self.adversarial = self.DCGAN.adversarial_model()
    self.generator = self.DCGAN.generator()
def worker_task(ps, batch_size=50):
    # Download MNIST.
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    # Initialize the model.
    net = model.SimpleCNN()
    keys = net.get_weights()[0]

    while True:
        # Get the current weights from the parameter server.
        weights = ray.get(ps.pull.remote(keys))
        net.set_weights(keys, weights)

        # Compute an update and push it to the parameter server.
        xs, ys = mnist.train.next_batch(batch_size)
        gradients = net.compute_update(xs, ys)
        ps.push.remote(keys, gradients)
def evaluate():
    """Eval MNIST for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for MNIST.
        mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
        images = mnist.test.images
        labels = mnist.test.labels

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(images, keep_prob=1.0)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)

        # Create saver to restore the learned variables for eval.
        saver = tf.train.Saver()

        eval_once(saver, top_k_op)
def MNIST(one_hot=True, split=[1.0, 0.0, 0.0]):
    """Returns the MNIST dataset.

    Returns
    -------
    mnist : DataSet
        DataSet object w/ convenience props for accessing
        train/validation/test sets and batches.
    """
    ds = input_data.read_data_sets('MNIST_data/', one_hot=one_hot)
    return Dataset(np.r_[ds.train.images, ds.validation.images, ds.test.images],
                   np.r_[ds.train.labels, ds.validation.labels, ds.test.labels],
                   split=split)
def train(self):
    # Import training data
    mnist = input_data.read_data_sets('/app/MNIST_data/', one_hot=True)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=self.y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    tf.global_variables_initializer().run()

    # Train
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        self.sess.run(train_step, feed_dict={self.x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(self.sess.run(accuracy, feed_dict={self.x: mnist.test.images,
                                             y_: mnist.test.labels}))
def get_sequential_mnist_batch_dict_generator(in_pl, hid_pl, eps_z, pd,
                                              data_dir='data/mnist/', stage='train'):
    """Generator that loads tensorflow's version of the mnist data set and
    fills placeholders with data/noise/zeros for training."""
    if stage == 'train':
        data = input_data.read_data_sets(data_dir).train
    else:
        data = input_data.read_data_sets(data_dir).validation

    d = {}
    while True:
        x = np.reshape(data.next_batch(pd['batch_size'])[0],
                       (pd['batch_size'], 28, 28))
        d[in_pl] = np.transpose(x, (1, 0, 2))
        d[hid_pl] = np.zeros((pd['batch_size'], pd['hid_state_size']))
        d[eps_z] = np.random.normal(size=(pd['seq_length'], pd['batch_size'], pd['z_dim']))
        yield d
def load_data(path, shuffle=False):
    mnist = input_data.read_data_sets(path, one_hot=True)

    trainX = mnist.train.images  # ndarray
    trainY = mnist.train.labels
    trainY = trainY.astype('float32')

    valX = mnist.validation.images
    valY = mnist.validation.labels
    valY = valY.astype('float32')

    testX = mnist.test.images
    testY = mnist.test.labels
    testY = testY.astype('float32')

    if shuffle:
        r = np.random.permutation(len(trainY))
        trainX = trainX[r, :]
        trainY = trainY[r, :]

        r = np.random.permutation(len(valY))
        valX = valX[r, :]
        valY = valY[r, :]

        r = np.random.permutation(len(testY))
        testX = testX[r, :]
        testY = testY[r, :]

    return trainX, trainY, valX, valY, testX, testY
def main(_):
    config = FLAGS

    # TODO: hardcoded for mnist
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    config.x_dim = 784

    with tf.Session() as sess:
        vae = VAE(config, sess,
                  get_model_dir(config, ["test", "sample", "sample_manifold", "batch_size"]))

        if config.sample:
            vae.load()
            sample_image(vae)
        elif config.sample_manifold:
            vae.load()
            sample_manifold2d(vae, 20)
        elif config.test:
            vae.load()
            vae.test(mnist)
        else:
            vae.train(mnist)
def get_dataset(data_dir, preprocess_fcn=None, dtype=tf.float32, reshape=True):
    """Construct a DataSet.

    `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
    `float32` to rescale into `[0, 1]`.

    `reshape` converts the shape from [num_examples, rows, columns, depth]
    to [num_examples, rows*columns] (assuming depth == 1).
    """
    from tensorflow.examples.tutorials.mnist import input_data
    datasets = input_data.read_data_sets(data_dir, dtype=dtype, reshape=reshape)
    if preprocess_fcn is not None:
        train = _preprocess_dataset(datasets.train, preprocess_fcn, dtype, reshape)
        validation = _preprocess_dataset(datasets.validation, preprocess_fcn, dtype, reshape)
        test = _preprocess_dataset(datasets.test, preprocess_fcn, dtype, reshape)
    else:
        train = datasets.train
        validation = datasets.validation
        test = datasets.test
    height, width, channels = 28, 28, 1
    return Datasets(train, validation, test, height, width, channels)
def main(*args):
    dataset_dir = "../data/mnist-tf"
    mnist = input_data.read_data_sets(dataset_dir)
    x_train = mnist.train.images.astype('float32') / 255.
    x_eval = mnist.validation.images.astype('float32') / 255.
    x_test = mnist.test.images.astype('float32') / 255.
    x_train = x_train.reshape((len(x_train), total_tensor_depth(x_train)))
    x_eval = x_eval.reshape((len(x_eval), total_tensor_depth(x_eval)))
    x_test = x_test.reshape((len(x_test), total_tensor_depth(x_test)))

    xp = experiment_fn("/tmp/polyaxon_logs/vae_mnist",
                       {'images': x_train}, mnist.train.labels,
                       {'images': x_eval}, mnist.validation.labels)
    xp.continuous_train_and_eval()

    encode(xp.estimator, x_test, mnist.test.labels)
    generate(xp.estimator)
def __init__(self, one_hot=True, is_flat=True, resize_dims=None, convert_to_rgb=False):
    self.mnist = input_data.read_data_sets('MNIST_data', one_hot=one_hot)
    self.one_hot = one_hot
    self.number = 99997  # XXX
    self.num_examples = self.mnist.test._num_examples

    # return images in [batch, row, col]
    if not is_flat:
        self.mnist = MNIST_Number._unflatten_mnist(self.mnist)

    # resizes images if resize_dims tuple is provided
    if resize_dims is not None:
        self.mnist = MNIST_Number.resize_mnist(self.mnist, resize_dims)

    # tile images as [img, img, img]
    if convert_to_rgb:
        self.mnist = MNIST_Number.bw_to_rgb_mnist(self.mnist)
def loadMNIST(fname):
    if not os.path.exists(fname):
        # download and preprocess MNIST dataset
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        trainData, validData, testData = {}, {}, {}
        trainData["image"] = mnist.train.images.reshape([-1, 28, 28]).astype(np.float32)
        validData["image"] = mnist.validation.images.reshape([-1, 28, 28]).astype(np.float32)
        testData["image"] = mnist.test.images.reshape([-1, 28, 28]).astype(np.float32)
        trainData["label"] = np.argmax(mnist.train.labels.astype(np.float32), axis=1)
        validData["label"] = np.argmax(mnist.validation.labels.astype(np.float32), axis=1)
        testData["label"] = np.argmax(mnist.test.labels.astype(np.float32), axis=1)
        os.makedirs(os.path.dirname(fname))
        np.savez(fname, train=trainData, valid=validData, test=testData)
        os.system("rm -rf MNIST_data")
    MNIST = np.load(fname)
    trainData = MNIST["train"].item()
    validData = MNIST["valid"].item()
    testData = MNIST["test"].item()
    return trainData, validData, testData

# generate training batch
def fill_feed_dict(data_set, images_pl, labels_pl, batch_size):
    """Fills the feed_dict for training the given step.

    Args:
        data_set: The set of images and labels, from input_data.read_data_sets()
        images_pl: The images placeholder, from placeholder_inputs().
        labels_pl: The labels placeholder, from placeholder_inputs().
        batch_size: Batch size of data to feed.

    Returns:
        feed_dict: The feed dictionary mapping from placeholders to values.
    """
    # Create the feed_dict for the placeholders filled with the next
    # `batch_size` examples.
    images_feed, labels_feed = data_set.next_batch(batch_size, FLAGS.fake_data)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict
def get_mnist_data(data_path=data_path):
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(data_path + 'MNIST_data/', one_hot=False)
    X, Y = mnist.train.next_batch(mnist.train.num_examples)
    Xval, Yval = mnist.validation.next_batch(mnist.validation.num_examples)
    Xtest, Ytest = mnist.test.next_batch(mnist.test.num_examples)
    Y, Yval, Ytest = [np.array(y, dtype=int) for y in [Y, Yval, Ytest]]
    X = np.concatenate([X, Xval], 0)
    Y = np.concatenate([Y, Yval], 0)
    return X, Y, Xtest, Ytest
def load_mnist(data_dir, flatten=False, one_hot=True, normalize_range=False):
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(data_dir, one_hot=one_hot, reshape=flatten)

    def _extract_fn(x):
        X = x.images
        y = x.labels
        if not normalize_range:
            X *= 255.0
        return (X, y)

    Xtrain, ytrain = _extract_fn(mnist.train)
    Xval, yval = _extract_fn(mnist.validation)
    Xtest, ytest = _extract_fn(mnist.test)
    return (Xtrain, ytrain, Xval, yval, Xtest, ytest)
def __load_mnist(self, n_classes, cnt_per_class, working_directory, batch_size):
    data_directory = os.path.join(working_directory, "MNIST")
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)
    self.mnist = input_data.read_data_sets(data_directory, one_hot=True)

    # How many labelled data points will we use for the semi-supervised
    # learning process?
    # labelled_cnt = cnt_per_class * n_classes
    self.X_train_few, self.y_train_few = extract_few_mnist_labels(
        cnt_per_class, n_classes, batch_size, self.mnist)
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000,
               test_start=0, test_end=10000):
    """
    Load and preprocess MNIST dataset
    :param datadir: path to folder where data should be stored
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: tuple of four arrays containing training data, training labels,
             testing data and testing labels.
    """
    assert isinstance(train_start, int)
    assert isinstance(train_end, int)
    assert isinstance(test_start, int)
    assert isinstance(test_end, int)

    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
    X_train = np.vstack((mnist.train.images, mnist.validation.images))
    Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
    X_test = mnist.test.images
    Y_test = mnist.test.labels

    X_train = X_train[train_start:train_end]
    Y_train = Y_train[train_start:train_end]
    X_test = X_test[test_start:test_end]
    Y_test = Y_test[test_start:test_end]

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    return X_train, Y_train, X_test, Y_test
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Fills the feed_dict for training the given step.

    A feed_dict takes the form of:
    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }

    Args:
        data_set: The set of images and labels, from input_data.read_data_sets()
        images_pl: The images placeholder, from placeholder_inputs().
        labels_pl: The labels placeholder, from placeholder_inputs().

    Returns:
        feed_dict: The feed dictionary mapping from placeholders to values.
    """
    # Create the feed_dict for the placeholders filled with the next
    # `batch size` examples.
    images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict
def __init__(self, batch_size=128, reshape=False, one_hot=False):
    # load sg_data set
    data_set = input_data.read_data_sets(Mnist._data_dir, reshape=reshape, one_hot=one_hot)

    self.batch_size = batch_size

    # save each sg_data set
    _train = data_set.train
    _valid = data_set.validation
    _test = data_set.test

    # member initialize
    self.train, self.valid, self.test = tf.sg_opt(), tf.sg_opt(), tf.sg_opt()

    # convert to tensor queue
    self.train.image, self.train.label = \
        _data_to_tensor([_train.images, _train.labels.astype('int32')], batch_size, name='train')
    self.valid.image, self.valid.label = \
        _data_to_tensor([_valid.images, _valid.labels.astype('int32')], batch_size, name='valid')
    self.test.image, self.test.label = \
        _data_to_tensor([_test.images, _test.labels.astype('int32')], batch_size, name='test')

    # calc total batch count
    self.train.num_batch = _train.labels.shape[0] // batch_size
    self.valid.num_batch = _valid.labels.shape[0] // batch_size
    self.test.num_batch = _test.labels.shape[0] // batch_size
def __init__(self, batch_size):
    from tensorflow.examples.tutorials.mnist import input_data
    self.mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    self.x = tf.placeholder(tf.float32, shape=[batch_size, 784])
    self.feed_y = tf.placeholder(tf.float32, shape=[batch_size, 10])
    # rescale the one-hot labels from {0, 1} to {-1, 1}
    self.y = ((2 * self.feed_y) - 1)
def test_saver():
    from tensorflow.examples.tutorials.mnist import input_data
    mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    # cW = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist_data.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saved_path = '/tmp/tensor_saved_test'
    with sess.as_default():
        saver = tf.train.Saver()
        saver.save(sess, saved_path)
    saved_meta_path = saved_path + '.meta'

    sess2 = tf.Session()
    sess2.as_default()
    restorer = tf.train.import_meta_graph(saved_meta_path)
    restorer.restore(sess2, saved_path)
    print(sess2.run(accuracy, feed_dict={x: mnist_data.test.images,
                                         y_: mnist_data.test.labels}))

    saver = tf.train.Saver()
    # saver.save(sess2, saved_path)
def __init__(self): mnist = input_data.read_data_sets("./data", one_hot=True) self.test_ims, self.test_labels = mnist.test.next_batch(10000) self.test_ims = self.test_ims.reshape((10000, 1, 28, 28))
def main(_):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # GOLANG note that we must label the input-tensor!
    x = tf.placeholder(tf.float32, [None, 784], name="imageinput")
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.add(tf.matmul(x, W), b)
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Train
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # GOLANG note that we must label the infer-operation!!
    infer = tf.argmax(y, 1, name="infer")
    correct_prediction = tf.equal(infer, tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))

    builder = tf.saved_model.builder.SavedModelBuilder("mnistmodel")
    # GOLANG note that we must tag our model so that we can retrieve it at inference-time
    builder.add_meta_graph_and_variables(sess, ["serve"])
    builder.save()
def load_data():
    """Download MNIST data from TensorFlow package."""
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    train_data = mnist.train.images
    test_data = mnist.test.images
    valid_data = mnist.validation.images
    train_label = mnist.train.labels
    test_label = mnist.test.labels
    valid_label = mnist.validation.labels
    all_data = [train_data, test_data, valid_data]
    all_labels = [train_label, test_label, valid_label]
    return all_data, all_labels
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    # Launch session.
    sess = tf.InteractiveSession()

    # Initialize variables.
    tf.global_variables_initializer().run()

    # Do the training.
    for i in range(1100):
        batch = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})

    # See how model did.
    print("Test Accuracy %g" % sess.run(accuracy,
                                        feed_dict={x: mnist.test.images,
                                                   y_: mnist.test.labels}))
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    # Launch session.
    sess = tf.InteractiveSession()

    # Initialize variables.
    tf.global_variables_initializer().run()

    # Do the training.
    for i in range(1100):
        batch = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})

    # See how model did.
    print("Test Accuracy %g" % sess.run(accuracy,
                                        feed_dict={x: mnist.test.images,
                                                   y_: mnist.test.labels}))
def mnist_mlp_ns(args):
    # write tensorflow models
    x = ns.placeholder(np.float32, [args.batch_size, 784])
    t = ns.placeholder(np.float32, [args.batch_size, 10])
    w = ns.Variable(np.zeros([784, 10]))
    b = ns.Variable(np.zeros([10]))
    y = ns.add(ns.matmul(x, w), b)
    cost = ns.reduce_mean(-ns.reduce_sum(ns.multiply(t, ns.log(ns.softmax(y))), axis=[1]))

    # transformer and computations
    with ExecutorFactory() as ex:
        updates = CommonSGDOptimizer(args.lrate).minimize(cost, cost.variables())
        train_comp = ex.executor(ng.sequential([updates, cost]), x, t)
        ex.transformer.initialize()

        # train
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

        ng_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val = train_comp(batch_xs, batch_ys)
            ng_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return ng_cost_vals
def mnist_mlp_tf(args):
    # write tensorflow models
    x = tf.placeholder(tf.float32, [args.batch_size, 784])
    t = tf.placeholder(tf.float32, [args.batch_size, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b
    cost = tf.reduce_mean(-tf.reduce_sum(tf.multiply(t, tf.log(tf.nn.softmax(y))), axis=[1]))
    init = tf.global_variables_initializer()

    # train in tensorflow as comparison
    with tf.Session() as sess:
        train_step = tf.train.GradientDescentOptimizer(args.lrate).minimize(cost)
        sess.run(init)

        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

        tf_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val, _ = sess.run([cost, train_step],
                                   feed_dict={x: batch_xs, t: batch_ys})
            tf_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return tf_cost_vals
def load_data(name, random_labels=False):
    """Load the data.

    name - the name of the dataset
    random_labels - True if we want to return random labels to the dataset

    return object with data and labels
    """
    print('Loading Data...')
    C = type('type_C', (object,), {})
    data_sets = C()
    if name.split('/')[-1] == 'MNIST':
        data_sets_temp = input_data.read_data_sets(
            os.path.dirname(sys.argv[0]) + "/data/MNIST_data/", one_hot=True)
        data_sets.data = np.concatenate((data_sets_temp.train.images,
                                         data_sets_temp.test.images), axis=0)
        data_sets.labels = np.concatenate((data_sets_temp.train.labels,
                                           data_sets_temp.test.labels), axis=0)
    else:
        d = sio.loadmat(os.path.join(os.path.dirname(sys.argv[0]), name + '.mat'))
        F = d['F']
        y = d['y']
        C = type('type_C', (object,), {})
        data_sets = C()
        data_sets.data = F
        data_sets.labels = np.squeeze(np.concatenate((y[None, :], 1 - y[None, :]), axis=0).T)

    # If we want to assign random labels to the data
    if random_labels:
        labels = np.zeros(data_sets.labels.shape)
        labels_index = np.random.randint(low=0, high=labels.shape[1], size=labels.shape[0])
        labels[np.arange(len(labels)), labels_index] = 1
        data_sets.labels = labels
    return data_sets
def do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_set):
    """Runs one evaluation against the full epoch of data.

    Args:
        sess: The session in which the model has been trained.
        eval_correct: The Tensor that returns the number of correct predictions.
        images_placeholder: The images placeholder.
        labels_placeholder: The labels placeholder.
        data_set: The set of images and labels to evaluate, from
            input_data.read_data_sets().
    """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    # precision is a plain Python float, not a graph op, so it is not
    # passed through sess.run().
    precision = float(true_count) / num_examples
    print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))