The following code examples, extracted from open-source Python projects, illustrate how to use utils.GetListOfFeatureNamesAndSizes().
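For context: in the YouTube-8M starter code, this helper parses the comma-separated --feature_names and --feature_sizes flag strings into a list of feature-name strings and a parallel list of integer sizes, which the readers in the examples below consume. A minimal usage sketch, assuming utils.py from the YT8M starter code is importable and using the dataset's usual "rgb,audio" / "1024,128" defaults:

import utils  # utils.py from the YouTube-8M starter code

# Parse the comma-separated flag strings into parallel lists.
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
    "rgb,audio", "1024,128")

print(feature_names)  # expected: ['rgb', 'audio']
print(feature_sizes)  # expected: [1024, 128]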
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k)
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_dir == "":
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
            FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k)
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  inference(reader, FLAGS.checkpoint_file, FLAGS.train_dir,
            FLAGS.input_data_pattern, FLAGS.output_file,
            FLAGS.batch_size, FLAGS.top_k)
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names,
        feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names,
        feature_sizes=feature_sizes)

  return reader
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        feature_names=feature_names,
        feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        num_classes=FLAGS.truncated_num_classes,
        decode_zlib=FLAGS.decode_zlib,
        feature_names=feature_names,
        feature_sizes=feature_sizes,
        feature_calcs=FLAGS.c_vars,
        feature_remove=FLAGS.r_vars)

  return reader
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)

  return reader

############################################################
# By Dalong
############################################################
def build_model(self):
  """Find the model and build the graph."""
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    if FLAGS.frame_only:
      reader = readers.YT8MFrameFeatureOnlyReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
    else:
      reader = readers.YT8MFrameFeatureReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)

  # Find the model.
  model = find_class_by_name(FLAGS.model, [labels_embedding])()
  label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses_embedding])()
  optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])

  build_graph(reader=reader,
              model=model,
              optimizer_class=optimizer_class,
              clip_gradient_norm=FLAGS.clip_gradient_norm,
              train_data_pattern=FLAGS.train_data_pattern,
              label_loss_fn=label_loss_fn,
              base_learning_rate=FLAGS.base_learning_rate,
              learning_rate_decay=FLAGS.learning_rate_decay,
              learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
              regularization_penalty=FLAGS.regularization_penalty,
              num_readers=FLAGS.num_readers,
              batch_size=FLAGS.batch_size,
              num_epochs=FLAGS.num_epochs)

  logging.info("%s: Built graph.", task_as_string(self.task))

  return tf.train.Saver(max_to_keep=2, keep_checkpoint_every_n_hours=0.25)
def lstmoutput(self, model_input, vocab_size, num_frames):
  number_of_layers = FLAGS.lstm_layers
  # list(), not a bare map(), so the sizes can be len()'d and indexed.
  lstm_sizes = list(map(int, FLAGS.lstm_cells.split(",")))
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  # Split the concatenated input into one sub-tensor per feature group and
  # L2-normalize each along the feature axis.
  sub_inputs = [tf.nn.l2_normalize(x, dim=2)
                for x in tf.split(model_input, feature_sizes, axis=2)]

  assert len(lstm_sizes) == len(feature_sizes), \
      "length of lstm_sizes (={}) != length of feature_sizes (={})".format(
          len(lstm_sizes), len(feature_sizes))

  outputs = []
  for i in range(len(feature_sizes)):
    with tf.variable_scope("RNN%d" % i):
      sub_input = sub_inputs[i]
      lstm_size = lstm_sizes[i]

      # Stack LSTM layers for this feature group.
      stacked_lstm = tf.contrib.rnn.MultiRNNCell(
          [
              tf.contrib.rnn.BasicLSTMCell(
                  lstm_size, forget_bias=1.0, state_is_tuple=True)
              for _ in range(number_of_layers)
          ],
          state_is_tuple=True)

      output, state = tf.nn.dynamic_rnn(stacked_lstm, sub_input,
                                        sequence_length=num_frames,
                                        swap_memory=FLAGS.rnn_swap_memory,
                                        dtype=tf.float32)
      outputs.append(output)

  # Concatenate the per-group outputs along the feature axis.
  final_output = tf.concat(outputs, axis=2)
  return final_output
def lstm(self, model_input, vocab_size, num_frames, sub_scope="",
         **unused_params):
  number_of_layers = FLAGS.lstm_layers
  # list(), not a bare map(), so the sizes can be len()'d and indexed.
  lstm_sizes = list(map(int, FLAGS.lstm_cells.split(",")))
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  # Split the concatenated input into one sub-tensor per feature group and
  # L2-normalize each along the feature axis.
  sub_inputs = [tf.nn.l2_normalize(x, dim=2)
                for x in tf.split(model_input, feature_sizes, axis=2)]

  assert len(lstm_sizes) == len(feature_sizes), \
      "length of lstm_sizes (={}) != length of feature_sizes (={})".format(
          len(lstm_sizes), len(feature_sizes))

  states = []
  for i in range(len(feature_sizes)):
    with tf.variable_scope(sub_scope + "RNN%d" % i):
      sub_input = sub_inputs[i]
      lstm_size = lstm_sizes[i]

      # Stack LSTM layers for this feature group.
      stacked_lstm = tf.contrib.rnn.MultiRNNCell(
          [
              tf.contrib.rnn.BasicLSTMCell(
                  lstm_size, forget_bias=1.0, state_is_tuple=True)
              for _ in range(number_of_layers)
          ],
          state_is_tuple=True)

      output, state = tf.nn.dynamic_rnn(stacked_lstm, sub_input,
                                        sequence_length=num_frames,
                                        swap_memory=FLAGS.rnn_swap_memory,
                                        dtype=tf.float32)
      # Keep the cell state (c) of every layer for this group.
      states.extend([s.c for s in state])

  final_state = tf.concat(states, axis=1)
  return final_state
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  model = find_class_by_name(FLAGS.model,
                             [frame_level_models, video_level_models])()
  transformer_fn = find_class_by_name(FLAGS.feature_transformer,
                                      [feature_transform])

  build_graph(reader, model,
              input_data_pattern=FLAGS.input_data_pattern,
              batch_size=FLAGS.batch_size,
              transformer_class=transformer_fn)

  saver = tf.train.Saver(max_to_keep=3,
                         keep_checkpoint_every_n_hours=10000000000)

  inference(saver, FLAGS.train_dir, FLAGS.output_file,
            FLAGS.batch_size, FLAGS.top_k)
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)

  return reader
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)

  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)

  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")

  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")

  calculate_moments(reader, feature_names, feature_sizes,
                    FLAGS.input_data_pattern, FLAGS.input_data_pattern2,
                    FLAGS.input_data_pattern3, FLAGS.output_file,
                    FLAGS.batch_size)