This article collects typical usage examples of the Python attribute tensorflow.flags.FLAGS. If you are wondering what flags.FLAGS is, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore the other members of the tensorflow.flags module.
The code examples below are sorted by popularity by default.
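Before the examples, it helps to see how FLAGS gets populated in the first place. The following is a minimal sketch, assuming TensorFlow 1.x (where tensorflow.flags wraps absl.flags); the flag names here are illustrative, not taken from any example below:

import tensorflow as tf

flags = tf.flags
FLAGS = flags.FLAGS

# Each DEFINE_* call registers a flag with a name, a default, and help text.
flags.DEFINE_string("train_dir", "/tmp/model", "Directory for checkpoints.")
flags.DEFINE_integer("batch_size", 1024, "Number of examples per batch.")
flags.DEFINE_bool("frame_features", False, "Whether inputs are frame-level.")

def main(unused_argv):
  # Once the command line is parsed, every registered flag is an attribute
  # of the shared FLAGS object.
  print(FLAGS.train_dir, FLAGS.batch_size, FLAGS.frame_features)

if __name__ == "__main__":
  tf.app.run()  # parses sys.argv into FLAGS, then calls main()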
Example 1: main
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)
  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)
  if FLAGS.output_file == "":
    raise ValueError("'output_file' was not specified. "
                     "Unable to continue with inference.")
  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")
  inference(reader, FLAGS.train_dir, FLAGS.input_data_pattern,
            FLAGS.output_file, FLAGS.batch_size, FLAGS.top_k)
Example 2: build_model
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def build_model(self, model, reader):
  """Find the model and build the graph."""
  label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
  optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])
  build_graph(reader=reader,
              model=model,
              optimizer_class=optimizer_class,
              clip_gradient_norm=FLAGS.clip_gradient_norm,
              train_data_pattern=FLAGS.train_data_pattern,
              label_loss_fn=label_loss_fn,
              base_learning_rate=FLAGS.base_learning_rate,
              learning_rate_decay=FLAGS.learning_rate_decay,
              learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
              regularization_penalty=FLAGS.regularization_penalty,
              num_readers=FLAGS.num_readers,
              batch_size=FLAGS.batch_size,
              num_epochs=FLAGS.num_epochs)
  return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=5)
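The helpers find_class_by_name and build_graph belong to the surrounding training script and are not shown in this extract. As a sketch of what find_class_by_name plausibly does, inferred from how it is called above rather than copied from the original:

def find_class_by_name(name, modules):
  """Searches the given modules for an attribute named `name`; returns the first match."""
  candidates = [getattr(module, name, None) for module in modules]
  return next(c for c in candidates if c is not None)

So find_class_by_name(FLAGS.label_loss, [losses]) resolves a loss class from its flag-supplied name, and the trailing () instantiates it.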
Example 3: calculate_loss
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
  with tf.name_scope("loss_xent"):
    epsilon = 10e-6
    if FLAGS.label_smoothing:
      float_labels = smoothing(labels)
    else:
      float_labels = tf.cast(labels, tf.float32)
    cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
        1 - float_labels) * tf.log(1 - predictions + epsilon)
    cross_entropy_loss = tf.negative(cross_entropy_loss)
    if weights is not None:
      print(cross_entropy_loss, weights)
      # Scale each row of the per-class loss by its example weight.
      weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
      print("create weighted_loss", weighted_loss)
      return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
    else:
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
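The smoothing helper invoked when FLAGS.label_smoothing is set is not part of this extract. A minimal sketch of conventional label smoothing, under the assumption that it follows the standard formula (the epsilon value and the choice of prior here are illustrative):

def smoothing(labels, epsilon=0.1):
  """Softens hard 0/1 labels: positives toward 1 - epsilon, negatives toward the prior."""
  float_labels = tf.cast(labels, tf.float32)
  prior = tf.reduce_mean(float_labels)  # one simple choice of label prior
  return float_labels * (1.0 - epsilon) + prior * epsilon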
Example 4: get_input_evaluation_tensors
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      print(data_pattern, files)
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * FLAGS.batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 5: get_input_data_tensors
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256,
                           num_epochs=None):
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=False)
    training_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        training_data,
        batch_size=batch_size,
        capacity=FLAGS.batch_size * 4,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 6: create_model
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
  """Creates a logistic model.

  Args:
    model_input: 'batch' x 'num_features' matrix of input features.
    vocab_size: The number of classes in the dataset.

  Returns:
    A dictionary with a tensor containing the probability predictions of the
    model in the 'predictions' key. The dimensions of the tensor are
    batch_size x num_classes.
  """
  input_size = vocab_size
  output_size = FLAGS.hidden_size
  with tf.name_scope("rbm"):
    self.weights = tf.Variable(tf.truncated_normal([input_size, output_size],
                               stddev=1.0 / math.sqrt(float(input_size))),
                               name="weights")
    self.v_bias = tf.Variable(tf.zeros([input_size]), name="v_bias")
    self.h_bias = tf.Variable(tf.zeros([output_size]), name="h_bias")
    # Register L2 penalties on all RBM parameters as regularization losses.
    tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                         value=l2_penalty * tf.nn.l2_loss(self.weights))
    tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                         value=l2_penalty * tf.nn.l2_loss(self.v_bias))
    tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                         value=l2_penalty * tf.nn.l2_loss(self.h_bias))
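The extract ends after defining the RBM parameters, even though the docstring promises a dictionary under the 'predictions' key; the tail of the function was evidently cut off. The missing forward pass presumably resembles the standard RBM hidden activation, sketched here as an assumption rather than the original code:

    # Hypothetical continuation: p(h = 1 | v) = sigmoid(v W + h_bias).
    hidden = tf.nn.sigmoid(tf.matmul(model_input, self.weights) + self.h_bias)
    return {"predictions": hidden}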
Example 7: main
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)
  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(feature_names=feature_names,
                                            feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                 feature_sizes=feature_sizes)
  if FLAGS.output_dir == "":
    raise ValueError("'output_dir' was not specified. "
                     "Unable to continue with inference.")
  if FLAGS.input_data_pattern == "":
    raise ValueError("'input_data_pattern' was not specified. "
                     "Unable to continue with inference.")
  inference(reader, FLAGS.model_checkpoint_path, FLAGS.input_data_pattern,
            FLAGS.output_dir, FLAGS.batch_size, FLAGS.top_k)
Example 8: calculate_loss_mix2
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def calculate_loss_mix2(self, predictions, predictions_class,
                        predictions_encoder, labels, **unused_params):
  with tf.name_scope("loss_mix2"):
    float_labels = tf.cast(labels, tf.float32)
    float_encoders = float_labels
    # Run the labels through a pretrained autoencoder, layer by layer.
    for i in range(FLAGS.encoder_layers):
      var_i = np.loadtxt(FLAGS.autoencoder_dir + 'autoencoder_layer%d.model' % i)
      weight_i = tf.constant(var_i[:-1, :], dtype=tf.float32)
      bias_i = tf.reshape(tf.constant(var_i[-1, :], dtype=tf.float32), [-1])
      float_encoders = tf.nn.xw_plus_b(float_encoders, weight_i, bias_i)
      if i < FLAGS.encoder_layers - 1:
        float_encoders = tf.nn.relu(float_encoders)
      else:
        # Standardize the final encoding per example.
        hidden_mean = tf.reduce_mean(float_encoders, axis=1, keep_dims=True)
        hidden_std = tf.sqrt(tf.reduce_mean(
            tf.square(float_encoders - hidden_mean), axis=1, keep_dims=True))
        float_encoders = (float_encoders - hidden_mean) / (hidden_std + 1e-6)
        # float_encoders = tf.nn.sigmoid(float_encoders)
    cross_entropy_encoder = 0.1 * self.calculate_mseloss(
        predictions_encoder, float_encoders)
    cross_entropy_loss = self.calculate_loss(predictions, labels)
    return cross_entropy_encoder + cross_entropy_loss, float_encoders
    # return cross_entropy_encoder, float_encoders
Example 9: calculate_loss
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def calculate_loss(self, predictions, labels, **unused_params):
  bound = FLAGS.softmax_bound
  vocab_size_1 = bound
  with tf.name_scope("loss_softmax"):
    epsilon = 10e-8
    float_labels = tf.cast(labels, tf.float32)
    # Cross-entropy over the first `bound` classes.
    labels_1 = float_labels[:, :vocab_size_1]
    predictions_1 = predictions[:, :vocab_size_1]
    cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1, labels_1)
    labels_2 = float_labels[:, vocab_size_1:]
    predictions_2 = predictions[:, vocab_size_1:]
    # L1-normalize the remaining labels (labels are non-negative) and append
    # a "none of the above" column so they form a distribution.
    label_rowsum = tf.maximum(
        tf.reduce_sum(labels_2, 1, keep_dims=True),
        epsilon)
    label_append = 1.0 - tf.reduce_max(labels_2, 1, keep_dims=True)
    norm_float_labels = tf.concat(
        (tf.div(labels_2, label_rowsum), label_append), axis=1)
    predictions_append = 1.0 - tf.reduce_sum(predictions_2, 1, keep_dims=True)
    softmax_outputs = tf.concat((predictions_2, predictions_append), axis=1)
    softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
        1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
    softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
    return tf.reduce_mean(softmax_loss) + cross_entropy_loss
Example 11: get_forward_parameters
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def get_forward_parameters(vocab_size=4716):
  t_vars = tf.trainable_variables()
  # Collect each layer's weight and bias variables by name.
  h1_vars_weight = [var for var in t_vars if 'hidden_1' in var.name and 'weights' in var.name]
  h1_vars_biases = [var for var in t_vars if 'hidden_1' in var.name and 'biases' in var.name]
  h2_vars_weight = [var for var in t_vars if 'hidden_2' in var.name and 'weights' in var.name]
  h2_vars_biases = [var for var in t_vars if 'hidden_2' in var.name and 'biases' in var.name]
  o1_vars_weight = [var for var in t_vars if 'output_1' in var.name and 'weights' in var.name]
  o1_vars_biases = [var for var in t_vars if 'output_1' in var.name and 'biases' in var.name]
  o2_vars_weight = [var for var in t_vars if 'output_2' in var.name and 'weights' in var.name]
  o2_vars_biases = [var for var in t_vars if 'output_2' in var.name and 'biases' in var.name]
  h1_vars_biases = tf.reshape(h1_vars_biases[0], [1, FLAGS.hidden_size_1])
  h2_vars_biases = tf.reshape(h2_vars_biases[0], [1, FLAGS.hidden_size_2])
  o1_vars_biases = tf.reshape(o1_vars_biases[0], [1, FLAGS.hidden_size_1])
  o2_vars_biases = tf.reshape(o2_vars_biases[0], [1, vocab_size])
  # Stack each layer's bias row under its weight matrix.
  vars_1 = tf.concat((h1_vars_weight[0], h1_vars_biases), axis=0)
  vars_2 = tf.concat((h2_vars_weight[0], h2_vars_biases), axis=0)
  vars_3 = tf.concat((o1_vars_weight[0], o1_vars_biases), axis=0)
  vars_4 = tf.concat((o2_vars_weight[0], o2_vars_biases), axis=0)
  return [vars_1, vars_2, vars_3, vars_4]
Example 12: main
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")
  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")
  logging.info("Looking for prediction files with pattern: %s",
               FLAGS.json_prediction_files_pattern)
  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)
  logging.info("Found files: %s", file_paths)
  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())
    for file_path in file_paths:
      logging.info("processing file: %s", file_path)
      with gfile.Open(file_path) as input_file:
        for line in input_file:
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))
    output_file.flush()
  logging.info("done")
Example 13: calculate_loss
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def calculate_loss(self, predictions, labels, **unused_params):
  with tf.name_scope("loss_xent"):
    epsilon = 10e-6
    alpha = FLAGS.alpha
    float_labels = tf.cast(labels, tf.float32)
    # Weighted cross-entropy: alpha balances the positive and negative terms.
    cross_entropy_loss = 2 * (alpha * float_labels * tf.log(predictions + epsilon) +
                              (1 - alpha) * (1 - float_labels) * tf.log(1 - predictions + epsilon))
    cross_entropy_loss = tf.negative(cross_entropy_loss)
    return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
Example 14: get_reader
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def get_reader():
  # Convert feature_names and feature_sizes to lists of values.
  feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
      FLAGS.feature_names, FLAGS.feature_sizes)
  if FLAGS.frame_features:
    reader = readers.YT8MFrameFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
  else:
    reader = readers.YT8MAggregatedFeatureReader(
        feature_names=feature_names, feature_sizes=feature_sizes)
  return reader
Example 15: write_to_record
# Required import: from tensorflow import flags [as alias]
# Or: from tensorflow.flags import FLAGS [as alias]
def write_to_record(video_ids, video_labels, video_rgbs, video_audios,
                    video_predictions, video_num_frames, filenum,
                    num_examples_processed):
  writer = tf.python_io.TFRecordWriter(
      FLAGS.output_dir + '/' + 'predictions-%04d.tfrecord' % filenum)
  for i in range(num_examples_processed):
    video_id = video_ids[i]
    # Indices of the nonzero entries form the label list for this video.
    video_label = np.nonzero(video_labels[i, :])[0]
    video_rgb = video_rgbs[i, :]
    video_audio = video_audios[i, :]
    video_prediction = video_predictions[i, :]
    video_num_frame = video_num_frames[i]
    example = get_output_feature(video_id, video_label, video_rgb, video_audio,
                                 video_prediction, video_num_frame)
    serialized = example.SerializeToString()
    writer.write(serialized)
  writer.close()
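get_output_feature is defined elsewhere in the same script. A minimal sketch of how such a helper could pack one video into a tf.train.Example (the feature keys below are assumptions; only the argument list comes from the call above):

def get_output_feature(video_id, video_label, video_rgb, video_audio,
                       video_prediction, video_num_frame):
  # Wrap each field in the appropriately typed tf.train feature list.
  return tf.train.Example(features=tf.train.Features(feature={
      "video_id": tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[video_id])),
      "labels": tf.train.Feature(
          int64_list=tf.train.Int64List(value=video_label)),
      "rgb": tf.train.Feature(
          float_list=tf.train.FloatList(value=video_rgb)),
      "audio": tf.train.Feature(
          float_list=tf.train.FloatList(value=video_audio)),
      "predictions": tf.train.Feature(
          float_list=tf.train.FloatList(value=video_prediction)),
      "num_frames": tf.train.Feature(
          int64_list=tf.train.Int64List(value=[video_num_frame])),
  }))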