本文整理汇总了Python中utils.utils方法的典型用法代码示例。如果您正苦于以下问题:Python utils.utils方法的具体用法?Python utils.utils怎么用?Python utils.utils使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils
的用法示例。
在下文中一共展示了utils.utils方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: trainer
# 需要导入模块: import utils [as 别名]
# 或者: from utils import utils [as 别名]
def trainer(network, number_of_images):
    """Train `network` on `number_of_images` images and checkpoint the result.

    Args:
        network: logits tensor produced by the model graph.
        number_of_images: total image count; batches per epoch is
            number_of_images // batch_size.

    Relies on module-level `session`, `images_ph`, `labels_ph`, `epochs`,
    `batch_size`, `model_save_name`, and the `utils` batch loader.
    """
    # Softmax cross-entropy compares logits against the one-hot labels
    # placeholder — better suited to classification than squared error.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=network, labels=labels_ph)
    # Reduce the per-example losses to a single scalar cost.
    cost = tf.reduce_mean(cross_entropy)
    tf.summary.scalar("cost", cost)  # for TensorBoard visualisation
    # Backpropagate to minimise the cost in the network.
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    session.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(model_save_name, graph=tf.get_default_graph())
    merged = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=4)
    counter = 0
    for epoch in range(epochs):
        tools = utils()
        # Floor division: identical to int(a / b) for positive operands.
        for batch in range(number_of_images // batch_size):
            counter += 1
            images, labels = tools.batch_dispatch()
            # BUG FIX: `images == None` is an element-wise comparison once
            # `images` is a numpy array; `is None` is the correct sentinel test.
            if images is None:
                break
            loss, summary = session.run(
                [cost, merged],
                feed_dict={images_ph: images, labels_ph: labels})
            print('loss', loss)
            session.run(optimizer, feed_dict={images_ph: images, labels_ph: labels})
            print('Epoch number ', epoch, 'batch', batch, 'complete')
            writer.add_summary(summary, counter)
    # FIX: single-argument os.path.join was a no-op; pass the path directly
    # (consistent with the sibling trainer implementation).
    # NOTE(review): original indentation was lost — assuming the checkpoint is
    # saved once after training, at function level; confirm against the repo.
    saver.save(session, model_save_name)
示例2: trainer
# 需要导入模块: import utils [as 别名]
# 或者: from utils import utils [as 别名]
def trainer(network, number_of_images):
    """Train `network` on `number_of_images` images and checkpoint the result.

    Args:
        network: logits tensor produced by the model graph.
        number_of_images: total image count; batches per epoch is
            number_of_images // batch_size.

    Relies on module-level `session`, `images_ph`, `labels_ph`, `epochs`,
    `batch_size`, `model_save_name`, and the `utils` batch loader.
    """
    # Softmax cross-entropy compares logits against the one-hot labels
    # placeholder — better suited to classification than squared error.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=network, labels=labels_ph)
    # Reduce the per-example losses to a single scalar cost.
    cost = tf.reduce_mean(cross_entropy)
    tf.summary.scalar("cost", cost)  # for TensorBoard visualisation
    # Backpropagate to minimise the cost in the network.
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    session.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(model_save_name, graph=tf.get_default_graph())
    merged = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=4)
    counter = 0
    for epoch in range(epochs):
        tools = utils()
        # Floor division: identical to int(a / b) for positive operands.
        for batch in range(number_of_images // batch_size):
            counter += 1
            images, labels = tools.batch_dispatch()
            # BUG FIX: `images == None` is an element-wise comparison once
            # `images` is a numpy array; `is None` is the correct sentinel test.
            if images is None:
                break
            loss, summary = session.run(
                [cost, merged],
                feed_dict={images_ph: images, labels_ph: labels})
            print('loss', loss)
            session.run(optimizer, feed_dict={images_ph: images, labels_ph: labels})
            print('Epoch number ', epoch, 'batch', batch, 'complete')
            writer.add_summary(summary, counter)
    # NOTE(review): original indentation was lost — assuming the checkpoint is
    # saved once after training, at function level; confirm against the repo.
    saver.save(session, model_save_name)
示例3: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import utils [as 别名]
def main(_):
    """Begin execution of the program.

    Args:
        _: TensorFlow flags app instance (unused; required by tf.app.run's
           callback signature).
    """
    # Record-creation mode: build the dataset records, then quit.
    if FLAGS.create != "":
        dataset = utils.Dataset(FLAGS)
        dataset.create_records(FLAGS.create)
        exit()
    # Run-directory / config-logging setup; skipped entirely in test mode.
    if not FLAGS.test:
        # NOTE(review): "priliminary" looks like a typo of "preliminary",
        # but the callee is defined elsewhere — renaming must happen there.
        priliminary_checks(FLAGS)
        idx = get_runid(FLAGS)
        create_rundirs(FLAGS, idx)
        dump_model_params(FLAGS)
        log_config(idx, FLAGS.__flags)
    # Architecture-inspection mode: build an inference graph and exit.
    if FLAGS.archi:
        net = nnet.Model(FLAGS, is_training=False)
        net.test_graph()
        exit()
    # Square inputs: 'maps' uses 600px images, everything else 256px.
    FLAGS.h = 600 if FLAGS.dataset == 'maps' else 256
    FLAGS.w = FLAGS.h
    if FLAGS.train or FLAGS.resume:
        # Training (or resuming) path.
        net = nnet.Model(FLAGS, is_training=True)
        net.train()
        print ' - Done training the network...'  # Python 2 print statement
    else:
        # Default path: evaluate the model on the test source.
        print ' - Testing the model...'
        net = nnet.Model(FLAGS, is_training=False)
        net.test(FLAGS.test_source)
示例4: main
# 需要导入模块: import utils [as 别名]
# 或者: from utils import utils [as 别名]
def main():
    """Load the MITRE ATT&CK enterprise matrix and parse its contents."""
    helper = utils.utils()
    # The matrix JSON keeps everything of interest under the 'objects' key.
    raw_matrix = json.loads(helper.read_attack_matrix())
    attack_objects = raw_matrix['objects']
    parse_mitre_attack_tactics(attack_objects)
    parse_mitre_attack_techniques(attack_objects)
示例5: read_triviaqa_data
# 需要导入模块: import utils [as 别名]
# 或者: from utils import utils [as 别名]
def read_triviaqa_data(qajson):
    """Read the TriviaQA JSON at `qajson`, keeping only verified-eval data.

    When the file is flagged as a verified evaluation set, entries not part
    of the clean subset are dropped; Web-domain entries are additionally
    passed through `read_clean_part`.
    """
    data = utils.utils.read_json(qajson)
    if data['VerifiedEval']:
        # The domain check is loop-invariant, so hoist it out of the filter.
        needs_cleaning = data['Domain'] == 'Web'
        data['Data'] = [
            read_clean_part(entry) if needs_cleaning else entry
            for entry in data['Data']
            if entry['QuestionPartOfVerifiedEval']
        ]
    return data