This article collects typical usage examples of the tensorflow.GPUOptions method in Python. If you are unsure what tensorflow.GPUOptions does, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module in which this method is defined.
The 15 code examples of tensorflow.GPUOptions shown below are sorted by popularity by default.
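Before the individual examples, the short sketch below (not taken from any of the repositories listed here; it assumes the TensorFlow 1.x API, which is also reachable as tf.compat.v1 under TensorFlow 2.x) shows the two GPUOptions settings that recur throughout: capping per-process memory with per_process_gpu_memory_fraction, and on-demand allocation with allow_growth.
import tensorflow as tf

# Option 1: reserve at most 40% of each visible GPU's memory for this process.
fraction_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)

# Option 2: start with a small allocation and let it grow as needed.
growth_options = tf.GPUOptions(allow_growth=True)

# A GPUOptions object only takes effect when passed to the session via ConfigProto.
config = tf.ConfigProto(gpu_options=growth_options, allow_soft_placement=True)
sess = tf.Session(config=config)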
Example 1: _get_run_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def _get_run_config(config):
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=config['per_process_gpu_memory_fraction'],
        allow_growth=config['gpu_allow_growth'])
    sess_config = tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=config['log_device_placement'])
    run_config = tf.estimator.RunConfig(
        model_dir=config['model_dir'],
        tf_random_seed=config['tf_random_seed'],
        save_summary_steps=config['save_summary_steps'],
        save_checkpoints_steps=config['save_checkpoints_steps'],
        save_checkpoints_secs=config['save_checkpoints_secs'],
        keep_checkpoint_max=config['keep_checkpoint_max'],
        keep_checkpoint_every_n_hours=config['keep_checkpoint_every_n_hours'],
        log_step_count_steps=config['log_step_count_steps'],
        session_config=sess_config)
    return run_config
Example 2: create_session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def create_session_config(self):
    """create session_config"""
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95,
                                allow_growth=True)
    # set number of GPU devices
    device_count = {"GPU": self.config.gpu_count}
    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_count=device_count,
        gpu_options=gpu_options)
    return session_config
Example 3: get_session
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def get_session():
    tf.reset_default_graph()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1)
    # This was the default provided in the starter code.
    # session = tf.Session(config=tf_config)
    # Use this if I want to see what is on the GPU.
    # session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    # Use this for limiting memory allocated for the GPU.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    print("AVAILABLE GPUS: ", get_available_gpus())
    return session
Example 4: load_yaw_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def load_yaw_variables(self, YawFilePath):
    """Load variables from a checkpoint file.

    @param YawFilePath Path to a valid checkpoint
    """
    # It is possible to use the checkpoint file
    # y_ckpt = tf.train.get_checkpoint_state(YawFilePath)
    # .restore(self._sess, y_ckpt.model_checkpoint_path)
    # For future use, allocating a fraction of the GPU
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)  # Allocate only half of the GPU memory
    if not os.path.isfile(YawFilePath):
        raise ValueError('[DEEPGAZE] CnnHeadPoseEstimator: the yaw file path is incorrect.')
    tf.train.Saver({"conv1_yaw_w": self.hy_conv1_weights, "conv1_yaw_b": self.hy_conv1_biases,
                    "conv2_yaw_w": self.hy_conv2_weights, "conv2_yaw_b": self.hy_conv2_biases,
                    "conv3_yaw_w": self.hy_conv3_weights, "conv3_yaw_b": self.hy_conv3_biases,
                    "dense1_yaw_w": self.hy_dense1_weights, "dense1_yaw_b": self.hy_dense1_biases,
                    "out_yaw_w": self.hy_out_weights, "out_yaw_b": self.hy_out_biases
                    }).restore(self._sess, YawFilePath)
Example 5: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def __init__(self, cluster, task, train_dir, log_device_placement=True):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
    """
    self.cluster = cluster
    self.task = task
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    self.config = tf.ConfigProto(gpu_options=gpu_options,
                                 log_device_placement=log_device_placement)
    if self.is_master and self.task.index > 0:
        raise StandardError("%s: Only one replica of master expected",
                            task_as_string(self.task))
Example 6: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def __init__(self, cluster, task, train_dir, log_device_placement=True):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed.
        None otherwise.
      task: A TaskSpec describing the job type and the task index.
    """
    self.cluster = cluster
    self.task = task
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    self.config = tf.ConfigProto(log_device_placement=log_device_placement,
                                 gpu_options=gpu_options)
    if self.is_master and self.task.index > 0:
        raise StandardError("%s: Only one replica of master expected",
                            task_as_string(self.task))
Example 7: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def __init__(self, net_factory, data_size, batch_size, model_path):
    graph = tf.Graph()
    with graph.as_default():
        self.image_op = tf.placeholder(tf.float32, shape=[batch_size, data_size, data_size, 3],
                                       name='input_image')
        # figure out landmark
        self.cls_prob, self.bbox_pred, self.landmark_pred = net_factory(self.image_op, training=False)
        self.sess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
        saver = tf.train.Saver()
        # check whether the dictionary is valid
        model_dict = '/'.join(model_path.split('/')[:-1])
        ckpt = tf.train.get_checkpoint_state(model_dict)
        print(model_path)
        readstate = ckpt and ckpt.model_checkpoint_path
        assert readstate, "the params dictionary is not valid"
        print("restore models' param")
        saver.restore(self.sess, model_path)
    self.data_size = data_size
    self.batch_size = batch_size
# rnet and onet minibatch (test)
Example 8: train
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def train(env_id, num_timesteps, seed, policy, hparams):
    ncpu = multiprocessing.cpu_count()
    # if sys.platform == 'darwin': ncpu //= 2
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=hparams['gpu_fraction'])
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu,
                            gpu_options=gpu_options)
    config.gpu_options.allow_growth = False  # pylint: disable=E1101
    tf.Session(config=config).__enter__()

    video_log_dir = os.path.join(hparams['base_dir'], 'videos', hparams['experiment_name'])
    env = VecFrameStack(make_atari_env(env_id, 8, seed, video_log_dir=video_log_dir,
                                       write_attention_video='attention' in policy, nsteps=128), 4)
    policy = {'cnn': CnnPolicy, 'lstm': LstmPolicy, 'lnlstm': LnLstmPolicy,
              'cnn_attention': CnnAttentionPolicy}[policy]
    ppo2.learn(policy=policy, env=env, nsteps=128, nminibatches=4,
               lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
               ent_coef=.01,
               lr=lambda f: f * 2.5e-4,
               cliprange=lambda f: f * 0.1,
               total_timesteps=int(num_timesteps * 1.1),
               hparams=hparams)
Example 9: predict
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def predict(images):
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        x = tf.placeholder(
            shape=[None, INPUT_SEQ, INPUT_H, INPUT_W, INPUT_D], dtype=tf.float32)
        y_ = tf.placeholder(shape=[None, OUTPUT_DIM], dtype=tf.float32)

        core_net = vqn_model(x)

        vars = tf.trainable_variables()
        lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * 1e-3
        core_net_loss = tflearn.objectives.mean_square(core_net, y_)
        # + lossL2
        core_train_op = tf.train.AdamOptimizer(
            learning_rate=LR_RATE).minimize(core_net_loss)
        core_net_acc = tf.reduce_mean(
            tf.abs(core_net - y_) / (tf.abs(core_net) + tf.abs(y_) / 2))
        core_net_mape = tf.subtract(1.0, tf.reduce_mean(
            tf.abs(core_net - y_) / tf.abs(y_)))

        train_len = X.shape[0]
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()
        saver.restore(sess, "model/nn_model_ep_300.ckpt")

        _test_y = sess.run(core_net, feed_dict={x: images})
        return _test_y
Example 10: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def main(_):
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        config = get_config(FLAGS) or FLAGS

        if config.env_type == 'simple':
            env = SimpleGymEnvironment(config)
        else:
            env = GymEnvironment(config)

        if not tf.test.is_gpu_available() and FLAGS.use_gpu:
            raise Exception("use_gpu flag is true when no GPUs are available")

        if not FLAGS.use_gpu:
            config.cnn_format = 'NHWC'

        agent = Agent(config, env, sess)

        if FLAGS.is_train:
            agent.train()
        else:
            agent.play()
Example 11: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def main(_):
    if FLAGS.mode == 'prepare':
        print('Prepare files')
        create_wordVec(FLAGS)
        create_serial(FLAGS)
    else:
        tf.reset_default_graph()
        print('build model')
        gpu_options = tf.GPUOptions(visible_device_list=FLAGS.cuda, allow_growth=True)
        with tf.Graph().as_default():
            set_seed()
            sess = tf.Session(
                config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True,
                                      intra_op_parallelism_threads=int(multiprocessing.cpu_count() / 2),
                                      inter_op_parallelism_threads=int(multiprocessing.cpu_count() / 2)))
            with sess.as_default():
                initializer = tf.contrib.layers.xavier_initializer()
                with tf.variable_scope('', initializer=initializer):
                    model = Baseline(FLAGS)
                sess.run(tf.global_variables_initializer())
                saver = tf.train.Saver(max_to_keep=None)
                model.run_model(sess, saver)
Example 12: setup_meta_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def setup_meta_ops(self):
    cfg = dict({
        'allow_soft_placement': False,
        'log_device_placement': False
    })

    utility = min(self.FLAGS.gpu, 1.)
    if utility > 0.0:
        self.say('GPU mode with {} usage'.format(utility))
        cfg['gpu_options'] = tf.GPUOptions(
            per_process_gpu_memory_fraction=utility)
        cfg['allow_soft_placement'] = True
    else:
        self.say('Running entirely on CPU')
        cfg['device_count'] = {'GPU': 0}

    if self.FLAGS.train: self.build_train_op()

    if self.FLAGS.summary:
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.FLAGS.summary + 'train')

    self.sess = tf.Session(config=tf.ConfigProto(**cfg))
    self.sess.run(tf.global_variables_initializer())

    if not self.ntrain: return
    self.saver = tf.train.Saver(tf.global_variables(),
                                max_to_keep=self.FLAGS.keep)
    if self.FLAGS.load != 0: self.load_from_ckpt()

    if self.FLAGS.summary:
        self.writer.add_graph(self.sess.graph)
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def __init__(self):
    self.graph = tf.Graph()
    with self.graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            self.pnet, self.rnet, self.onet = FaceDet.create_mtcnn(sess, None)
Example 14: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def __init__(self, config, model, dataset):
    self.config = config
    self.model = model
    self.train_dir = config.train_dir
    log.info("self.train_dir = %s", self.train_dir)

    # --- input ops ---
    self.batch_size = config.batch_size
    self.dataset = dataset

    check_data_id(dataset, config.data_id)
    _, self.batch = create_input_ops(dataset, self.batch_size,
                                     data_id=config.data_id,
                                     is_training=False,
                                     shuffle=False)

    self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
    self.step_op = tf.no_op(name='step_no_op')

    tf.set_random_seed(1234)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        device_count={'GPU': 1},
    )
    self.session = tf.Session(config=session_config)

    # --- checkpoint and monitoring ---
    self.saver = tf.train.Saver(max_to_keep=100)

    self.checkpoint = config.checkpoint
    if self.checkpoint is None and self.train_dir:
        self.checkpoint = tf.train.latest_checkpoint(self.train_dir)
    if self.checkpoint is None:
        log.warn("No checkpoint is given. Just random initialization :-)")
        self.session.run(tf.global_variables_initializer())
    else:
        log.info("Checkpoint path : %s", self.checkpoint)
Example 15: create_session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GPUOptions [as alias]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
    """The TensorFlow Session config to use."""
    if use_tpu:
        graph_options = tf.GraphOptions()
    else:
        if enable_graph_rewriter:
            rewrite_options = rewriter_config_pb2.RewriterConfig()
            rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
            graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
        else:
            graph_options = tf.GraphOptions(
                optimizer_options=tf.OptimizerOptions(
                    opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)

    config = tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=gpu_options,
        log_device_placement=log_device_placement,
        inter_op_parallelism_threads=inter_op_parallelism_threads,
        intra_op_parallelism_threads=intra_op_parallelism_threads)
    return config
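All of the examples above target the TensorFlow 1.x API. If you are running TensorFlow 2.x, the same objects remain available through the compatibility module; a minimal sketch, assuming a TF 2.x installation:
import tensorflow as tf

# Under TensorFlow 2.x, the 1.x session-style API lives in tf.compat.v1.
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
sess = tf.compat.v1.Session(config=config)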